// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package user

import (
	"bytes"
	"fmt"
	"net/http"
	"regexp"
	"slices"
	"sort"
	"strconv"
	"strings"

	activities_model "code.gitea.io/gitea/models/activities"
	asymkey_model "code.gitea.io/gitea/models/asymkey"
	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	issues_model "code.gitea.io/gitea/models/issues"
	"code.gitea.io/gitea/models/organization"
	repo_model "code.gitea.io/gitea/models/repo"
	"code.gitea.io/gitea/models/unit"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/container"
	issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/markup"
	"code.gitea.io/gitea/modules/markup/markdown"
	"code.gitea.io/gitea/modules/optional"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/routers/web/feed"
	"code.gitea.io/gitea/services/context"
	issue_service "code.gitea.io/gitea/services/issue"
	pull_service "code.gitea.io/gitea/services/pull"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/armor"
	"xorm.io/builder"
)

const (
	tplDashboard  base.TplName = "user/dashboard/dashboard"
	tplIssues     base.TplName = "user/dashboard/issues"
	tplMilestones base.TplName = "user/dashboard/milestones"
	tplProfile    base.TplName = "user/profile"
)

// getDashboardContextUser finds out which context user's dashboard is being viewed.
func getDashboardContextUser(ctx *context.Context) *user_model.User {
	ctxUser := ctx.Doer
	orgName := ctx.Params(":org")
	if len(orgName) > 0 {
		ctxUser = ctx.Org.Organization.AsUser()
		ctx.Data["Teams"] = ctx.Org.Teams
	}
	ctx.Data["ContextUser"] = ctxUser

	orgs, err := organization.GetUserOrgsList(ctx, ctx.Doer)
	if err != nil {
		ctx.ServerError("GetUserOrgsList", err)
		return nil
	}
	ctx.Data["Orgs"] = orgs

	return ctxUser
}

// Dashboard renders the dashboard page
func Dashboard(ctx *context.Context) {
	ctxUser := getDashboardContextUser(ctx)
	if ctx.Written() {
		return
	}

	var (
		date = ctx.FormString("date")
		page = ctx.FormInt("page")
	)

	// Make sure page number is at least 1. Will be posted to ctx.Data.
	if page <= 1 {
		page = 1
	}

	ctx.Data["Title"] = ctxUser.DisplayName() + " - " + ctx.Locale.TrString("dashboard")
	ctx.Data["PageIsDashboard"] = true
	ctx.Data["PageIsNews"] = true
	cnt, _ := organization.GetOrganizationCount(ctx, ctxUser)
	ctx.Data["UserOrgsCount"] = cnt
	ctx.Data["MirrorsEnabled"] = setting.Mirror.Enabled
	ctx.Data["Date"] = date

	var uid int64
	if ctxUser != nil {
		uid = ctxUser.ID
	}

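	// Parameters for the repository list shown on the dashboard, exposed to the
	// page's frontend code via ctx.PageData.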
	ctx.PageData["dashboardRepoList"] = map[string]any{
		"searchLimit": setting.UI.User.RepoPagingNum,
		"uid":         uid,
	}

	if setting.Service.EnableUserHeatmap {
		data, err := activities_model.GetUserHeatmapDataByUserTeam(ctx, ctxUser, ctx.Org.Team, ctx.Doer)
		if err != nil {
			ctx.ServerError("GetUserHeatmapDataByUserTeam", err)
			return
		}
		ctx.Data["HeatmapData"] = data
		ctx.Data["HeatmapTotalContributions"] = activities_model.GetTotalContributionsInHeatmap(data)
	}

	feeds, count, err := activities_model.GetFeeds(ctx, activities_model.GetFeedsOptions{
		RequestedUser:   ctxUser,
		RequestedTeam:   ctx.Org.Team,
		Actor:           ctx.Doer,
		IncludePrivate:  true,
		OnlyPerformedBy: false,
		IncludeDeleted:  false,
		Date:            ctx.FormString("date"),
		ListOptions: db.ListOptions{
			Page:     page,
			PageSize: setting.UI.FeedPagingNum,
		},
	})
	if err != nil {
		ctx.ServerError("GetFeeds", err)
		return
	}

	ctx.Data["Feeds"] = feeds

	pager := context.NewPagination(int(count), setting.UI.FeedPagingNum, page, 5)
	pager.AddParam(ctx, "date", "Date")
	ctx.Data["Page"] = pager

	ctx.HTML(http.StatusOK, tplDashboard)
}

// Milestones renders the user milestones page
func Milestones(ctx *context.Context) {
	if unit.TypeIssues.UnitGlobalDisabled() && unit.TypePullRequests.UnitGlobalDisabled() {
		log.Debug("Milestones overview page not available as both issues and pull requests are globally disabled")
		ctx.Status(http.StatusNotFound)
		return
	}

	ctx.Data["Title"] = ctx.Tr("milestones")
	ctx.Data["PageIsMilestonesDashboard"] = true

	ctxUser := getDashboardContextUser(ctx)
	if ctx.Written() {
		return
	}

	repoOpts := repo_model.SearchRepoOptions{
		Actor:         ctx.Doer,
		OwnerID:       ctxUser.ID,
		Private:       true,
		AllPublic:     false, // Do not include all public repositories of users and public organisations
		AllLimited:    false, // Do not include all public repositories of limited organisations
		Archived:      optional.Some(false),
		HasMilestones: optional.Some(true), // Only show repositories that have milestones
	}

	if ctxUser.IsOrganization() && ctx.Org.Team != nil {
		repoOpts.TeamID = ctx.Org.Team.ID
	}

	var (
		userRepoCond = repo_model.SearchRepositoryCondition(&repoOpts) // condition for all repos the user could visit
		repoCond     = userRepoCond
		repoIDs      []int64

		reposQuery   = ctx.FormString("repos")
		isShowClosed = ctx.FormString("state") == "closed"
		sortType     = ctx.FormString("sort")
		page         = ctx.FormInt("page")
		keyword      = ctx.FormTrim("q")
	)

	if page <= 1 {
		page = 1
	}

	if len(reposQuery) != 0 {
		if issueReposQueryPattern.MatchString(reposQuery) {
			// remove "[" and "]" from the string
			reposQuery = reposQuery[1 : len(reposQuery)-1]
			// for each ID (delimited by ","), parse it and append it to repoIDs

			for _, rID := range strings.Split(reposQuery, ",") {
				// Ensure nonempty string entries
				if rID != "" && rID != "0" {
					rIDint64, err := strconv.ParseInt(rID, 10, 64)
					// If the repo ID specified by the query is not parseable or not accessible by the user, just ignore it.
					if err == nil {
						repoIDs = append(repoIDs, rIDint64)
					}
				}
			}
			if len(repoIDs) > 0 {
				// Don't just set repoCond = builder.In("id", repoIDs), because the user may have no permission
				// on some of those repos; the original repoCond already restricts the query to accessible repos.
				repoCond = repoCond.And(builder.In("id", repoIDs))
			}
		} else {
			log.Warn("issueReposQueryPattern not match with query")
		}
	}

	counts, err := issues_model.CountMilestonesMap(ctx, issues_model.FindMilestoneOptions{
		RepoCond: userRepoCond,
		Name:     keyword,
		IsClosed: optional.Some(isShowClosed),
	})
	if err != nil {
		ctx.ServerError("CountMilestonesByRepoIDs", err)
		return
	}

	milestones, err := db.Find[issues_model.Milestone](ctx, issues_model.FindMilestoneOptions{
		ListOptions: db.ListOptions{
			Page:     page,
			PageSize: setting.UI.IssuePagingNum,
		},
		RepoCond: repoCond,
		IsClosed: optional.Some(isShowClosed),
		SortType: sortType,
		Name:     keyword,
	})
	if err != nil {
		ctx.ServerError("SearchMilestones", err)
		return
	}

	showRepos, _, err := repo_model.SearchRepositoryByCondition(ctx, &repoOpts, userRepoCond, false)
	if err != nil {
		ctx.ServerError("SearchRepositoryByCondition", err)
		return
	}
	sort.Sort(showRepos)
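
	// Attach each milestone to its repository from showRepos; milestones whose
	// repository cannot be found among the user's visible repos are dropped.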
	for i := 0; i < len(milestones); {
		for _, repo := range showRepos {
			if milestones[i].RepoID == repo.ID {
				milestones[i].Repo = repo
				break
			}
		}
		if milestones[i].Repo == nil {
			log.Warn("Cannot find milestone %d 's repository %d", milestones[i].ID, milestones[i].RepoID)
			milestones = append(milestones[:i], milestones[i+1:]...)
			continue
		}

		milestones[i].RenderedContent, err = markdown.RenderString(&markup.RenderContext{
			Links: markup.Links{
				Base: milestones[i].Repo.Link(),
			},
			Metas: milestones[i].Repo.ComposeMetas(ctx),
			Ctx:   ctx,
		}, milestones[i].Content)
		if err != nil {
			ctx.ServerError("RenderString", err)
			return
		}

		if milestones[i].Repo.IsTimetrackerEnabled(ctx) {
			err := milestones[i].LoadTotalTrackedTime(ctx)
			if err != nil {
				ctx.ServerError("LoadTotalTrackedTime", err)
				return
			}
		}
		i++
	}

	milestoneStats, err := issues_model.GetMilestonesStatsByRepoCondAndKw(ctx, repoCond, keyword)
	if err != nil {
		ctx.ServerError("GetMilestoneStats", err)
		return
	}

	var totalMilestoneStats *issues_model.MilestonesStats
	if len(repoIDs) == 0 {
		totalMilestoneStats = milestoneStats
	} else {
		totalMilestoneStats, err = issues_model.GetMilestonesStatsByRepoCondAndKw(ctx, userRepoCond, keyword)
		if err != nil {
			ctx.ServerError("GetMilestoneStats", err)
			return
		}
	}

	showRepoIDs := make(container.Set[int64], len(showRepos))
	for _, repo := range showRepos {
		if repo.ID > 0 {
			showRepoIDs.Add(repo.ID)
		}
	}
	if len(repoIDs) == 0 {
		repoIDs = showRepoIDs.Values()
	}
	repoIDs = slices.DeleteFunc(repoIDs, func(v int64) bool {
		return !showRepoIDs.Contains(v)
	})

	var pagerCount int
	if isShowClosed {
		ctx.Data["State"] = "closed"
		ctx.Data["Total"] = totalMilestoneStats.ClosedCount
		pagerCount = int(milestoneStats.ClosedCount)
	} else {
		ctx.Data["State"] = "open"
		ctx.Data["Total"] = totalMilestoneStats.OpenCount
		pagerCount = int(milestoneStats.OpenCount)
	}

	ctx.Data["Milestones"] = milestones
	ctx.Data["Repos"] = showRepos
	ctx.Data["Counts"] = counts
	ctx.Data["MilestoneStats"] = milestoneStats
	ctx.Data["SortType"] = sortType
	ctx.Data["Keyword"] = keyword
	ctx.Data["RepoIDs"] = repoIDs
	ctx.Data["IsShowClosed"] = isShowClosed

	pager := context.NewPagination(pagerCount, setting.UI.IssuePagingNum, page, 5)
	pager.AddParam(ctx, "q", "Keyword")
	pager.AddParam(ctx, "repos", "RepoIDs")
	pager.AddParam(ctx, "sort", "SortType")
	pager.AddParam(ctx, "state", "State")
	ctx.Data["Page"] = pager

	ctx.HTML(http.StatusOK, tplMilestones)
}

// Pulls renders the user's pull request overview page
func Pulls(ctx *context.Context) {
	if unit.TypePullRequests.UnitGlobalDisabled() {
		log.Debug("Pull request overview page not available as it is globally disabled.")
		ctx.Status(http.StatusNotFound)
		return
	}

	ctx.Data["Title"] = ctx.Tr("pull_requests")
	ctx.Data["PageIsPulls"] = true
	buildIssueOverview(ctx, unit.TypePullRequests)
}

// Issues renders the user's issues overview page
func Issues(ctx *context.Context) {
	if unit.TypeIssues.UnitGlobalDisabled() {
		log.Debug("Issues overview page not available as it is globally disabled.")
		ctx.Status(http.StatusNotFound)
		return
	}

	ctx.Data["Title"] = ctx.Tr("issues")
	ctx.Data["PageIsIssues"] = true
	buildIssueOverview(ctx, unit.TypeIssues)
}

// issueReposQueryPattern matches the "repos" query parameter: a bracketed, comma-separated list of repository IDs.
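// e.g. "[1,23,456]"; a trailing comma as in "[1,23,456,]" is also accepted by the pattern.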
var issueReposQueryPattern = regexp.MustCompile(`^\[\d+(,\d+)*,?\]$`)
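
// buildIssueOverview renders the issues/pulls overview page shared by Issues and Pulls
// for the dashboard context user; unitType selects whether issues or pull requests are listed.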
func buildIssueOverview(ctx *context.Context, unitType unit.Type) {
	// ----------------------------------------------------
	// Determine user; can be either user or organization.
	// Return with NotFound or ServerError if unsuccessful.
	// ----------------------------------------------------

	ctxUser := getDashboardContextUser(ctx)
	if ctx.Written() {
		return
	}

	var (
		viewType   string
		sortType   = ctx.FormString("sort")
		filterMode int
	)

	// Default to recently updated, unlike repository issues list
	if sortType == "" {
		sortType = "recentupdate"
	}

	// --------------------------------------------------------------------------------
	// Distinguish User from Organization.
	// Org:
	// - Remember pre-determined viewType string for later. Will be posted to ctx.Data.
	//   Organization does not have view type and filter mode.
	// User:
	// - Use ctx.FormString("type") to determine filterMode.
	//   The type is set when clicking for example "assigned to me" on the overview page.
	// - Remember either this or a fallback. Will be posted to ctx.Data.
	// --------------------------------------------------------------------------------

	// TODO: distinguish during routing

	viewType = ctx.FormString("type")
	switch viewType {
	case "assigned":
		filterMode = issues_model.FilterModeAssign
	case "created_by":
		filterMode = issues_model.FilterModeCreate
	case "mentioned":
		filterMode = issues_model.FilterModeMention
	case "review_requested":
		filterMode = issues_model.FilterModeReviewRequested
	case "reviewed_by":
		filterMode = issues_model.FilterModeReviewed
	case "your_repositories":
		fallthrough
	default:
		filterMode = issues_model.FilterModeYourRepositories
		viewType = "your_repositories"
	}

	// --------------------------------------------------------------------------
	// Build opts (IssuesOptions), which contains filter information.
	// Will eventually be used to retrieve issues relevant for the overview page.
	// Note: Non-final states of opts are used in-between, namely for:
	// - Keyword search
	// - Count Issues by repo
	// --------------------------------------------------------------------------

	// Get repository IDs where User/Org/Team has access.
	var team *organization.Team
	var org *organization.Organization
	if ctx.Org != nil {
		org = ctx.Org.Organization
		team = ctx.Org.Team
	}

	isPullList := unitType == unit.TypePullRequests
	opts := &issues_model.IssuesOptions{
		IsPull:     optional.Some(isPullList),
		SortType:   sortType,
		IsArchived: optional.Some(false),
		Org:        org,
		Team:       team,
		User:       ctx.Doer,
	}

	isFuzzy := ctx.FormBool("fuzzy")

	// Search all repositories which
	//
	// As user:
	// - Owns the repository.
	// - Has collaborator permissions in the repository.
	//
	// As org:
	// - Owns the repository.
	//
	// As team:
	// - The team's org owns the repository.
	// - The team has read permission to the repository.
	repoOpts := &repo_model.SearchRepoOptions{
		Actor:       ctx.Doer,
		OwnerID:     ctxUser.ID,
		Private:     true,
		AllPublic:   false,
		AllLimited:  false,
		Collaborate: optional.None[bool](),
		UnitType:    unitType,
		Archived:    optional.Some(false),
	}
	if team != nil {
		repoOpts.TeamID = team.ID
	}
	accessibleRepos := container.Set[int64]{}
	{
		ids, _, err := repo_model.SearchRepositoryIDs(ctx, repoOpts)
		if err != nil {
			ctx.ServerError("SearchRepositoryIDs", err)
			return
		}
		accessibleRepos.AddMultiple(ids...)
		opts.RepoIDs = ids
		if len(opts.RepoIDs) == 0 {
			// no repos found, don't let the indexer return all repos
			opts.RepoIDs = []int64{0}
		}
	}
	if ctx.Doer.ID == ctxUser.ID && filterMode != issues_model.FilterModeYourRepositories {
		// If the doer is the same as the context user, which means the doer is viewing their own dashboard,
		// it's not enough to show the repos that the doer owns or has been explicitly granted access to,
		// because the doer may create issues or be mentioned in any public repo.
		// So we need to search issues in all public repos.
		opts.AllPublic = true
	}
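
	// Scope the query to the doer according to the selected filter mode
	// (assigned, created_by, mentioned, review_requested, or reviewed_by).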
	switch filterMode {
	case issues_model.FilterModeAll:
	case issues_model.FilterModeYourRepositories:
	case issues_model.FilterModeAssign:
		opts.AssigneeID = ctx.Doer.ID
	case issues_model.FilterModeCreate:
		opts.PosterID = ctx.Doer.ID
	case issues_model.FilterModeMention:
		opts.MentionedID = ctx.Doer.ID
	case issues_model.FilterModeReviewRequested:
		opts.ReviewRequestedID = ctx.Doer.ID
	case issues_model.FilterModeReviewed:
		opts.ReviewedID = ctx.Doer.ID
	}

	// keyword holds the search term entered into the search field.
	keyword := strings.Trim(ctx.FormString("q"), " ")
	ctx.Data["Keyword"] = keyword

	// Educated guess: Do or don't show closed issues.
	isShowClosed := ctx.FormString("state") == "closed"
	opts.IsClosed = optional.Some(isShowClosed)

	// Make sure page number is at least 1. Will be posted to ctx.Data.
	page := ctx.FormInt("page")
	if page <= 1 {
		page = 1
	}
	opts.Paginator = &db.ListOptions{
		Page:     page,
		PageSize: setting.UI.IssuePagingNum,
	}

	// Get IDs for labels (a filter option for issues/pulls).
	// Required for IssuesOptions.
	selectedLabels := ctx.FormString("labels")
	if len(selectedLabels) > 0 && selectedLabels != "0" {
		var err error
		opts.LabelIDs, err = base.StringsToInt64s(strings.Split(selectedLabels, ","))
		if err != nil {
			ctx.Flash.Error(ctx.Tr("invalid_data", selectedLabels), true)
		}
	}

	if org != nil {
		// Get Org Labels
		labels, err := issues_model.GetLabelsByOrgID(ctx, ctx.Org.Organization.ID, ctx.FormString("sort"), db.ListOptions{})
		if err != nil {
			ctx.ServerError("GetLabelsByOrgID", err)
			return
		}

		// Get the exclusive scope for every label ID
		labelExclusiveScopes := make([]string, 0, len(opts.LabelIDs))
		for _, labelID := range opts.LabelIDs {
			foundExclusiveScope := false
			for _, label := range labels {
				if label.ID == labelID || label.ID == -labelID {
					labelExclusiveScopes = append(labelExclusiveScopes, label.ExclusiveScope())
					foundExclusiveScope = true
					break
				}
			}
			if !foundExclusiveScope {
				labelExclusiveScopes = append(labelExclusiveScopes, "")
			}
		}

		for _, l := range labels {
			l.LoadSelectedLabelsAfterClick(opts.LabelIDs, labelExclusiveScopes)
		}
		ctx.Data["Labels"] = labels
	}

	// ------------------------------
	// Get issues as defined by opts.
	// ------------------------------

	// Slice of Issues that will be displayed on the overview page
	// USING FINAL STATE OF opts FOR A QUERY.
	var issues issues_model.IssueList
	{
		issueIDs, _, err := issue_indexer.SearchIssues(ctx, issue_indexer.ToSearchOptions(keyword, opts).Copy(
			func(o *issue_indexer.SearchOptions) { o.IsFuzzyKeyword = isFuzzy },
		))
		if err != nil {
			ctx.ServerError("issueIDsFromSearch", err)
			return
		}
		issues, err = issues_model.GetIssuesByIDs(ctx, issueIDs, true)
		if err != nil {
			ctx.ServerError("GetIssuesByIDs", err)
			return
		}
	}
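
	// Load the latest commit statuses for the listed issues/pulls; they are exposed
	// to the template below as "CommitStatuses" and "CommitLastStatus".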
	commitStatuses, lastStatus, err := pull_service.GetIssuesAllCommitStatus(ctx, issues)
	if err != nil {
		ctx.ServerError("GetIssuesLastCommitStatus", err)
		return
	}
	if !ctx.Repo.CanRead(unit.TypeActions) {
		for key := range commitStatuses {
			git_model.CommitStatusesHideActionsURL(ctx, commitStatuses[key])
		}
	}

	// -------------------------------
	// Fill stats to post to ctx.Data.
	// -------------------------------
	issueStats, err := getUserIssueStats(ctx, ctxUser, filterMode, issue_indexer.ToSearchOptions(keyword, opts).Copy(
		func(o *issue_indexer.SearchOptions) { o.IsFuzzyKeyword = isFuzzy },
	))
	if err != nil {
		ctx.ServerError("getUserIssueStats", err)
		return
	}

	// Will be posted to ctx.Data.
	var shownIssues int
	if !isShowClosed {
		shownIssues = int(issueStats.OpenCount)
	} else {
		shownIssues = int(issueStats.ClosedCount)
	}

	ctx.Data["IsShowClosed"] = isShowClosed

	ctx.Data["IssueRefEndNames"], ctx.Data["IssueRefURLs"] = issue_service.GetRefEndNamesAndURLs(issues, ctx.FormString("RepoLink"))

	if err := issues.LoadAttributes(ctx); err != nil {
		ctx.ServerError("issues.LoadAttributes", err)
		return
	}
	ctx.Data["Issues"] = issues

	approvalCounts, err := issues.GetApprovalCounts(ctx)
	if err != nil {
		ctx.ServerError("ApprovalCounts", err)
		return
	}
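	// ApprovalCounts is exposed to the template: for a given issue ID and review type
	// ("approve" by default, "reject", or "waiting") it returns the matching review count.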
	ctx.Data["ApprovalCounts"] = func(issueID int64, typ string) int64 {
		counts, ok := approvalCounts[issueID]
		if !ok || len(counts) == 0 {
			return 0
		}
		reviewTyp := issues_model.ReviewTypeApprove
		if typ == "reject" {
			reviewTyp = issues_model.ReviewTypeReject
		} else if typ == "waiting" {
			reviewTyp = issues_model.ReviewTypeRequest
		}
		for _, count := range counts {
			if count.Type == reviewTyp {
				return count.Count
			}
		}
		return 0
	}
	ctx.Data["CommitLastStatus"] = lastStatus
	ctx.Data["CommitStatuses"] = commitStatuses
	ctx.Data["IssueStats"] = issueStats
	ctx.Data["ViewType"] = viewType
	ctx.Data["SortType"] = sortType
	ctx.Data["IsShowClosed"] = isShowClosed
	ctx.Data["SelectLabels"] = selectedLabels
	ctx.Data["PageIsOrgIssues"] = org != nil
	ctx.Data["IsFuzzy"] = isFuzzy

	if isShowClosed {
		ctx.Data["State"] = "closed"
	} else {
		ctx.Data["State"] = "open"
	}

	pager := context.NewPagination(shownIssues, setting.UI.IssuePagingNum, page, 5)
	pager.AddParam(ctx, "q", "Keyword")
	pager.AddParam(ctx, "type", "ViewType")
	pager.AddParam(ctx, "sort", "SortType")
	pager.AddParam(ctx, "state", "State")
	pager.AddParam(ctx, "labels", "SelectLabels")
	pager.AddParam(ctx, "milestone", "MilestoneID")
	pager.AddParam(ctx, "assignee", "AssigneeID")
	pager.AddParam(ctx, "fuzzy", "IsFuzzy")
	ctx.Data["Page"] = pager

	ctx.HTML(http.StatusOK, tplIssues)
}

// ShowSSHKeys outputs all the SSH keys of the context user as plain text
func ShowSSHKeys(ctx *context.Context) {
	keys, err := db.Find[asymkey_model.PublicKey](ctx, asymkey_model.FindPublicKeyOptions{
		OwnerID: ctx.ContextUser.ID,
	})
	if err != nil {
		ctx.ServerError("ListPublicKeys", err)
		return
	}

	var buf bytes.Buffer
	for i := range keys {
		buf.WriteString(keys[i].OmitEmail())
		buf.WriteString("\n")
	}
	ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}

// ShowGPGKeys outputs all the public GPG keys of the context user as an armored key block
func ShowGPGKeys(ctx *context.Context) {
	keys, err := db.Find[asymkey_model.GPGKey](ctx, asymkey_model.FindGPGKeyOptions{
		ListOptions: db.ListOptionsAll,
		OwnerID:     ctx.ContextUser.ID,
	})
	if err != nil {
		ctx.ServerError("ListGPGKeys", err)
		return
	}

	entities := make([]*openpgp.Entity, 0)
	failedEntitiesID := make([]string, 0)
	for _, k := range keys {
		e, err := asymkey_model.GPGKeyToEntity(ctx, k)
		if err != nil {
			if asymkey_model.IsErrGPGKeyImportNotExist(err) {
				failedEntitiesID = append(failedEntitiesID, k.KeyID)
				continue // Skip keys from a previous import that have no backup of the imported armored key
			}
			ctx.ServerError("ShowGPGKeys", err)
			return
		}
		entities = append(entities, e)
	}
	var buf bytes.Buffer

	headers := make(map[string]string)
	if len(failedEntitiesID) > 0 { // If some keys need to be re-imported before they can be exported
		headers["Note"] = fmt.Sprintf("The keys with the following IDs couldn't be exported and need to be reuploaded %s", strings.Join(failedEntitiesID, ", "))
	} else if len(entities) == 0 {
		headers["Note"] = "This user hasn't uploaded any GPG keys."
	}
	writer, _ := armor.Encode(&buf, "PGP PUBLIC KEY BLOCK", headers)
	for _, e := range entities {
		err = e.Serialize(writer) // TODO: find out why keys are exported with a different cipherTypeByte than the original (should not be blocking, but strange)
		if err != nil {
			ctx.ServerError("ShowGPGKeys", err)
			return
		}
	}
	writer.Close()
	ctx.PlainTextBytes(http.StatusOK, buf.Bytes())
}
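
// UsernameSubRoute dispatches "/{username}"-style routes by suffix: ".png" serves the
// avatar, ".keys" the SSH keys, ".gpg" the GPG keys, ".rss"/".atom" the user feed,
// and everything else falls through to the user or organization profile page.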
func UsernameSubRoute(ctx *context.Context) {
	// WORKAROUND to support usernames with "." in them
	// https://github.com/go-chi/chi/issues/781
	username := ctx.Params("username")
	reloadParam := func(suffix string) (success bool) {
		ctx.SetParams("username", strings.TrimSuffix(username, suffix))
		context.UserAssignmentWeb()(ctx)
		if ctx.Written() {
			return false
		}
		// check view permissions
		if !user_model.IsUserVisibleToViewer(ctx, ctx.ContextUser, ctx.Doer) {
			ctx.NotFound("user", fmt.Errorf("%s", ctx.ContextUser.Name))
			return false
		}
		return true
	}
	switch {
	case strings.HasSuffix(username, ".png"):
		if reloadParam(".png") {
			AvatarByUserName(ctx)
		}
	case strings.HasSuffix(username, ".keys"):
		if reloadParam(".keys") {
			ShowSSHKeys(ctx)
		}
	case strings.HasSuffix(username, ".gpg"):
		if reloadParam(".gpg") {
			ShowGPGKeys(ctx)
		}
	case strings.HasSuffix(username, ".rss"):
		if !setting.Other.EnableFeed {
			ctx.Error(http.StatusNotFound)
			return
		}
		if reloadParam(".rss") {
			feed.ShowUserFeedRSS(ctx)
		}
	case strings.HasSuffix(username, ".atom"):
		if !setting.Other.EnableFeed {
			ctx.Error(http.StatusNotFound)
			return
		}
		if reloadParam(".atom") {
			feed.ShowUserFeedAtom(ctx)
		}
	default:
		context.UserAssignmentWeb()(ctx)
		if !ctx.Written() {
			ctx.Data["EnableFeed"] = setting.Other.EnableFeed
			OwnerProfile(ctx)
		}
	}
}
|
2023-08-22 22:29:17 -04:00
|
|
|
|
Include public repos in doer's dashboard for issue search (#28304)
This fixes #28268.
<img width="1313" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/cb1e07d5-7a12-4691-a054-8278ba255bfc">
<img width="1318" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/4fd60820-97f1-4c2c-a233-d3671a5039e9">
## :warning: BREAKING :warning:
It requires giving up some features:
<img width="1312" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/281c0d51-0e7d-473f-bbed-216e2f645610">
However, dropping them may also fix #28055.
## Background
When the user switches the dashboard context to an org, they want to
search issues in the repos that belong to that org. When they switch to
themselves, however, they mean every repo they can access, because they
may have created an issue in a public repo they don't own.
<img width="286" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/182dcd5b-1c20-4725-93af-96e8dfae5b97">
It's a confusing design. Think about it: what does "In your
repositories" mean when the user switches to an org? Repos belonging to
the user, or to the org?
Either way, this was broken by #26012 and its follow-up PRs. Since that
PR, issues are searched only in repos that the dashboard context user
owns or has been explicitly granted access to, which causes #28268.
## How to fix it
The fix itself is not difficult: extend the repo scope when the
dashboard context user is the doer. Since the user may create issues or
be mentioned in any public repo, we can simply set `AllPublic` to true,
which the indexers already support. The DB condition will also support
it in this PR.
The real difficulty is counting the search results grouped by repo. The
requirement is roughly: "search issues with this keyword and these
filters, return the total number and the top results, **then group all
of them by repo and return the count of each group**".
<img width="314" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/5206eb20-f8f5-49b9-b45a-1be2fcf679f4">
Before #26012 this was done in the DB, but it caused the results to be
incomplete (see the description of #26012).
To keep it, #26012 implemented it in an inefficient way: it counts the
issues repo by repo, so it cannot work when `AllPublic` is true, because
doing that for every public repo is practically impossible.
https://github.com/go-gitea/gitea/blob/1bfcdeef4cca0f5509476358e5931c13d37ed1ca/modules/indexer/issues/indexer.go#L318-L338
## Give up unnecessary features
We might be able to resolve `TODO: use "group by" of the indexer engines
to implement it`; it can certainly be done with Elasticsearch, but IIRC
Bleve and Meilisearch don't support "group by".
The real question is: is it worth it? Why do we need the counts grouped
by repo at all?
Here is my search dashboard on gitea.com.
<img width="1304" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/2bca2d46-6c71-4de1-94cb-0c9af27c62ff">
I have never found the long repo list useful.
If we agree to abandon it, things become much easier. That is this PR.
## TODO
Filtering by repo is still important when searching issues, but it
shouldn't work the way it does now. It could be implemented like this:
<img width="1316" alt="image"
src="https://github.com/go-gitea/gitea/assets/9418365/99ee5f21-cbb5-4dfe-914d-cb796cb79fbe">
The indexers already support it well, but it needs some frontend work,
which I'm not good at. Someone could do that in another PR and merge
this one first to fix the bug.
Or please block this PR and help to complete it.
Finally, "switch dashboard context" is itself a design that needs
improvement. In my opinion it could be replaced by additional filter
conditions instead of "switching".
2023-12-07 00:26:18 -05:00
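The core of the fix described above is widening the repo scope when the doer views their own dashboard: search the repos the doer can access explicitly, plus all public repos once `AllPublic` is set. A minimal standalone sketch of that scope test follows; the names (`inScope`, the maps) are hypothetical and only illustrate the condition, while the real logic lives in the issue indexer and the DB condition.

	package main

	import "fmt"

	// inScope reports whether a repo falls inside the search scope:
	// repos the context user can access explicitly, plus every public
	// repo when allPublic is set (the doer viewing their own dashboard).
	func inScope(repoID int64, accessible, isPublic map[int64]bool, allPublic bool) bool {
		return accessible[repoID] || (allPublic && isPublic[repoID])
	}

	func main() {
		accessible := map[int64]bool{1: true} // owned or explicitly granted
		isPublic := map[int64]bool{2: true}   // a public repo the doer never touched
		fmt.Println(inScope(2, accessible, isPublic, false)) // false: org context, accessible repos only
		fmt.Println(inScope(2, accessible, isPublic, true))  // true: own dashboard, AllPublic enabled
	}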
|
|
|
func getUserIssueStats(ctx *context.Context, ctxUser *user_model.User, filterMode int, opts *issue_indexer.SearchOptions) (*issues_model.IssueStats, error) {
|
|
|
|
doerID := ctx.Doer.ID
|
|
|
|
|
2023-08-22 22:29:17 -04:00
|
|
|
opts = opts.Copy(func(o *issue_indexer.SearchOptions) {
|
2023-12-07 00:26:18 -05:00
|
|
|
// If the doer is the same as the context user, which means the doer is viewing his own dashboard,
|
|
|
|
// it's not enough to search only the repos that the doer owns or has been explicitly granted access to,
|
|
|
|
// because the doer may create issues or be mentioned in any public repo.
|
|
|
|
// So we need to search issues in all public repos as well.
|
|
|
|
o.AllPublic = doerID == ctxUser.ID
|
2023-08-22 22:29:17 -04:00
|
|
|
o.AssigneeID = nil
|
|
|
|
o.PosterID = nil
|
|
|
|
o.MentionID = nil
|
|
|
|
o.ReviewRequestedID = nil
|
|
|
|
o.ReviewedID = nil
|
|
|
|
})
|
|
|
|
|
|
|
|
var (
|
|
|
|
err error
|
|
|
|
ret = &issues_model.IssueStats{}
|
|
|
|
)
|
|
|
|
|
|
|
|
{
|
|
|
|
openClosedOpts := opts.Copy()
|
|
|
|
switch filterMode {
|
2023-12-07 00:26:18 -05:00
|
|
|
case issues_model.FilterModeAll:
|
|
|
|
// no-op
|
|
|
|
case issues_model.FilterModeYourRepositories:
|
|
|
|
openClosedOpts.AllPublic = false
|
2023-08-22 22:29:17 -04:00
|
|
|
case issues_model.FilterModeAssign:
|
2024-03-13 04:25:53 -04:00
|
|
|
openClosedOpts.AssigneeID = optional.Some(doerID)
|
2023-08-22 22:29:17 -04:00
|
|
|
case issues_model.FilterModeCreate:
|
2024-03-13 04:25:53 -04:00
|
|
|
openClosedOpts.PosterID = optional.Some(doerID)
|
2023-08-22 22:29:17 -04:00
|
|
|
case issues_model.FilterModeMention:
|
2024-03-13 04:25:53 -04:00
|
|
|
openClosedOpts.MentionID = optional.Some(doerID)
|
2023-08-22 22:29:17 -04:00
|
|
|
case issues_model.FilterModeReviewRequested:
|
2024-03-13 04:25:53 -04:00
|
|
|
openClosedOpts.ReviewRequestedID = optional.Some(doerID)
|
2023-08-22 22:29:17 -04:00
|
|
|
case issues_model.FilterModeReviewed:
|
2024-03-13 04:25:53 -04:00
|
|
|
openClosedOpts.ReviewedID = optional.Some(doerID)
|
2023-08-22 22:29:17 -04:00
|
|
|
}
|
2024-03-02 10:42:31 -05:00
|
|
|
openClosedOpts.IsClosed = optional.Some(false)
|
2023-08-22 22:29:17 -04:00
|
|
|
ret.OpenCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-02 10:42:31 -05:00
|
|
|
openClosedOpts.IsClosed = optional.Some(true)
|
2023-08-22 22:29:17 -04:00
|
|
|
ret.ClosedCount, err = issue_indexer.CountIssues(ctx, openClosedOpts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-07 00:26:18 -05:00
|
|
|
ret.YourRepositoriesCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AllPublic = false }))
|
2023-08-22 22:29:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 04:25:53 -04:00
|
|
|
ret.AssignCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.AssigneeID = optional.Some(doerID) }))
|
2023-08-22 22:29:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 04:25:53 -04:00
|
|
|
ret.CreateCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.PosterID = optional.Some(doerID) }))
|
2023-08-22 22:29:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 04:25:53 -04:00
|
|
|
ret.MentionCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.MentionID = optional.Some(doerID) }))
|
2023-08-22 22:29:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 04:25:53 -04:00
|
|
|
ret.ReviewRequestedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewRequestedID = optional.Some(doerID) }))
|
2023-08-22 22:29:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-03-13 04:25:53 -04:00
|
|
|
ret.ReviewedCount, err = issue_indexer.CountIssues(ctx, opts.Copy(func(o *issue_indexer.SearchOptions) { o.ReviewedID = optional.Some(doerID) }))
|
2023-08-22 22:29:17 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return ret, nil
|
|
|
|
}
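getUserIssueStats leans on a copy-with-modifier pattern: every count starts from the same base options, and `Copy` returns a modified copy, so the base is never mutated between the open/closed counts and the per-filter counts. A standalone sketch of that pattern, using a hypothetical trimmed-down `SearchOptions` rather than Gitea's actual type:

	package main

	import "fmt"

	type SearchOptions struct {
		AllPublic bool
		PosterID  *int64
	}

	// Copy returns a copy of the options, with any edit functions applied
	// to the copy only; the receiver is left untouched.
	func (o SearchOptions) Copy(edits ...func(*SearchOptions)) *SearchOptions {
		c := o // value receiver: c is already an independent copy
		for _, edit := range edits {
			edit(&c)
		}
		return &c
	}

	func main() {
		id := int64(42)
		base := SearchOptions{AllPublic: true}
		created := base.Copy(func(o *SearchOptions) { o.PosterID = &id })
		// base stays unfiltered, the copy carries the extra filter.
		fmt.Println(base.PosterID == nil, created.PosterID != nil, created.AllPublic) // true true true
	}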
|