Diffstat (limited to 'models')
40 files changed, 101 insertions, 238 deletions
diff --git a/models/actions/status.go b/models/actions/status.go
index eda2234137..2b1d70613c 100644
--- a/models/actions/status.go
+++ b/models/actions/status.go
@@ -4,6 +4,8 @@ package actions
import (
+ "slices"
+
"code.gitea.io/gitea/modules/translation"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
@@ -88,12 +90,7 @@ func (s Status) IsBlocked() bool {
// In returns whether s is one of the given statuses
func (s Status) In(statuses ...Status) bool {
- for _, v := range statuses {
- if s == v {
- return true
- }
- }
- return false
+ return slices.Contains(statuses, s)
}
func (s Status) AsResult() runnerv1.Result {
diff --git a/models/activities/action.go b/models/activities/action.go
index 6f1837d9f6..1a0dfe6412 100644
--- a/models/activities/action.go
+++ b/models/activities/action.go
@@ -9,6 +9,7 @@ import (
"fmt"
"net/url"
"path"
+ "slices"
"strconv"
"strings"
"time"
@@ -125,12 +126,7 @@ func (at ActionType) String() string {
}
func (at ActionType) InActions(actions ...string) bool {
- for _, action := range actions {
- if action == at.String() {
- return true
- }
- }
- return false
+ return slices.Contains(actions, at.String())
}
// Action represents user operation type and other information to
@@ -191,7 +187,7 @@ func (a *Action) LoadActUser(ctx context.Context) {
return
}
var err error
- a.ActUser, err = user_model.GetUserByID(ctx, a.ActUserID)
+ a.ActUser, err = user_model.GetPossibleUserByID(ctx, a.ActUserID)
if err == nil {
return
} else if user_model.IsErrUserNotExist(err) {
diff --git a/models/activities/notification_list.go b/models/activities/notification_list.go
index 0cbb91df3c..b47f5dc404 100644
--- a/models/activities/notification_list.go
+++ b/models/activities/notification_list.go
@@ -208,10 +208,7 @@ func (nl NotificationList) LoadRepos(ctx context.Context) (repo_model.Repository
repos := make(map[int64]*repo_model.Repository, len(repoIDs))
left := len(repoIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", repoIDs[:limit]).
Rows(new(repo_model.Repository))
@@ -282,10 +279,7 @@ func (nl NotificationList) LoadIssues(ctx context.Context) ([]int, error) {
issues := make(map[int64]*issues_model.Issue, len(issueIDs))
left := len(issueIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", issueIDs[:limit]).
Rows(new(issues_model.Issue))
@@ -377,10 +371,7 @@ func (nl NotificationList) LoadUsers(ctx context.Context) ([]int, error) {
users := make(map[int64]*user_model.User, len(userIDs))
left := len(userIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", userIDs[:limit]).
Rows(new(user_model.User))
@@ -428,10 +419,7 @@ func (nl NotificationList) LoadComments(ctx context.Context) ([]int, error) {
comments := make(map[int64]*issues_model.Comment, len(commentIDs))
left := len(commentIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", commentIDs[:limit]).
Rows(new(issues_model.Comment))
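Note: the clamp-to-chunk-size rewrite above recurs in every loader in this changeset; a minimal standalone sketch of the pattern, assuming a hypothetical helper and a stand-in constant for db.DefaultMaxInSize, built on Go 1.21's built-in min:

    package main

    import "fmt"

    const defaultMaxInSize = 3 // stand-in for db.DefaultMaxInSize

    func chunkIDs(ids []int64) [][]int64 {
        var chunks [][]int64
        for len(ids) > 0 {
            // was: limit := defaultMaxInSize; if left < limit { limit = left }
            limit := min(len(ids), defaultMaxInSize)
            chunks = append(chunks, ids[:limit])
            ids = ids[limit:]
        }
        return chunks
    }

    func main() {
        fmt.Println(chunkIDs([]int64{1, 2, 3, 4, 5, 6, 7})) // [[1 2 3] [4 5 6] [7]]
    }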
diff --git a/models/activities/repo_activity.go b/models/activities/repo_activity.go
index 3ccdbd47d3..aeaa452c9e 100644
--- a/models/activities/repo_activity.go
+++ b/models/activities/repo_activity.go
@@ -139,10 +139,7 @@ func GetActivityStatsTopAuthors(ctx context.Context, repo *repo_model.Repository
return v[i].Commits > v[j].Commits
})
- cnt := count
- if cnt > len(v) {
- cnt = len(v)
- }
+ cnt := min(count, len(v))
return v[:cnt], nil
}
diff --git a/models/auth/access_token_scope.go b/models/auth/access_token_scope.go
index 2293fd89a0..3eae19b2a5 100644
--- a/models/auth/access_token_scope.go
+++ b/models/auth/access_token_scope.go
@@ -213,12 +213,7 @@ func GetRequiredScopes(level AccessTokenScopeLevel, scopeCategories ...AccessTok
// ContainsCategory checks if a list of categories contains a specific category
func ContainsCategory(categories []AccessTokenScopeCategory, category AccessTokenScopeCategory) bool {
- for _, c := range categories {
- if c == category {
- return true
- }
- }
- return false
+ return slices.Contains(categories, category)
}
// GetScopeLevelFromAccessMode converts permission access mode to scope level
diff --git a/models/auth/oauth2.go b/models/auth/oauth2.go
index c270e4856e..c2b6690116 100644
--- a/models/auth/oauth2.go
+++ b/models/auth/oauth2.go
@@ -12,6 +12,7 @@ import (
"fmt"
"net"
"net/url"
+ "slices"
"strings"
"code.gitea.io/gitea/models/db"
@@ -511,12 +512,7 @@ func (grant *OAuth2Grant) IncreaseCounter(ctx context.Context) error {
// ScopeContains returns true if the grant scope contains the specified scope
func (grant *OAuth2Grant) ScopeContains(scope string) bool {
- for _, currentScope := range strings.Split(grant.Scope, " ") {
- if scope == currentScope {
- return true
- }
- }
- return false
+ return slices.Contains(strings.Split(grant.Scope, " "), scope)
}
// SetNonce updates the current nonce value of a grant
diff --git a/models/db/context.go b/models/db/context.go
index 4b98796ef0..05d7d72daa 100644
--- a/models/db/context.go
+++ b/models/db/context.go
@@ -67,7 +67,7 @@ func contextSafetyCheck(e Engine) {
_ = e.SQL("SELECT 1").Iterate(&m{}, func(int, any) error {
callers := make([]uintptr, 32)
callerNum := runtime.Callers(1, callers)
- for i := 0; i < callerNum; i++ {
+ for i := range callerNum {
if funcName := runtime.FuncForPC(callers[i]).Name(); funcName == "xorm.io/xorm.(*Session).Iterate" {
contextSafetyDeniedFuncPCs = append(contextSafetyDeniedFuncPCs, callers[i])
}
@@ -82,7 +82,7 @@
// it should be very fast: xxxx ns/op
callers := make([]uintptr, 32)
callerNum := runtime.Callers(3, callers) // skip 3: runtime.Callers, contextSafetyCheck, GetEngine
- for i := 0; i < callerNum; i++ {
+ for i := range callerNum {
if slices.Contains(contextSafetyDeniedFuncPCs, callers[i]) {
panic(errors.New("using database context in an iterator would cause corrupted results"))
}
diff --git a/models/db/name.go b/models/db/name.go
index 0e11c78372..48c7fdbce5 100644
--- a/models/db/name.go
+++ b/models/db/name.go
@@ -5,6 +5,7 @@ package db
import (
"fmt"
+ "slices"
"strings"
"unicode/utf8"
@@ -80,10 +81,8 @@ func IsUsableName(reservedNames, reservedPatterns []string, name string) error {
return util.NewInvalidArgumentErrorf("name is empty")
}
- for i := range reservedNames {
- if name == reservedNames[i] {
- return ErrNameReserved{name}
- }
+ if slices.Contains(reservedNames, name) {
+ return ErrNameReserved{name}
}
for _, pat := range reservedPatterns {
diff --git a/models/dbfs/dbfile.go b/models/dbfs/dbfile.go
index dd27b5c36b..eaf506fbe6 100644
--- a/models/dbfs/dbfile.go
+++ b/models/dbfs/dbfile.go
@@ -46,10 +46,7 @@ func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err er
blobPos := int(offset % f.blockSize)
blobOffset := offset - int64(blobPos)
blobRemaining := int(f.blockSize) - blobPos
- needRead := len(p)
- if needRead > blobRemaining {
- needRead = blobRemaining
- }
+ needRead := min(len(p), blobRemaining)
if blobOffset+int64(blobPos)+int64(needRead) > fileMeta.FileSize {
needRead = int(fileMeta.FileSize - blobOffset - int64(blobPos))
}
@@ -66,14 +63,8 @@ func (f *file) readAt(fileMeta *dbfsMeta, offset int64, p []byte) (n int, err er
blobData = nil
}
- canCopy := len(blobData) - blobPos
- if canCopy <= 0 {
- canCopy = 0
- }
- realRead := needRead
- if realRead > canCopy {
- realRead = canCopy
- }
+ canCopy := max(len(blobData)-blobPos, 0)
+ realRead := min(needRead, canCopy)
if realRead > 0 {
copy(p[:realRead], fileData.BlobData[blobPos:blobPos+realRead])
}
@@ -113,10 +104,7 @@ func (f *file) Write(p []byte) (n int, err error) {
blobPos := int(f.offset % f.blockSize)
blobOffset := f.offset - int64(blobPos)
blobRemaining := int(f.blockSize) - blobPos
- needWrite := len(p)
- if needWrite > blobRemaining {
- needWrite = blobRemaining
- }
+ needWrite := min(len(p), blobRemaining)
buf := make([]byte, f.blockSize)
readBytes, err := f.readAt(fileMeta, blobOffset, buf)
if err != nil && !errors.Is(err, io.EOF) {
diff --git a/models/git/protected_branch.go b/models/git/protected_branch.go
index a3caed73c4..19b02ccab9 100644
--- a/models/git/protected_branch.go
+++ b/models/git/protected_branch.go
@@ -246,7 +246,7 @@ func (protectBranch *ProtectedBranch) GetUnprotectedFilePatterns() []glob.Glob {
func getFilePatterns(filePatterns string) []glob.Glob {
extarr := make([]glob.Glob, 0, 10)
- for _, expr := range strings.Split(strings.ToLower(filePatterns), ";") {
+ for expr := range strings.SplitSeq(strings.ToLower(filePatterns), ";") {
expr = strings.TrimSpace(expr)
if expr != "" {
if g, err := glob.Compile(expr, '.', '/'); err != nil {
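Note: getFilePatterns above switches from strings.Split to strings.SplitSeq; a minimal standalone sketch of that Go 1.24 iterator API (the example pattern string is made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // SplitSeq yields each piece lazily instead of allocating a []string up front.
        for expr := range strings.SplitSeq("*.md; docs/* ;", ";") {
            expr = strings.TrimSpace(expr)
            if expr != "" {
                fmt.Println(expr) // prints "*.md" then "docs/*"
            }
        }
    }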
diff --git a/models/issues/comment.go b/models/issues/comment.go
index ab9b2042f3..9bef96d0dd 100644
--- a/models/issues/comment.go
+++ b/models/issues/comment.go
@@ -9,6 +9,7 @@ import (
"context"
"fmt"
"html/template"
+ "slices"
"strconv"
"unicode/utf8"
@@ -196,12 +197,7 @@ func (t CommentType) HasMailReplySupport() bool {
}
func (t CommentType) CountedAsConversation() bool {
- for _, ct := range ConversationCountedCommentType() {
- if t == ct {
- return true
- }
- }
- return false
+ return slices.Contains(ConversationCountedCommentType(), t)
}
// ConversationCountedCommentType returns the comment types that are counted as a conversation
@@ -614,7 +610,7 @@ func UpdateCommentAttachments(ctx context.Context, c *Comment, uuids []string) e
if err != nil {
return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
}
- for i := 0; i < len(attachments); i++ {
+ for i := range attachments {
attachments[i].IssueID = c.IssueID
attachments[i].CommentID = c.ID
if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
diff --git a/models/issues/comment_list.go b/models/issues/comment_list.go
index c483ada75a..f6c485449f 100644
--- a/models/issues/comment_list.go
+++ b/models/issues/comment_list.go
@@ -57,10 +57,7 @@ func (comments CommentList) loadLabels(ctx context.Context) error {
commentLabels := make(map[int64]*Label, len(labelIDs))
left := len(labelIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", labelIDs[:limit]).
Rows(new(Label))
@@ -107,10 +104,7 @@ func (comments CommentList) loadMilestones(ctx context.Context) error {
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
left := len(milestoneIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
err := db.GetEngine(ctx).
In("id", milestoneIDs[:limit]).
Find(&milestoneMaps)
@@ -146,10 +140,7 @@ func (comments CommentList) loadOldMilestones(ctx context.Context) error {
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
left := len(milestoneIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
err := db.GetEngine(ctx).
In("id", milestoneIDs[:limit]).
Find(&milestoneMaps)
@@ -184,10 +175,7 @@ func (comments CommentList) loadAssignees(ctx context.Context) error {
assignees := make(map[int64]*user_model.User, len(assigneeIDs))
left := len(assigneeIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", assigneeIDs[:limit]).
Rows(new(user_model.User))
@@ -256,10 +244,7 @@ func (comments CommentList) LoadIssues(ctx context.Context) error {
issues := make(map[int64]*Issue, len(issueIDs))
left := len(issueIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("id", issueIDs[:limit]).
Rows(new(Issue))
@@ -313,10 +298,7 @@ func (comments CommentList) loadDependentIssues(ctx context.Context) error {
issues := make(map[int64]*Issue, len(issueIDs))
left := len(issueIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := e.
In("id", issueIDs[:limit]).
Rows(new(Issue))
@@ -392,10 +374,7 @@ func (comments CommentList) LoadAttachments(ctx context.Context) (err error) {
commentsIDs := comments.getAttachmentCommentIDs()
left := len(commentsIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("comment_id", commentsIDs[:limit]).
Rows(new(repo_model.Attachment))
diff --git a/models/issues/issue_list.go b/models/issues/issue_list.go
index 6c74b533b3..26b93189b8 100644
--- a/models/issues/issue_list.go
+++ b/models/issues/issue_list.go
@@ -42,10 +42,7 @@ func (issues IssueList) LoadRepositories(ctx context.Context) (repo_model.Reposi
repoMaps := make(map[int64]*repo_model.Repository, len(repoIDs))
left := len(repoIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
err := db.GetEngine(ctx).
In("id", repoIDs[:limit]).
Find(&repoMaps)
@@ -116,10 +113,7 @@ func (issues IssueList) LoadLabels(ctx context.Context) error {
issueIDs := issues.getIssueIDs()
left := len(issueIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).Table("label").
Join("LEFT", "issue_label", "issue_label.label_id = label.id").
In("issue_label.issue_id", issueIDs[:limit]).
@@ -171,10 +165,7 @@ func (issues IssueList) LoadMilestones(ctx context.Context) error {
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
left := len(milestoneIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
err := db.GetEngine(ctx).
In("id", milestoneIDs[:limit]).
Find(&milestoneMaps)
@@ -203,10 +194,7 @@ func (issues IssueList) LoadProjects(ctx context.Context) error {
}
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
projects := make([]*projectWithIssueID, 0, limit)
err := db.GetEngine(ctx).
@@ -245,10 +233,7 @@ func (issues IssueList) LoadAssignees(ctx context.Context) error {
issueIDs := issues.getIssueIDs()
left := len(issueIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).Table("issue_assignees").
Join("INNER", "`user`", "`user`.id = `issue_assignees`.assignee_id").
In("`issue_assignees`.issue_id", issueIDs[:limit]).OrderBy(user_model.GetOrderByName()).
@@ -306,10 +291,7 @@ func (issues IssueList) LoadPullRequests(ctx context.Context) error {
pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs))
left := len(issuesIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("issue_id", issuesIDs[:limit]).
Rows(new(PullRequest))
@@ -354,10 +336,7 @@ func (issues IssueList) LoadAttachments(ctx context.Context) (err error) {
issuesIDs := issues.getIssueIDs()
left := len(issuesIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).
In("issue_id", issuesIDs[:limit]).
Rows(new(repo_model.Attachment))
@@ -399,10 +378,7 @@ func (issues IssueList) loadComments(ctx context.Context, cond builder.Cond) (er
issuesIDs := issues.getIssueIDs()
left := len(issuesIDs)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
rows, err := db.GetEngine(ctx).Table("comment").
Join("INNER", "issue", "issue.id = comment.issue_id").
In("issue.id", issuesIDs[:limit]).
@@ -466,10 +442,7 @@ func (issues IssueList) loadTotalTrackedTimes(ctx context.Context) (err error) {
left := len(ids)
for left > 0 {
- limit := db.DefaultMaxInSize
- if left < limit {
- limit = left
- }
+ limit := min(left, db.DefaultMaxInSize)
// select issue_id, sum(time) from tracked_time where issue_id in (<issue ids in current page>) group by issue_id
rows, err := db.GetEngine(ctx).Table("tracked_time").
diff --git a/models/issues/issue_search.go b/models/issues/issue_search.go
index 16f808acd1..84d5948640 100644
--- a/models/issues/issue_search.go
+++ b/models/issues/issue_search.go
@@ -73,8 +73,8 @@ func (o *IssuesOptions) Copy(edit ...func(options *IssuesOptions)) *IssuesOption
// sortType string
func applySorts(sess *xorm.Session, sortType string, priorityRepoID int64) {
// Since this sortType is dynamically created, it has to be treated specially.
- if strings.HasPrefix(sortType, ScopeSortPrefix) {
- scope := strings.TrimPrefix(sortType, ScopeSortPrefix)
+ if after, ok := strings.CutPrefix(sortType, ScopeSortPrefix); ok {
+ scope := after
sess.Join("LEFT", "issue_label", "issue.id = issue_label.issue_id")
// "exclusive_order=0" means "no order is set", so exclude it from the JOIN criteria and then "LEFT JOIN" result is also null
sess.Join("LEFT", "label", "label.id = issue_label.label_id AND label.exclusive_order <> 0 AND label.name LIKE ?", scope+"/%")
diff --git a/models/issues/issue_stats.go b/models/issues/issue_stats.go
index 50409fbbd8..adedaa3d3a 100644
--- a/models/issues/issue_stats.go
+++ b/models/issues/issue_stats.go
@@ -94,10 +94,7 @@ func GetIssueStats(ctx context.Context, opts *IssuesOptions) (*IssueStats, error
// ids in a temporary table and join from them.
accum := &IssueStats{}
for i := 0; i < len(opts.IssueIDs); {
- chunk := i + MaxQueryParameters
- if chunk > len(opts.IssueIDs) {
- chunk = len(opts.IssueIDs)
- }
+ chunk := min(i+MaxQueryParameters, len(opts.IssueIDs))
stats, err := getIssueStatsChunk(ctx, opts, opts.IssueIDs[i:chunk])
if err != nil {
return nil, err
diff --git a/models/issues/issue_test.go b/models/issues/issue_test.go
index 18571e3aaa..1c5db55bbc 100644
--- a/models/issues/issue_test.go
+++ b/models/issues/issue_test.go
@@ -5,6 +5,7 @@ package issues_test
import (
"fmt"
+ "slices"
"sort"
"sync"
"testing"
@@ -270,7 +271,7 @@ func TestIssue_ResolveMentions(t *testing.T) {
for i, user := range resolved {
ids[i] = user.ID
}
- sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
+ slices.Sort(ids)
assert.Equal(t, expected, ids)
}
@@ -292,7 +293,7 @@ func TestResourceIndex(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
var wg sync.WaitGroup
- for i := 0; i < 100; i++ {
+ for i := range 100 {
wg.Add(1)
go func(i int) {
testInsertIssue(t, fmt.Sprintf("issue %d", i+1), "my issue", 0)
@@ -314,7 +315,7 @@ func TestCorrectIssueStats(t *testing.T) {
issueAmount := issues_model.MaxQueryParameters + 10
var wg sync.WaitGroup
- for i := 0; i < issueAmount; i++ {
+ for i := range issueAmount {
wg.Add(1)
go func(i int) {
testInsertIssue(t, fmt.Sprintf("Issue %d", i+1), "Bugs are nasty", 0)
diff --git a/models/issues/issue_update.go b/models/issues/issue_update.go
index 2466fcf30f..9b99787e3b 100644
--- a/models/issues/issue_update.go
+++ b/models/issues/issue_update.go
@@ -304,7 +304,7 @@ func UpdateIssueAttachments(ctx context.Context, issueID int64, uuids []string)
if err != nil {
return fmt.Errorf("getAttachmentsByUUIDs [uuids: %v]: %w", uuids, err)
}
- for i := 0; i < len(attachments); i++ {
+ for i := range attachments {
attachments[i].IssueID = issueID
if err := repo_model.UpdateAttachment(ctx, attachments[i]); err != nil {
return fmt.Errorf("update attachment [id: %d]: %w", attachments[i].ID, err)
diff --git a/models/issues/review_list.go b/models/issues/review_list.go
index 928f24fb2d..bbb8c489fa 100644
--- a/models/issues/review_list.go
+++ b/models/issues/review_list.go
@@ -22,7 +22,7 @@ type ReviewList []*Review
// LoadReviewers loads reviewers
func (reviews ReviewList) LoadReviewers(ctx context.Context) error {
reviewerIDs := make([]int64, len(reviews))
- for i := 0; i < len(reviews); i++ {
+ for i := range reviews {
reviewerIDs[i] = reviews[i].ReviewerID
}
reviewers, err := user_model.GetPossibleUserByIDs(ctx, reviewerIDs)
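Note: the counter loops replaced in the test and list files above use Go 1.22's range forms; a small sketch (illustrative values only) covering both "range over a slice index" and "range over an int":

    package main

    import "fmt"

    func main() {
        reviewerIDs := make([]int64, 4)
        for i := range reviewerIDs { // was: for i := 0; i < len(reviewerIDs); i++
            reviewerIDs[i] = int64(i) + 1
        }
        for range 3 { // was: for i := 0; i < 3; i++ with an unused i
            fmt.Println(reviewerIDs) // [1 2 3 4], printed three times
        }
    }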
diff --git a/models/issues/tracked_time.go b/models/issues/tracked_time.go
index ea404d36cd..2afbe272ed 100644
--- a/models/issues/tracked_time.go
+++ b/models/issues/tracked_time.go
@@ -350,10 +350,7 @@ func GetIssueTotalTrackedTime(ctx context.Context, opts *IssuesOptions, isClosed
// we get the statistics in smaller chunks and get accumulates
var accum int64
for i := 0; i < len(opts.IssueIDs); {
- chunk := i + MaxQueryParameters
- if chunk > len(opts.IssueIDs) {
- chunk = len(opts.IssueIDs)
- }
+ chunk := min(i+MaxQueryParameters, len(opts.IssueIDs))
time, err := getIssueTotalTrackedTimeChunk(ctx, opts, isClosed, opts.IssueIDs[i:chunk])
if err != nil {
return 0, err
diff --git a/models/migrations/base/db.go b/models/migrations/base/db.go
index 4ecc930f10..479a46379c 100644
--- a/models/migrations/base/db.go
+++ b/models/migrations/base/db.go
@@ -518,7 +518,7 @@ func ModifyColumn(x *xorm.Engine, tableName string, col *schemas.Column) error {
func removeAllWithRetry(dir string) error {
var err error
- for i := 0; i < 20; i++ {
+ for range 20 {
err = os.RemoveAll(dir)
if err == nil {
break
diff --git a/models/migrations/v1_11/v111.go b/models/migrations/v1_11/v111.go
index ff108479a9..1c8527b2aa 100644
--- a/models/migrations/v1_11/v111.go
+++ b/models/migrations/v1_11/v111.go
@@ -5,6 +5,7 @@ package v1_11 //nolint
import (
"fmt"
+ "slices"
"xorm.io/xorm"
)
@@ -344,10 +345,8 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error {
}
return AccessModeWrite <= perm.UnitsMode[UnitTypeCode], nil
}
- for _, id := range protectedBranch.ApprovalsWhitelistUserIDs {
- if id == reviewer.ID {
- return true, nil
- }
+ if slices.Contains(protectedBranch.ApprovalsWhitelistUserIDs, reviewer.ID) {
+ return true, nil
}
// isUserInTeams
diff --git a/models/migrations/v1_11/v115.go b/models/migrations/v1_11/v115.go
index 8c631cfd0b..c44c6d88e4 100644
--- a/models/migrations/v1_11/v115.go
+++ b/models/migrations/v1_11/v115.go
@@ -146,7 +146,7 @@ func copyOldAvatarToNewLocation(userID int64, oldAvatar string) (string, error)
return "", fmt.Errorf("io.ReadAll: %w", err)
}
- newAvatar := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%d-%x", userID, md5.Sum(data)))))
+ newAvatar := fmt.Sprintf("%x", md5.Sum(fmt.Appendf(nil, "%d-%x", userID, md5.Sum(data))))
if newAvatar == oldAvatar {
return newAvatar, nil
}
diff --git a/models/migrations/v1_20/v259.go b/models/migrations/v1_20/v259.go
index 5b8ced4ad7..0fdeb45957 100644
--- a/models/migrations/v1_20/v259.go
+++ b/models/migrations/v1_20/v259.go
@@ -329,7 +329,7 @@ func ConvertScopedAccessTokens(x *xorm.Engine) error {
for _, token := range tokens {
var scopes []string
allNewScopesMap := make(map[AccessTokenScope]bool)
- for _, oldScope := range strings.Split(token.Scope, ",") {
+ for oldScope := range strings.SplitSeq(token.Scope, ",") {
if newScopes, exists := accessTokenScopeMap[OldAccessTokenScope(oldScope)]; exists {
for _, newScope := range newScopes {
allNewScopesMap[newScope] = true
diff --git a/models/migrations/v1_22/v294_test.go b/models/migrations/v1_22/v294_test.go
index a1d702cb77..c3de332650 100644
--- a/models/migrations/v1_22/v294_test.go
+++ b/models/migrations/v1_22/v294_test.go
@@ -4,7 +4,6 @@ package v1_22 //nolint
import (
- "slices"
"testing"
"code.gitea.io/gitea/models/migrations/base"
@@ -44,7 +43,7 @@ func Test_AddUniqueIndexForProjectIssue(t *testing.T) {
for _, index := range tables[0].Indexes {
if index.Type == schemas.UniqueType {
found = true
- slices.Equal(index.Cols, []string{"project_id", "issue_id"})
+ assert.ElementsMatch(t, index.Cols, []string{"project_id", "issue_id"})
break
}
}
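Note: the v115 migration above now builds the hash input with fmt.Appendf instead of []byte(fmt.Sprintf(...)); a standalone sketch with made-up inputs:

    package main

    import (
        "crypto/md5"
        "fmt"
    )

    func main() {
        userID, data := int64(42), []byte("old avatar bytes")
        // fmt.Appendf formats straight into a byte slice, avoiding the string round trip.
        newAvatar := fmt.Sprintf("%x", md5.Sum(fmt.Appendf(nil, "%d-%x", userID, md5.Sum(data))))
        fmt.Println(newAvatar)
    }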
diff --git a/models/packages/container/const.go b/models/packages/container/const.go
deleted file mode 100644
index 6c7c9b46d1..0000000000
--- a/models/packages/container/const.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2022 The Gitea Authors. All rights reserved.
-// SPDX-License-Identifier: MIT
-
-package container
-
-const (
- ContentTypeDockerDistributionManifestV2 = "application/vnd.docker.distribution.manifest.v2+json"
-
- ManifestFilename = "manifest.json"
- UploadVersion = "_upload"
-)
diff --git a/models/packages/container/search.go b/models/packages/container/search.go
index a513da08b9..9321d9eb41 100644
--- a/models/packages/container/search.go
+++ b/models/packages/container/search.go
@@ -44,7 +44,7 @@ func (opts *BlobSearchOptions) toConds() builder.Cond {
cond = cond.And(builder.Eq{"package_version.lower_version": strings.ToLower(opts.Tag)})
}
if opts.IsManifest {
- cond = cond.And(builder.Eq{"package_file.lower_name": ManifestFilename})
+ cond = cond.And(builder.Eq{"package_file.lower_name": container_module.ManifestFilename})
}
if opts.OnlyLead {
cond = cond.And(builder.Eq{"package_file.is_lead": true})
@@ -235,7 +235,7 @@ func SearchImageTags(ctx context.Context, opts *ImageTagsSearchOptions) ([]*pack
func SearchExpiredUploadedBlobs(ctx context.Context, olderThan time.Duration) ([]*packages.PackageFile, error) {
var cond builder.Cond = builder.Eq{
"package_version.is_internal": true,
- "package_version.lower_version": UploadVersion,
+ "package_version.lower_version": container_module.UploadVersion,
"package.type": packages.TypeContainer,
}
cond = cond.And(builder.Lt{"package_file.created_unix": time.Now().Add(-olderThan).Unix()})
diff --git a/models/packages/descriptor.go b/models/packages/descriptor.go
index 1ea181c723..2d43dc3046 100644
--- a/models/packages/descriptor.go
+++ b/models/packages/descriptor.go
@@ -103,10 +103,10 @@ func (pd *PackageDescriptor) CalculateBlobSize() int64 {
// GetPackageDescriptor gets the package description for a version
func GetPackageDescriptor(ctx context.Context, pv *PackageVersion) (*PackageDescriptor, error) {
- return getPackageDescriptor(ctx, pv, cache.NewEphemeralCache())
+ return GetPackageDescriptorWithCache(ctx, pv, cache.NewEphemeralCache())
}
-func getPackageDescriptor(ctx context.Context, pv *PackageVersion, c *cache.EphemeralCache) (*PackageDescriptor, error) {
+func GetPackageDescriptorWithCache(ctx context.Context, pv *PackageVersion, c *cache.EphemeralCache) (*PackageDescriptor, error) {
p, err := cache.GetWithEphemeralCache(ctx, c, "package", pv.PackageID, GetPackageByID)
if err != nil {
return nil, err
@@ -270,7 +270,7 @@ func GetPackageDescriptors(ctx context.Context, pvs []*PackageVersion) ([]*Packa
func getPackageDescriptors(ctx context.Context, pvs []*PackageVersion, c *cache.EphemeralCache) ([]*PackageDescriptor, error) {
pds := make([]*PackageDescriptor, 0, len(pvs))
for _, pv := range pvs {
- pd, err := getPackageDescriptor(ctx, pv, c)
+ pd, err := GetPackageDescriptorWithCache(ctx, pv, c)
if err != nil {
return nil, err
}
diff --git a/models/packages/nuget/search.go b/models/packages/nuget/search.go
index 7a505ff08f..a4b23f31d5 100644
--- a/models/packages/nuget/search.go
+++ b/models/packages/nuget/search.go
@@ -33,7 +33,7 @@ func SearchVersions(ctx context.Context, opts *packages_model.PackageSearchOptio
Where(cond).
OrderBy("package.name ASC") if opts.Paginator != nil { - skip, take := opts.GetSkipTake() + skip, take := opts.Paginator.GetSkipTake() inner = inner.Limit(take, skip) } diff --git a/models/packages/package_property.go b/models/packages/package_property.go index 10670951ad..7ddbfd97e9 100644 --- a/models/packages/package_property.go +++ b/models/packages/package_property.go @@ -92,8 +92,8 @@ func DeletePropertyByID(ctx context.Context, propertyID int64) error { return err } -// DeletePropertyByName deletes properties by name -func DeletePropertyByName(ctx context.Context, refType PropertyType, refID int64, name string) error { +// DeletePropertiesByName deletes properties by name +func DeletePropertiesByName(ctx context.Context, refType PropertyType, refID int64, name string) error { _, err := db.GetEngine(ctx).Where("ref_type = ? AND ref_id = ? AND name = ?", refType, refID, name).Delete(&PackageProperty{}) return err } diff --git a/models/packages/package_version.go b/models/packages/package_version.go index bb7fd895f8..5672e0efbf 100644 --- a/models/packages/package_version.go +++ b/models/packages/package_version.go @@ -14,6 +14,7 @@ import ( "code.gitea.io/gitea/modules/util" "xorm.io/builder" + "xorm.io/xorm" ) // ErrDuplicatePackageVersion indicates a duplicated package version error @@ -187,7 +188,7 @@ type PackageSearchOptions struct { HasFileWithName string // only results are found which are associated with a file with the specific name HasFiles optional.Option[bool] // only results are found which have associated files Sort VersionSort - db.Paginator + Paginator db.Paginator } func (opts *PackageSearchOptions) ToConds() builder.Cond { @@ -282,6 +283,18 @@ func (opts *PackageSearchOptions) configureOrderBy(e db.Engine) { e.Desc("package_version.id") // Sort by id for stable order with duplicates in the other field } +func searchVersionsBySession(sess *xorm.Session, opts *PackageSearchOptions) ([]*PackageVersion, int64, error) { + opts.configureOrderBy(sess) + pvs := make([]*PackageVersion, 0, 10) + if opts.Paginator != nil { + sess = db.SetSessionPagination(sess, opts.Paginator) + count, err := sess.FindAndCount(&pvs) + return pvs, count, err + } + err := sess.Find(&pvs) + return pvs, int64(len(pvs)), err +} + // SearchVersions gets all versions of packages matching the search options func SearchVersions(ctx context.Context, opts *PackageSearchOptions) ([]*PackageVersion, int64, error) { sess := db.GetEngine(ctx). @@ -289,16 +302,7 @@ func SearchVersions(ctx context.Context, opts *PackageSearchOptions) ([]*Package Table("package_version"). Join("INNER", "package", "package.id = package_version.package_id"). Where(opts.ToConds()) - - opts.configureOrderBy(sess) - - if opts.Paginator != nil { - sess = db.SetSessionPagination(sess, opts) - } - - pvs := make([]*PackageVersion, 0, 10) - count, err := sess.FindAndCount(&pvs) - return pvs, count, err + return searchVersionsBySession(sess, opts) } // SearchLatestVersions gets the latest version of every package matching the search options @@ -316,15 +320,7 @@ func SearchLatestVersions(ctx context.Context, opts *PackageSearchOptions) ([]*P Join("INNER", "package", "package.id = package_version.package_id"). 
Where(builder.In("package_version.id", in)) - opts.configureOrderBy(sess) - - if opts.Paginator != nil { - sess = db.SetSessionPagination(sess, opts) - } - - pvs := make([]*PackageVersion, 0, 10) - count, err := sess.FindAndCount(&pvs) - return pvs, count, err + return searchVersionsBySession(sess, opts) } // ExistVersion checks if a version matching the search options exist diff --git a/models/project/column_test.go b/models/project/column_test.go index 5b93e7760f..6a615090a5 100644 --- a/models/project/column_test.go +++ b/models/project/column_test.go @@ -110,7 +110,7 @@ func Test_NewColumn(t *testing.T) { assert.NoError(t, err) assert.Len(t, columns, 3) - for i := 0; i < maxProjectColumns-3; i++ { + for i := range maxProjectColumns - 3 { err := NewColumn(db.DefaultContext, &Column{ Title: fmt.Sprintf("column-%d", i+4), ProjectID: project1.ID, diff --git a/models/pull/review_state.go b/models/pull/review_state.go index e46a22a49d..137af00eab 100644 --- a/models/pull/review_state.go +++ b/models/pull/review_state.go @@ -6,6 +6,7 @@ package pull import ( "context" "fmt" + "maps" "code.gitea.io/gitea/models/db" "code.gitea.io/gitea/modules/log" @@ -100,9 +101,7 @@ func mergeFiles(oldFiles, newFiles map[string]ViewedState) map[string]ViewedStat return oldFiles } - for file, viewed := range newFiles { - oldFiles[file] = viewed - } + maps.Copy(oldFiles, newFiles) return oldFiles } diff --git a/models/repo/repo_list.go b/models/repo/repo_list.go index b2f722dbb3..f2cdd2f284 100644 --- a/models/repo/repo_list.go +++ b/models/repo/repo_list.go @@ -449,7 +449,7 @@ func SearchRepositoryCondition(opts SearchRepoOptions) builder.Cond { if opts.Keyword != "" { // separate keyword subQueryCond := builder.NewCond() - for _, v := range strings.Split(opts.Keyword, ",") { + for v := range strings.SplitSeq(opts.Keyword, ",") { if opts.TopicOnly { subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)}) } else { @@ -464,7 +464,7 @@ func SearchRepositoryCondition(opts SearchRepoOptions) builder.Cond { keywordCond := builder.In("id", subQuery) if !opts.TopicOnly { likes := builder.NewCond() - for _, v := range strings.Split(opts.Keyword, ",") { + for v := range strings.SplitSeq(opts.Keyword, ",") { likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)}) // If the string looks like "org/repo", match against that pattern too diff --git a/models/repo/repo_unit.go b/models/repo/repo_unit.go index 8a7dbfe340..a5207bc22a 100644 --- a/models/repo/repo_unit.go +++ b/models/repo/repo_unit.go @@ -185,10 +185,8 @@ func (cfg *ActionsConfig) IsWorkflowDisabled(file string) bool { } func (cfg *ActionsConfig) DisableWorkflow(file string) { - for _, workflow := range cfg.DisabledWorkflows { - if file == workflow { - return - } + if slices.Contains(cfg.DisabledWorkflows, file) { + return } cfg.DisabledWorkflows = append(cfg.DisabledWorkflows, file) diff --git a/models/repo/upload.go b/models/repo/upload.go index fb57fb6c51..20a8fa26fe 100644 --- a/models/repo/upload.go +++ b/models/repo/upload.go @@ -124,7 +124,7 @@ func DeleteUploads(ctx context.Context, uploads ...*Upload) (err error) { defer committer.Close() ids := make([]int64, len(uploads)) - for i := 0; i < len(uploads); i++ { + for i := range uploads { ids[i] = uploads[i].ID } if err = db.DeleteByIDs[Upload](ctx, ids...); err != nil { diff --git a/models/unit/unit.go b/models/unit/unit.go index 4ca676802f..c0560678ca 100644 --- a/models/unit/unit.go +++ b/models/unit/unit.go @@ -6,6 +6,7 @@ package unit import ( "errors" "fmt" + "slices" 
"strings" "sync/atomic" @@ -204,22 +205,12 @@ func LoadUnitConfig() error { // UnitGlobalDisabled checks if unit type is global disabled func (u Type) UnitGlobalDisabled() bool { - for _, ud := range DisabledRepoUnitsGet() { - if u == ud { - return true - } - } - return false + return slices.Contains(DisabledRepoUnitsGet(), u) } // CanBeDefault checks if the unit type can be a default repo unit func (u *Type) CanBeDefault() bool { - for _, nadU := range NotAllowedDefaultRepoUnits { - if *u == nadU { - return false - } - } - return true + return !slices.Contains(NotAllowedDefaultRepoUnits, *u) } // Unit is a section of one repository diff --git a/models/user/email_address_test.go b/models/user/email_address_test.go index 0e52950cfd..c0666246b0 100644 --- a/models/user/email_address_test.go +++ b/models/user/email_address_test.go @@ -4,6 +4,7 @@ package user_test import ( + "slices" "testing" "code.gitea.io/gitea/models/db" @@ -100,12 +101,7 @@ func TestListEmails(t *testing.T) { assert.Greater(t, count, int64(5)) contains := func(match func(s *user_model.SearchEmailResult) bool) bool { - for _, v := range emails { - if match(v) { - return true - } - } - return false + return slices.ContainsFunc(emails, match) } assert.True(t, contains(func(s *user_model.SearchEmailResult) bool { return s.UID == 18 })) diff --git a/models/user/user_list.go b/models/user/user_list.go index 4241905058..1b6a27dd86 100644 --- a/models/user/user_list.go +++ b/models/user/user_list.go @@ -17,10 +17,7 @@ func GetUsersMapByIDs(ctx context.Context, userIDs []int64) (map[int64]*User, er left := len(userIDs) for left > 0 { - limit := db.DefaultMaxInSize - if left < limit { - limit = left - } + limit := min(left, db.DefaultMaxInSize) err := db.GetEngine(ctx). In("id", userIDs[:limit]). Find(&userMaps) diff --git a/models/user/user_test.go b/models/user/user_test.go index 93b6e68a8d..a2597ba3f5 100644 --- a/models/user/user_test.go +++ b/models/user/user_test.go @@ -204,9 +204,9 @@ func TestHashPasswordDeterministic(t *testing.T) { b := make([]byte, 16) u := &user_model.User{} algos := hash.RecommendedHashAlgorithms - for j := 0; j < len(algos); j++ { + for j := range algos { u.PasswdHashAlgo = algos[j] - for i := 0; i < 50; i++ { + for range 50 { // generate a random password rand.Read(b) pass := string(b) diff --git a/models/webhook/webhook.go b/models/webhook/webhook.go index 97ad373027..b234d9ffee 100644 --- a/models/webhook/webhook.go +++ b/models/webhook/webhook.go @@ -240,7 +240,7 @@ func CreateWebhooks(ctx context.Context, ws []*Webhook) error { if len(ws) == 0 { return nil } - for i := 0; i < len(ws); i++ { + for i := range ws { ws[i].Type = strings.TrimSpace(ws[i].Type) } return db.Insert(ctx, ws) |