Notable additions: - `redefines-builtin-id` forbids variable names that shadow Go builtins - `empty-lines` removes unnecessary empty lines that `gofumpt` does not remove for some reason - `superfluous-else` eliminates more superfluous `else` branches Rules are also sorted alphabetically and I cleaned up various parts of `.golangci.yml`. (tags/v1.22.0-rc1)
@@ -1,13 +1,14 @@ | |||
linters: | |||
enable-all: false | |||
disable-all: true | |||
fast: false | |||
enable: | |||
- bidichk | |||
# - deadcode # deprecated - https://github.com/golangci/golangci-lint/issues/1841 | |||
- depguard | |||
- dupl | |||
- errcheck | |||
- forbidigo | |||
- gocritic | |||
# - gocyclo # The cyclomatic complexity of a lot of functions is too high, we should refactor those another time. | |||
- gofmt | |||
- gofumpt | |||
- gosimple | |||
@@ -17,20 +18,18 @@ linters: | |||
- nolintlint | |||
- revive | |||
- staticcheck | |||
# - structcheck # deprecated - https://github.com/golangci/golangci-lint/issues/1841 | |||
- stylecheck | |||
- typecheck | |||
- unconvert | |||
- unused | |||
# - varcheck # deprecated - https://github.com/golangci/golangci-lint/issues/1841 | |||
- wastedassign | |||
enable-all: false | |||
disable-all: true | |||
fast: false | |||
run: | |||
timeout: 10m | |||
output: | |||
sort-results: true | |||
linters-settings: | |||
stylecheck: | |||
checks: ["all", "-ST1005", "-ST1003"] | |||
@@ -47,27 +46,37 @@ linters-settings: | |||
errorCode: 1 | |||
warningCode: 1 | |||
rules: | |||
- name: atomic | |||
- name: bare-return | |||
- name: blank-imports | |||
- name: constant-logical-expr | |||
- name: context-as-argument | |||
- name: context-keys-type | |||
- name: dot-imports | |||
- name: duplicated-imports | |||
- name: empty-lines | |||
- name: error-naming | |||
- name: error-return | |||
- name: error-strings | |||
- name: error-naming | |||
- name: errorf | |||
- name: exported | |||
- name: identical-branches | |||
- name: if-return | |||
- name: increment-decrement | |||
- name: var-naming | |||
- name: var-declaration | |||
- name: indent-error-flow | |||
- name: modifies-value-receiver | |||
- name: package-comments | |||
- name: range | |||
- name: receiver-naming | |||
- name: redefines-builtin-id | |||
- name: string-of-int | |||
- name: superfluous-else | |||
- name: time-naming | |||
- name: unconditional-recursion | |||
- name: unexported-return | |||
- name: indent-error-flow | |||
- name: errorf | |||
- name: duplicated-imports | |||
- name: modifies-value-receiver | |||
- name: unreachable-code | |||
- name: var-declaration | |||
- name: var-naming | |||
gofumpt: | |||
extra-rules: true | |||
depguard: | |||
@@ -93,8 +102,8 @@ issues: | |||
max-issues-per-linter: 0 | |||
max-same-issues: 0 | |||
exclude-dirs: [node_modules, public, web_src] | |||
exclude-case-sensitive: true | |||
exclude-rules: | |||
# Exclude some linters from running on tests files. | |||
- path: _test\.go | |||
linters: | |||
- gocyclo | |||
@@ -112,19 +121,19 @@ issues: | |||
- path: cmd | |||
linters: | |||
- forbidigo | |||
- linters: | |||
- text: "webhook" | |||
linters: | |||
- dupl | |||
text: "webhook" | |||
- linters: | |||
- text: "`ID' should not be capitalized" | |||
linters: | |||
- gocritic | |||
text: "`ID' should not be capitalized" | |||
- linters: | |||
- text: "swagger" | |||
linters: | |||
- unused | |||
- deadcode | |||
text: "swagger" | |||
- linters: | |||
- text: "argument x is overwritten before first use" | |||
linters: | |||
- staticcheck | |||
text: "argument x is overwritten before first use" | |||
- text: "commentFormatting: put a space between `//` and comment text" | |||
linters: | |||
- gocritic |
@@ -465,7 +465,7 @@ func hookPrintResult(output, isCreate bool, branch, url string) { | |||
fmt.Fprintf(os.Stderr, " %s\n", url) | |||
} | |||
fmt.Fprintln(os.Stderr, "") | |||
os.Stderr.Sync() | |||
_ = os.Stderr.Sync() | |||
} | |||
func pushOptions() map[string]string { |
@@ -110,7 +110,6 @@ func ParseCommitWithSignature(ctx context.Context, c *git.Commit) *CommitVerific | |||
Reason: "gpg.error.no_committer_account", | |||
} | |||
} | |||
} | |||
} | |||
@@ -227,7 +227,6 @@ func NamesToBean(names ...string) ([]any, error) { | |||
// Need to map provided names to beans... | |||
beanMap := make(map[string]any) | |||
for _, bean := range tables { | |||
beanMap[strings.ToLower(reflect.Indirect(reflect.ValueOf(bean)).Type().Name())] = bean | |||
beanMap[strings.ToLower(x.TableName(bean))] = bean | |||
beanMap[strings.ToLower(x.TableName(bean, true))] = bean |
@@ -345,11 +345,9 @@ func CreateReview(ctx context.Context, opts CreateReviewOptions) (*Review, error | |||
return nil, err | |||
} | |||
} | |||
} else if opts.ReviewerTeam != nil { | |||
review.Type = ReviewTypeRequest | |||
review.ReviewerTeamID = opts.ReviewerTeam.ID | |||
} else { | |||
return nil, fmt.Errorf("provide either reviewer or reviewer team") | |||
} |
@@ -177,7 +177,6 @@ func RecreateTable(sess *xorm.Session, bean any) error { | |||
log.Error("Unable to recreate uniques on table %s. Error: %v", tableName, err) | |||
return err | |||
} | |||
case setting.Database.Type.IsMySQL(): | |||
// MySQL will drop all the constraints on the old table | |||
if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil { | |||
@@ -228,7 +227,6 @@ func RecreateTable(sess *xorm.Session, bean any) error { | |||
return err | |||
} | |||
sequenceMap[sequence] = sequenceData | |||
} | |||
// CASCADE causes postgres to drop all the constraints on the old table | |||
@@ -293,9 +291,7 @@ func RecreateTable(sess *xorm.Session, bean any) error { | |||
return err | |||
} | |||
} | |||
} | |||
case setting.Database.Type.IsMSSQL(): | |||
// MSSQL will drop all the constraints on the old table | |||
if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil { | |||
@@ -308,7 +304,6 @@ func RecreateTable(sess *xorm.Session, bean any) error { | |||
log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err) | |||
return err | |||
} | |||
default: | |||
log.Fatal("Unrecognized DB") | |||
} |
@@ -262,7 +262,6 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error { | |||
for _, u := range units { | |||
var found bool | |||
for _, team := range teams { | |||
var teamU []*TeamUnit | |||
var unitEnabled bool | |||
err = sess.Where("team_id = ?", team.ID).Find(&teamU) | |||
@@ -331,7 +330,6 @@ func AddBranchProtectionCanPushAndEnableWhitelist(x *xorm.Engine) error { | |||
} | |||
if !protectedBranch.EnableApprovalsWhitelist { | |||
perm, err := getUserRepoPermission(sess, baseRepo, reviewer) | |||
if err != nil { | |||
return false, err |
@@ -104,7 +104,7 @@ func ChangeContainerMetadataMultiArch(x *xorm.Engine) error { | |||
// Convert to new metadata format | |||
new := &MetadataNew{ | |||
newMetadata := &MetadataNew{ | |||
Type: old.Type, | |||
IsTagged: old.IsTagged, | |||
Platform: old.Platform, | |||
@@ -119,7 +119,7 @@ func ChangeContainerMetadataMultiArch(x *xorm.Engine) error { | |||
Manifests: manifests, | |||
} | |||
metadataJSON, err := json.Marshal(new) | |||
metadataJSON, err := json.Marshal(newMetadata) | |||
if err != nil { | |||
return err | |||
} |
@@ -61,7 +61,6 @@ func AddScratchHash(x *xorm.Engine) error { | |||
if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil { | |||
return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err) | |||
} | |||
} | |||
} | |||
@@ -81,7 +81,6 @@ func HashAppToken(x *xorm.Engine) error { | |||
if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil { | |||
return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err) | |||
} | |||
} | |||
} | |||
@@ -226,9 +226,8 @@ func GetTeamIDsByNames(ctx context.Context, orgID int64, names []string, ignoreN | |||
if err != nil { | |||
if ignoreNonExistent { | |||
continue | |||
} else { | |||
return nil, err | |||
} | |||
return nil, err | |||
} | |||
ids = append(ids, u.ID) | |||
} |
@@ -110,13 +110,11 @@ func createBoardsForProjectsType(ctx context.Context, project *Project) error { | |||
var items []string | |||
switch project.BoardType { | |||
case BoardTypeBugTriage: | |||
items = setting.Project.ProjectBoardBugTriageType | |||
case BoardTypeBasicKanban: | |||
items = setting.Project.ProjectBoardBasicKanbanType | |||
case BoardTypeNone: | |||
fallthrough | |||
default: |
@@ -170,7 +170,6 @@ func GetReviewers(ctx context.Context, repo *Repository, doerID, posterID int64) | |||
// the owner of a private repo needs to be explicitly added. | |||
cond = cond.Or(builder.Eq{"`user`.id": repo.Owner.ID}) | |||
} | |||
} else { | |||
// This is a "public" repository: | |||
// Any user that has read access, is a watcher or organization member can be requested to review |
@@ -988,9 +988,8 @@ func GetUserIDsByNames(ctx context.Context, names []string, ignoreNonExistent bo | |||
if err != nil { | |||
if ignoreNonExistent { | |||
continue | |||
} else { | |||
return nil, err | |||
} | |||
return nil, err | |||
} | |||
ids = append(ids, u.ID) | |||
} |
@@ -63,16 +63,16 @@ func NewComplexity() { | |||
func setupComplexity(values []string) { | |||
if len(values) != 1 || values[0] != "off" { | |||
for _, val := range values { | |||
if complex, ok := charComplexities[val]; ok { | |||
validChars += complex.ValidChars | |||
requiredList = append(requiredList, complex) | |||
if complexity, ok := charComplexities[val]; ok { | |||
validChars += complexity.ValidChars | |||
requiredList = append(requiredList, complexity) | |||
} | |||
} | |||
if len(requiredList) == 0 { | |||
// No valid character classes found; use all classes as default | |||
for _, complex := range charComplexities { | |||
validChars += complex.ValidChars | |||
requiredList = append(requiredList, complex) | |||
for _, complexity := range charComplexities { | |||
validChars += complexity.ValidChars | |||
requiredList = append(requiredList, complexity) | |||
} | |||
} | |||
} |
@@ -307,10 +307,10 @@ func ParseTreeLine(objectFormat ObjectFormat, rd *bufio.Reader, modeBuf, fnameBu | |||
// Deal with the binary hash | |||
idx = 0 | |||
len := objectFormat.FullLength() / 2 | |||
for idx < len { | |||
length := objectFormat.FullLength() / 2 | |||
for idx < length { | |||
var read int | |||
read, err = rd.Read(shaBuf[idx:len]) | |||
read, err = rd.Read(shaBuf[idx:length]) | |||
n += read | |||
if err != nil { | |||
return mode, fname, sha, n, err |
@@ -49,9 +49,8 @@ readLoop: | |||
if len(line) > 0 && line[0] == ' ' { | |||
_, _ = signatureSB.Write(line[1:]) | |||
continue | |||
} else { | |||
pgpsig = false | |||
} | |||
pgpsig = false | |||
} | |||
if !message { |
@@ -232,7 +232,6 @@ func FindLFSFile(repo *git.Repository, objectID git.ObjectID) ([]*LFSResult, err | |||
errChan <- err | |||
break | |||
} | |||
} | |||
}() | |||
@@ -251,18 +251,18 @@ func (repo *Repository) CommitsByFileAndRange(opts CommitsByFileAndRangeOptions) | |||
return nil, err | |||
} | |||
len := objectFormat.FullLength() | |||
length := objectFormat.FullLength() | |||
commits := []*Commit{} | |||
shaline := make([]byte, len+1) | |||
shaline := make([]byte, length+1) | |||
for { | |||
n, err := io.ReadFull(stdoutReader, shaline) | |||
if err != nil || n < len { | |||
if err != nil || n < length { | |||
if err == io.EOF { | |||
err = nil | |||
} | |||
return commits, err | |||
} | |||
objectID, err := NewIDFromString(string(shaline[0:len])) | |||
objectID, err := NewIDFromString(string(shaline[0:length])) | |||
if err != nil { | |||
return nil, err | |||
} |
@@ -64,7 +64,6 @@ func getRefURL(refURL, urlPrefix, repoFullName, sshDomain string) string { | |||
// ex: git@try.gitea.io:go-gitea/gitea | |||
match := scpSyntax.FindAllStringSubmatch(refURI, -1) | |||
if len(match) > 0 { | |||
m := match[0] | |||
refHostname := m[2] | |||
pth := m[3] |
@@ -191,7 +191,6 @@ func (b *Indexer) addDelete(filename string, repo *repo_model.Repository, batch | |||
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error { | |||
batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize) | |||
if len(changes.Updates) > 0 { | |||
// Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first! | |||
if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil { | |||
log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err) | |||
@@ -335,7 +334,6 @@ func (b *Indexer) Search(ctx context.Context, opts *internal.SearchOptions) (int | |||
if result, err = b.inner.Indexer.Search(facetRequest); err != nil { | |||
return 0, nil, nil, err | |||
} | |||
} | |||
languagesFacet := result.Facets["languages"] | |||
for _, term := range languagesFacet.Terms.Terms() { |
@@ -145,7 +145,6 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) ( | |||
query := elastic.NewBoolQuery() | |||
if options.Keyword != "" { | |||
searchType := esMultiMatchTypePhrasePrefix | |||
if options.IsFuzzyKeyword { | |||
searchType = esMultiMatchTypeBestFields |
@@ -125,7 +125,6 @@ func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, ms | |||
if mode.Colorize { | |||
buf = append(buf, resetBytes...) | |||
} | |||
} | |||
if flags&(Lshortfile|Llongfile) != 0 { | |||
if mode.Colorize { |
@@ -466,7 +466,6 @@ func TestColorPreview(t *testing.T) { | |||
res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase) | |||
assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase) | |||
assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase) | |||
} | |||
negativeTests := []string{ | |||
@@ -549,7 +548,6 @@ func TestMathBlock(t *testing.T) { | |||
res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase) | |||
assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase) | |||
assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase) | |||
} | |||
} | |||
@@ -147,35 +147,35 @@ func (e *MarshalEncoder) marshalIntInternal(i int64) error { | |||
return e.w.WriteByte(byte(i - 5)) | |||
} | |||
var len int | |||
var length int | |||
if 122 < i && i <= 0xff { | |||
len = 1 | |||
length = 1 | |||
} else if 0xff < i && i <= 0xffff { | |||
len = 2 | |||
length = 2 | |||
} else if 0xffff < i && i <= 0xffffff { | |||
len = 3 | |||
length = 3 | |||
} else if 0xffffff < i && i <= 0x3fffffff { | |||
len = 4 | |||
length = 4 | |||
} else if -0x100 <= i && i < -123 { | |||
len = -1 | |||
length = -1 | |||
} else if -0x10000 <= i && i < -0x100 { | |||
len = -2 | |||
length = -2 | |||
} else if -0x1000000 <= i && i < -0x100000 { | |||
len = -3 | |||
length = -3 | |||
} else if -0x40000000 <= i && i < -0x1000000 { | |||
len = -4 | |||
length = -4 | |||
} else { | |||
return ErrInvalidIntRange | |||
} | |||
if err := e.w.WriteByte(byte(len)); err != nil { | |||
if err := e.w.WriteByte(byte(length)); err != nil { | |||
return err | |||
} | |||
if len < 0 { | |||
len = -len | |||
if length < 0 { | |||
length = -length | |||
} | |||
for c := 0; c < len; c++ { | |||
for c := 0; c < length; c++ { | |||
if err := e.w.WriteByte(byte(i >> uint(8*c) & 0xff)); err != nil { | |||
return err | |||
} | |||
@@ -244,13 +244,13 @@ func (e *MarshalEncoder) marshalArray(arr reflect.Value) error { | |||
return err | |||
} | |||
len := arr.Len() | |||
length := arr.Len() | |||
if err := e.marshalIntInternal(int64(len)); err != nil { | |||
if err := e.marshalIntInternal(int64(length)); err != nil { | |||
return err | |||
} | |||
for i := 0; i < len; i++ { | |||
for i := 0; i < length; i++ { | |||
if err := e.marshal(arr.Index(i).Interface()); err != nil { | |||
return err | |||
} |
@@ -339,7 +339,6 @@ func (pm *Manager) ProcessStacktraces(flat, noSystem bool) ([]*Process, int, int | |||
} | |||
sort.Slice(processes, after(processes)) | |||
if !flat { | |||
var sortChildren func(process *Process) | |||
sortChildren = func(process *Process) { |
@@ -32,7 +32,6 @@ func CreateTemporaryPath(prefix string) (string, error) { | |||
if err != nil { | |||
log.Error("Unable to create temporary directory: %s-*.git (%v)", prefix, err) | |||
return "", fmt.Errorf("Failed to create dir %s-*.git: %w", prefix, err) | |||
} | |||
return basePath, nil | |||
} |
@@ -19,9 +19,8 @@ func loadTimeFrom(rootCfg ConfigProvider) { | |||
DefaultUILocation, err = time.LoadLocation(zone) | |||
if err != nil { | |||
log.Fatal("Load time zone failed: %v", err) | |||
} else { | |||
log.Info("Default UI Location is %v", zone) | |||
} | |||
log.Info("Default UI Location is %v", zone) | |||
} | |||
if DefaultUILocation == nil { | |||
DefaultUILocation = time.Local |
@@ -138,10 +138,9 @@ func wrapTmplErrMsg(msg string) { | |||
if setting.IsProd { | |||
// in prod mode, Gitea must have correct templates to run | |||
log.Fatal("Gitea can't run with template errors: %s", msg) | |||
} else { | |||
// in dev mode, do not need to really exit, because the template errors could be fixed by developer soon and the templates get reloaded | |||
log.Error("There are template errors but Gitea continues to run in dev mode: %s", msg) | |||
} | |||
// in dev mode, do not need to really exit, because the template errors could be fixed by developer soon and the templates get reloaded | |||
log.Error("There are template errors but Gitea continues to run in dev mode: %s", msg) | |||
} | |||
type templateErrorPrettier struct { |
@@ -84,9 +84,8 @@ func Mailer(ctx context.Context) (*texttmpl.Template, *template.Template) { | |||
if err = buildSubjectBodyTemplate(subjectTemplates, bodyTemplates, tmplName, content); err != nil { | |||
if firstRun { | |||
log.Fatal("Failed to parse mail template, err: %v", err) | |||
} else { | |||
log.Error("Failed to parse mail template, err: %v", err) | |||
} | |||
log.Error("Failed to parse mail template, err: %v", err) | |||
} | |||
} | |||
} |
@@ -121,9 +121,9 @@ func Test_NormalizeEOL(t *testing.T) { | |||
} | |||
func Test_RandomInt(t *testing.T) { | |||
int, err := CryptoRandomInt(255) | |||
assert.True(t, int >= 0) | |||
assert.True(t, int <= 255) | |||
randInt, err := CryptoRandomInt(255) | |||
assert.True(t, randInt >= 0) | |||
assert.True(t, randInt <= 255) | |||
assert.NoError(t, err) | |||
} | |||
@@ -144,7 +144,6 @@ func ArtifactContexter() func(next http.Handler) http.Handler { | |||
var task *actions.ActionTask | |||
if err == nil { | |||
task, err = actions.GetTaskByID(req.Context(), tID) | |||
if err != nil { | |||
log.Error("Error runner api getting task by ID: %v", err) |
@@ -96,12 +96,12 @@ func UploadPackageFile(ctx *context.Context) { | |||
return | |||
} | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -310,12 +310,12 @@ func uploadFile(ctx *context.Context, fileFilter container.Set[string], fileKey | |||
return | |||
} | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusBadRequest, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -174,12 +174,12 @@ func EnumeratePackages(ctx *context.Context) { | |||
} | |||
func UploadPackageFile(ctx *context.Context) { | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -385,9 +385,9 @@ func EndUploadBlob(ctx *context.Context) { | |||
} | |||
return | |||
} | |||
close := true | |||
doClose := true | |||
defer func() { | |||
if close { | |||
if doClose { | |||
uploader.Close() | |||
} | |||
}() | |||
@@ -427,7 +427,7 @@ func EndUploadBlob(ctx *context.Context) { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
close = false | |||
doClose = false | |||
if err := container_service.RemoveBlobUploadByID(ctx, uploader.ID); err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) |
@@ -151,12 +151,12 @@ func UploadBinaryPackageFile(ctx *context.Context) { | |||
} | |||
func uploadPackageFile(ctx *context.Context, compositeKey string, properties map[string]string) { | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusBadRequest, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -127,12 +127,12 @@ func UploadPackageFile(ctx *context.Context) { | |||
return | |||
} | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -90,12 +90,12 @@ func UploadPackage(ctx *context.Context) { | |||
return | |||
} | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -154,12 +154,12 @@ func resolvePackage(ctx *context.Context, ownerID int64, name, version string) ( | |||
} | |||
func UploadPackage(ctx *context.Context) { | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -594,13 +594,13 @@ func UploadSymbolPackage(ctx *context.Context) { | |||
func processUploadedFile(ctx *context.Context, expectedType nuget_module.PackageType) (*nuget_module.Package, *packages_module.HashedBuffer, []io.Closer) { | |||
closables := make([]io.Closer, 0, 2) | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusBadRequest, err) | |||
return nil, nil, closables | |||
} | |||
if close { | |||
if needToClose { | |||
closables = append(closables, upload) | |||
} | |||
@@ -117,12 +117,12 @@ func GetRepositoryFile(ctx *context.Context) { | |||
} | |||
func UploadPackageFile(ctx *context.Context) { | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusInternalServerError, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -197,12 +197,12 @@ func DownloadPackageFile(ctx *context.Context) { | |||
// UploadPackageFile adds a file to the package. If the package does not exist, it gets created. | |||
func UploadPackageFile(ctx *context.Context) { | |||
upload, close, err := ctx.UploadStream() | |||
upload, needToClose, err := ctx.UploadStream() | |||
if err != nil { | |||
apiError(ctx, http.StatusBadRequest, err) | |||
return | |||
} | |||
if close { | |||
if needToClose { | |||
defer upload.Close() | |||
} | |||
@@ -217,7 +217,6 @@ func SearchIssues(ctx *context.APIContext) { | |||
var includedAnyLabels []int64 | |||
{ | |||
labels := ctx.FormTrim("labels") | |||
var includedLabelNames []string | |||
if len(labels) > 0 { |
@@ -180,7 +180,6 @@ func ListPushMirrors(ctx *context.APIContext) { | |||
if err == nil { | |||
responsePushMirrors = append(responsePushMirrors, m) | |||
} | |||
} | |||
ctx.SetLinkHeader(len(responsePushMirrors), utils.GetListOptions(ctx).PageSize) | |||
ctx.SetTotalCountHeader(count) |
@@ -1061,7 +1061,6 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) | |||
isSameRepo = true | |||
headUser = ctx.Repo.Owner | |||
headBranch = headInfos[0] | |||
} else if len(headInfos) == 2 { | |||
headUser, err = user_model.GetUserByName(ctx, headInfos[0]) | |||
if err != nil { | |||
@@ -1075,7 +1074,6 @@ func parseCompareInfo(ctx *context.APIContext, form api.CreatePullRequestOption) | |||
headBranch = headInfos[1] | |||
// The head repository can also point to the same repo | |||
isSameRepo = ctx.Repo.Owner.ID == headUser.ID | |||
} else { | |||
ctx.NotFound() | |||
return nil, nil, nil, nil, "", "" |
@@ -728,7 +728,6 @@ func apiReviewRequest(ctx *context.APIContext, opts api.PullReviewRequestOptions | |||
} | |||
if ctx.Repo.Repository.Owner.IsOrganization() && len(opts.TeamReviewers) > 0 { | |||
teamReviewers := make([]*organization.Team, 0, len(opts.TeamReviewers)) | |||
for _, t := range opts.TeamReviewers { | |||
var teamReviewer *organization.Team |
@@ -1084,7 +1084,6 @@ func updateMirror(ctx *context.APIContext, opts api.EditRepoOption) error { | |||
// update MirrorInterval | |||
if opts.MirrorInterval != nil { | |||
// MirrorInterval should be a duration | |||
interval, err := time.ParseDuration(*opts.MirrorInterval) | |||
if err != nil { |
@@ -478,7 +478,6 @@ func findEntryForFile(commit *git.Commit, target string) (*git.TreeEntry, error) | |||
func findWikiRepoCommit(ctx *context.APIContext) (*git.Repository, *git.Commit) { | |||
wikiRepo, err := gitrepo.OpenWikiRepository(ctx, ctx.Repo.Repository) | |||
if err != nil { | |||
if git.IsErrNotExist(err) || err.Error() == "no such file or directory" { | |||
ctx.NotFound(err) | |||
} else { |
@@ -198,7 +198,6 @@ func preReceiveBranch(ctx *preReceiveContext, oldCommitID, newCommitID string, r | |||
UserMsg: fmt.Sprintf("branch %s is protected from force push", branchName), | |||
}) | |||
return | |||
} | |||
} | |||
@@ -644,7 +644,6 @@ func ArtifactsDownloadView(ctx *context_module.Context) { | |||
writer := zip.NewWriter(ctx.Resp) | |||
defer writer.Close() | |||
for _, art := range artifacts { | |||
f, err := storage.ActionsArtifacts.Open(art.StoragePath) | |||
if err != nil { | |||
ctx.Error(http.StatusInternalServerError, err.Error()) |
@@ -933,7 +933,6 @@ func setTemplateIfExists(ctx *context.Context, ctxDataKey string, possibleFiles | |||
} | |||
} | |||
} | |||
} | |||
if template.Ref != "" && !strings.HasPrefix(template.Ref, "refs/") { // Assume that the ref intended is always a branch - for tags users should use refs/tags/<ref> | |||
@@ -1681,7 +1680,6 @@ func ViewIssue(ctx *context.Context) { | |||
if comment.ProjectID > 0 && comment.Project == nil { | |||
comment.Project = ghostProject | |||
} | |||
} else if comment.Type == issues_model.CommentTypeAssignees || comment.Type == issues_model.CommentTypeReviewRequest { | |||
if err = comment.LoadAssigneeUserAndTeam(ctx); err != nil { | |||
ctx.ServerError("LoadAssigneeUserAndTeam", err) | |||
@@ -2610,7 +2608,6 @@ func SearchIssues(ctx *context.Context) { | |||
var includedAnyLabels []int64 | |||
{ | |||
labels := ctx.FormTrim("labels") | |||
var includedLabelNames []string | |||
if len(labels) > 0 { | |||
@@ -2994,7 +2991,6 @@ func NewComment(ctx *context.Context) { | |||
if (ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) || (ctx.IsSigned && issue.IsPoster(ctx.Doer.ID))) && | |||
(form.Status == "reopen" || form.Status == "close") && | |||
!(issue.IsPull && issue.PullRequest.HasMerged) { | |||
// Duplication and conflict check should apply to reopen pull request. | |||
var pr *issues_model.PullRequest | |||
@@ -443,7 +443,6 @@ func PrepareViewPullInfo(ctx *context.Context, issue *issues_model.Issue) *git.C | |||
} | |||
if pb != nil && pb.EnableStatusCheck { | |||
var missingRequiredChecks []string | |||
for _, requiredContext := range pb.StatusCheckContexts { | |||
contextFound := false | |||
@@ -646,7 +645,6 @@ func viewPullFiles(ctx *context.Context, specifiedStartCommit, specifiedEndCommi | |||
// Validate the given commit sha to show (if any passed) | |||
if willShowSpecifiedCommit || willShowSpecifiedCommitRange { | |||
foundStartCommit := len(specifiedStartCommit) == 0 | |||
foundEndCommit := len(specifiedEndCommit) == 0 | |||
@@ -974,7 +972,6 @@ func UpdatePullRequest(ctx *context.Context) { | |||
ctx.Flash.Error(flashError) | |||
ctx.Redirect(issue.Link()) | |||
return | |||
} | |||
ctx.Flash.Error(err.Error()) | |||
ctx.Redirect(issue.Link()) |
@@ -318,7 +318,6 @@ func UpdateViewedFiles(ctx *context.Context) { | |||
updatedFiles := make(map[string]pull_model.ViewedState, len(data.Files)) | |||
for file, viewed := range data.Files { | |||
// Only unviewed and viewed are possible, has-changed can not be set from the outside | |||
state := pull_model.Unviewed | |||
if viewed { |
@@ -347,7 +347,6 @@ func loadLatestCommitData(ctx *context.Context, latestCommit *git.Commit) bool { | |||
// or of directory if not in root directory. | |||
ctx.Data["LatestCommit"] = latestCommit | |||
if latestCommit != nil { | |||
verification := asymkey_model.ParseCommitWithSignature(ctx, latestCommit) | |||
if err := asymkey_model.CalculateTrustStatus(verification, ctx.Repo.Repository.GetTrustModel(), func(user *user_model.User) (bool, error) { |
@@ -298,13 +298,15 @@ func handleWorkflows( | |||
TriggerEvent: dwf.TriggerEvent.Name, | |||
Status: actions_model.StatusWaiting, | |||
} | |||
if need, err := ifNeedApproval(ctx, run, input.Repo, input.Doer); err != nil { | |||
need, err := ifNeedApproval(ctx, run, input.Repo, input.Doer) | |||
if err != nil { | |||
log.Error("check if need approval for repo %d with user %d: %v", input.Repo.ID, input.Doer.ID, err) | |||
continue | |||
} else { | |||
run.NeedApproval = need | |||
} | |||
run.NeedApproval = need | |||
if err := run.LoadAttributes(ctx); err != nil { | |||
log.Error("LoadAttributes: %v", err) | |||
continue |
@@ -156,7 +156,6 @@ func (source *Source) Sync(ctx context.Context, updateExisting bool) error { | |||
!strings.EqualFold(usr.Email, su.Mail) || | |||
usr.FullName != fullName || | |||
!usr.IsActive { | |||
log.Trace("SyncExternalUsers[%s]: Updating user %s", source.authSource.Name, usr.Name) | |||
opts := &user_service.UpdateOptions{ |
@@ -825,7 +825,6 @@ func getRefName(ctx *Base, repo *Repository, pathType RepoRefType) string { | |||
case RepoRefBranch: | |||
ref := getRefNameFromPath(ctx, repo, path, repo.GitRepo.IsBranchExist) | |||
if len(ref) == 0 { | |||
// check if ref is HEAD | |||
parts := strings.Split(path, "/") | |||
if parts[0] == headRefName { | |||
@@ -968,7 +967,6 @@ func RepoRefByType(refType RepoRefType, ignoreNotExistErr ...bool) func(*Context | |||
return cancel | |||
} | |||
ctx.Repo.CommitID = ctx.Repo.Commit.ID.String() | |||
} else if refType.RefTypeIncludesTags() && ctx.Repo.GitRepo.IsTagExist(refName) { | |||
ctx.Repo.IsViewTag = true | |||
ctx.Repo.TagName = refName |
@@ -1044,10 +1044,10 @@ func createDiffFile(diff *Diff, line string) *DiffFile { | |||
// diff --git a/b b/b b/b b/b b/b b/b | |||
// | |||
midpoint := (len(line) + len(cmdDiffHead) - 1) / 2 | |||
new, old := line[len(cmdDiffHead):midpoint], line[midpoint+1:] | |||
if len(new) > 2 && len(old) > 2 && new[2:] == old[2:] { | |||
curFile.OldName = old[2:] | |||
curFile.Name = old[2:] | |||
newPart, oldPart := line[len(cmdDiffHead):midpoint], line[midpoint+1:] | |||
if len(newPart) > 2 && len(oldPart) > 2 && newPart[2:] == oldPart[2:] { | |||
curFile.OldName = oldPart[2:] | |||
curFile.Name = oldPart[2:] | |||
} | |||
} | |||
} | |||
@@ -1181,7 +1181,6 @@ func GetDiff(ctx context.Context, gitRepo *git.Repository, opts *DiffOptions, fi | |||
defer deferable() | |||
for _, diffFile := range diff.Files { | |||
isVendored := optional.None[bool]() | |||
isGenerated := optional.None[bool]() | |||
if checker != nil { |
@@ -118,7 +118,6 @@ func UpdateIssuesCommit(ctx context.Context, doer *user_model.User, repo *repo_m | |||
var refIssue *issues_model.Issue | |||
var err error | |||
for _, ref := range references.FindAllIssueReferences(c.Message) { | |||
// issue is from another repo | |||
if len(ref.Owner) > 0 && len(ref.Name) > 0 { | |||
refRepo, err = repo_model.GetRepositoryByOwnerAndName(ctx, ref.Owner, ref.Name) | |||
@@ -189,15 +188,15 @@ func UpdateIssuesCommit(ctx context.Context, doer *user_model.User, repo *repo_m | |||
continue | |||
} | |||
} | |||
close := ref.Action == references.XRefActionCloses | |||
if close && len(ref.TimeLog) > 0 { | |||
isClosed := ref.Action == references.XRefActionCloses | |||
if isClosed && len(ref.TimeLog) > 0 { | |||
if err := issueAddTime(ctx, refIssue, doer, c.Timestamp, ref.TimeLog); err != nil { | |||
return err | |||
} | |||
} | |||
if close != refIssue.IsClosed { | |||
if isClosed != refIssue.IsClosed { | |||
refIssue.Repo = refRepo | |||
if err := ChangeStatus(ctx, refIssue, doer, c.Sha1, close); err != nil { | |||
if err := ChangeStatus(ctx, refIssue, doer, c.Sha1, isClosed); err != nil { | |||
return err | |||
} | |||
} |
@@ -86,12 +86,13 @@ func renderRepoFileCodePreview(ctx context.Context, opts markup.RenderCodePrevie | |||
lineNums := make([]int, 0, lineCount) | |||
lineCodes := make([]string, 0, lineCount) | |||
for i := opts.LineStart; i <= opts.LineStop; i++ { | |||
if line, err := reader.ReadString('\n'); err != nil && line == "" { | |||
line, err := reader.ReadString('\n') | |||
if err != nil && line == "" { | |||
break | |||
} else { | |||
lineNums = append(lineNums, i) | |||
lineCodes = append(lineCodes, line) | |||
} | |||
lineNums = append(lineNums, i) | |||
lineCodes = append(lineCodes, line) | |||
} | |||
realLineStop := max(opts.LineStart, opts.LineStart+len(lineNums)-1) | |||
highlightLines := code.HighlightSearchResultCode(opts.FilePath, language, lineNums, strings.Join(lineCodes, "")) |
@@ -410,7 +410,6 @@ func (g *GiteaDownloader) GetIssues(page, perPage int) ([]*base.Issue, bool, err | |||
return nil, false, fmt.Errorf("error while listing issues: %w", err) | |||
} | |||
for _, issue := range issues { | |||
labels := make([]*base.Label, 0, len(issue.Labels)) | |||
for i := range issue.Labels { | |||
labels = append(labels, g.convertGiteaLabel(issue.Labels[i])) |
@@ -421,7 +421,6 @@ func (g *GitlabDownloader) GetIssues(page, perPage int) ([]*base.Issue, bool, er | |||
return nil, false, fmt.Errorf("error while listing issues: %w", err) | |||
} | |||
for _, issue := range issues { | |||
labels := make([]*base.Label, 0, len(issue.Labels)) | |||
for _, l := range issue.Labels { | |||
labels = append(labels, &base.Label{ |
@@ -523,13 +523,13 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool { | |||
theCommits.Commits = theCommits.Commits[:setting.UI.FeedMaxCommitNum] | |||
} | |||
if newCommit, err := gitRepo.GetCommit(newCommitID); err != nil { | |||
newCommit, err := gitRepo.GetCommit(newCommitID) | |||
if err != nil { | |||
log.Error("SyncMirrors [repo: %-v]: unable to get commit %s: %v", m.Repo, newCommitID, err) | |||
continue | |||
} else { | |||
theCommits.HeadCommit = repo_module.CommitToPushCommit(newCommit) | |||
} | |||
theCommits.HeadCommit = repo_module.CommitToPushCommit(newCommit) | |||
theCommits.CompareURL = m.Repo.ComposeCompareURL(oldCommitID, newCommitID) | |||
notify_service.SyncPushCommits(ctx, m.Repo.MustOwner(ctx), m.Repo, &repo_module.PushUpdateOptions{ | |||
@@ -557,7 +557,6 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool { | |||
log.Error("SyncMirrors [repo: %-v]: unable to update repository 'updated_unix': %v", m.Repo, err) | |||
return false | |||
} | |||
} | |||
log.Trace("SyncMirrors [repo: %-v]: Successfully updated", m.Repo) |
@@ -231,9 +231,9 @@ func Merge(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.U | |||
if err = ref.Issue.LoadRepo(ctx); err != nil { | |||
return err | |||
} | |||
close := ref.RefAction == references.XRefActionCloses | |||
if close != ref.Issue.IsClosed { | |||
if err = issue_service.ChangeStatus(ctx, ref.Issue, doer, pr.MergedCommitID, close); err != nil { | |||
isClosed := ref.RefAction == references.XRefActionCloses | |||
if isClosed != ref.Issue.IsClosed { | |||
if err = issue_service.ChangeStatus(ctx, ref.Issue, doer, pr.MergedCommitID, isClosed); err != nil { | |||
// Allow ErrDependenciesLeft | |||
if !issues_model.IsErrDependenciesLeft(err) { | |||
return err |
@@ -807,7 +807,6 @@ func GetSquashMergeCommitMessages(ctx context.Context, pr *issues_model.PullRequ | |||
if err != nil { | |||
log.Error("Unable to get commits between: %s %s Error: %v", pr.HeadBranch, pr.MergeBase, err) | |||
return "" | |||
} | |||
if len(commits) == 0 { | |||
break |
@@ -357,7 +357,6 @@ func ListUnadoptedRepositories(ctx context.Context, query string, opts *db.ListO | |||
return err | |||
} | |||
repoNamesToCheck = repoNamesToCheck[:0] | |||
} | |||
return filepath.SkipDir | |||
}); err != nil { |
@@ -187,7 +187,6 @@ func getExtendedCommitStats(repo *git.Repository, revision string /*, limit int | |||
Stats: &commitStats, | |||
} | |||
extendedCommitStats = append(extendedCommitStats, res) | |||
} | |||
_ = stdoutReader.Close() | |||
return nil |
@@ -208,7 +208,6 @@ func ChangeRepoFiles(ctx context.Context, repo *repo_model.Repository, doer *use | |||
return nil, fmt.Errorf("ConvertToSHA1: Invalid last commit ID: %w", err) | |||
} | |||
opts.LastCommitID = lastCommitID.String() | |||
} | |||
for _, file := range opts.Files { | |||
@@ -360,7 +359,6 @@ func handleCheckErrors(file *ChangeRepoFile, commit *git.Commit, opts *ChangeRep | |||
Path: file.Options.treePath, | |||
} | |||
} | |||
} | |||
} | |||
@@ -105,7 +105,6 @@ func deleteUser(ctx context.Context, u *user_model.User, purge bool) (err error) | |||
if purge || (setting.Service.UserDeleteWithCommentsMaxTime != 0 && | |||
u.CreatedUnix.AsTime().Add(setting.Service.UserDeleteWithCommentsMaxTime).After(time.Now())) { | |||
// Delete Comments | |||
const batchSize = 50 | |||
for { |
@@ -94,7 +94,7 @@ func TestUpdateAuth(t *testing.T) { | |||
assert.NoError(t, unittest.PrepareTestDatabase()) | |||
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 28}) | |||
copy := *user | |||
userCopy := *user | |||
assert.NoError(t, UpdateAuth(db.DefaultContext, user, &UpdateAuthOptions{ | |||
LoginName: optional.Some("new-login"), | |||
@@ -106,8 +106,8 @@ func TestUpdateAuth(t *testing.T) { | |||
MustChangePassword: optional.Some(true), | |||
})) | |||
assert.True(t, user.MustChangePassword) | |||
assert.NotEqual(t, copy.Passwd, user.Passwd) | |||
assert.NotEqual(t, copy.Salt, user.Salt) | |||
assert.NotEqual(t, userCopy.Passwd, user.Passwd) | |||
assert.NotEqual(t, userCopy.Salt, user.Salt) | |||
assert.NoError(t, UpdateAuth(db.DefaultContext, user, &UpdateAuthOptions{ | |||
ProhibitLogin: optional.Some(true), |
@@ -274,14 +274,12 @@ func newDiscordRequest(ctx context.Context, w *webhook_model.Webhook, t *webhook | |||
func parseHookPullRequestEventType(event webhook_module.HookEventType) (string, error) { | |||
switch event { | |||
case webhook_module.HookEventPullRequestReviewApproved: | |||
return "approved", nil | |||
case webhook_module.HookEventPullRequestReviewRejected: | |||
return "rejected", nil | |||
case webhook_module.HookEventPullRequestReviewComment: | |||
return "comment", nil | |||
default: | |||
return "", errors.New("unknown event type") | |||
} |
@@ -179,7 +179,6 @@ func (m matrixConvertor) Push(p *api.PushPayload) (MatrixPayload, error) { | |||
if i < len(p.Commits)-1 { | |||
text += "<br>" | |||
} | |||
} | |||
return m.newPayload(text, p.Commits...) |
@@ -102,18 +102,20 @@ func TestE2e(t *testing.T) { | |||
cmd := exec.Command(runArgs[0], runArgs...) | |||
cmd.Env = os.Environ() | |||
cmd.Env = append(cmd.Env, fmt.Sprintf("GITEA_URL=%s", setting.AppURL)) | |||
var stdout, stderr bytes.Buffer | |||
cmd.Stdout = &stdout | |||
cmd.Stderr = &stderr | |||
err := cmd.Run() | |||
if err != nil { | |||
// Currently colored output is conflicting. Using Printf until that is resolved. | |||
fmt.Printf("%v", stdout.String()) | |||
fmt.Printf("%v", stderr.String()) | |||
log.Fatal("Playwright Failed: %s", err) | |||
} else { | |||
fmt.Printf("%v", stdout.String()) | |||
} | |||
fmt.Printf("%v", stdout.String()) | |||
}) | |||
}) | |||
} |
@@ -111,7 +111,7 @@ func TestAPINotification(t *testing.T) { | |||
MakeRequest(t, NewRequest(t, "GET", "/api/v1/notifications/new"), http.StatusUnauthorized) | |||
new := struct { | |||
newStruct := struct { | |||
New int64 `json:"new"` | |||
}{} | |||
@@ -119,8 +119,8 @@ func TestAPINotification(t *testing.T) { | |||
req = NewRequest(t, "GET", "/api/v1/notifications/new"). | |||
AddTokenAuth(token) | |||
resp = MakeRequest(t, req, http.StatusOK) | |||
DecodeJSON(t, resp, &new) | |||
assert.True(t, new.New > 0) | |||
DecodeJSON(t, resp, &newStruct) | |||
assert.True(t, newStruct.New > 0) | |||
// -- mark notifications as read -- | |||
req = NewRequest(t, "GET", "/api/v1/notifications?status-types=unread"). | |||
@@ -153,8 +153,8 @@ func TestAPINotification(t *testing.T) { | |||
req = NewRequest(t, "GET", "/api/v1/notifications/new"). | |||
AddTokenAuth(token) | |||
resp = MakeRequest(t, req, http.StatusOK) | |||
DecodeJSON(t, resp, &new) | |||
assert.True(t, new.New == 0) | |||
DecodeJSON(t, resp, &newStruct) | |||
assert.True(t, newStruct.New == 0) | |||
} | |||
func TestAPINotificationPUT(t *testing.T) { |
@@ -71,7 +71,6 @@ func TestPullCreate_CommitStatus(t *testing.T) { | |||
// Update commit status, and check if icon is updated as well | |||
for _, status := range statusList { | |||
// Call API to add status for commit | |||
t.Run("CreateStatus", doAPICreateCommitStatus(testCtx, commitID, api.CreateStatusOption{ | |||
State: status, |