commands: | commands: | ||||
- make clean | - make clean | ||||
- make generate | - make generate | ||||
- make vet | |||||
- make lint | |||||
- make fmt-check | |||||
- make golangci-lint | |||||
- make revive | |||||
- make swagger-check | - make swagger-check | ||||
- make swagger-validate | - make swagger-validate | ||||
- make misspell-check | |||||
- make test-vendor | - make test-vendor | ||||
- make build | - make build | ||||
when: | when: |
linters: | |||||
enable: | |||||
- gosimple | |||||
- deadcode | |||||
- typecheck | |||||
- govet | |||||
- errcheck | |||||
- staticcheck | |||||
- unused | |||||
- structcheck | |||||
- varcheck | |||||
- golint | |||||
- dupl | |||||
#- gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time. | |||||
- gofmt | |||||
- misspell | |||||
- gocritic | |||||
enable-all: false | |||||
disable-all: true | |||||
fast: false | |||||
linters-settings: | |||||
gocritic: | |||||
disabled-checks: | |||||
- ifElseChain | |||||
- singleCaseSwitch # Every time this occured in the code, there was no other way. | |||||
issues: | |||||
exclude-rules: | |||||
# Exclude some linters from running on tests files. | |||||
- path: _test\.go | |||||
linters: | |||||
- gocyclo | |||||
- errcheck | |||||
- dupl | |||||
- gosec | |||||
- unparam | |||||
- staticcheck | |||||
- path: models/migrations/v | |||||
linters: | |||||
- gocyclo | |||||
- errcheck | |||||
- dupl | |||||
- gosec | |||||
- linters: | |||||
- dupl | |||||
text: "webhook" | |||||
- linters: | |||||
- gocritic | |||||
text: "`ID' should not be capitalized" | |||||
- path: modules/templates/helper.go | |||||
linters: | |||||
- gocritic | |||||
- linters: | |||||
- unused | |||||
- deadcode | |||||
text: "swagger" | |||||
- path: contrib/pr/checkout.go | |||||
linters: | |||||
- errcheck | |||||
- path: models/issue.go | |||||
linters: | |||||
- errcheck | |||||
- path: models/migrations/ | |||||
linters: | |||||
- errcheck | |||||
- path: modules/log/ | |||||
linters: | |||||
- errcheck | |||||
- path: routers/routes/routes.go | |||||
linters: | |||||
- dupl | |||||
- path: routers/repo/view.go | |||||
linters: | |||||
- dupl | |||||
- path: models/migrations/ | |||||
linters: | |||||
- unused | |||||
- linters: | |||||
- staticcheck | |||||
text: "argument x is overwritten before first use" | |||||
- path: modules/httplib/httplib.go | |||||
linters: | |||||
- staticcheck | |||||
# Enabling this would require refactoring the methods and how they are called. | |||||
- path: models/issue_comment_list.go | |||||
linters: | |||||
- dupl | |||||
# "Destroy" is misspelled in github.com/go-macaron/session/session.go:213 so it's not our responsability to fix it | |||||
- path: modules/session/virtual.go | |||||
linters: | |||||
- misspell | |||||
text: '`Destory` is a misspelling of `Destroy`' | |||||
- path: modules/session/memory.go | |||||
linters: | |||||
- misspell | |||||
text: '`Destory` is a misspelling of `Destroy`' |
.PHONY: lint | .PHONY: lint | ||||
lint: | lint: | ||||
@echo 'make lint is depricated. Use "make revive" if you want to use the old lint tool, or "make golangci-lint" to run a complete code check.' | |||||
.PHONY: revive | |||||
revive: | |||||
@hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ | @hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ | ||||
$(GO) get -u github.com/mgechev/revive; \ | $(GO) get -u github.com/mgechev/revive; \ | ||||
fi | fi | ||||
.PHONY: pr | .PHONY: pr | ||||
pr: | pr: | ||||
$(GO) run contrib/pr/checkout.go $(PR) | $(GO) run contrib/pr/checkout.go $(PR) | ||||
.PHONY: golangci-lint | |||||
golangci-lint: | |||||
@hash golangci-lint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ | |||||
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.16.0; \ | |||||
fi | |||||
golangci-lint run |
} | } | ||||
// update custom URL mapping | // update custom URL mapping | ||||
var customURLMapping *oauth2.CustomURLMapping | |||||
var customURLMapping = &oauth2.CustomURLMapping{} | |||||
if oAuth2Config.CustomURLMapping != nil { | if oAuth2Config.CustomURLMapping != nil { | ||||
customURLMapping.TokenURL = oAuth2Config.CustomURLMapping.TokenURL | customURLMapping.TokenURL = oAuth2Config.CustomURLMapping.TokenURL |
if err != nil { | if err != nil { | ||||
log.Fatalf("Failed to open cert.pem for writing: %v", err) | log.Fatalf("Failed to open cert.pem for writing: %v", err) | ||||
} | } | ||||
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) | |||||
certOut.Close() | |||||
err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) | |||||
if err != nil { | |||||
log.Fatalf("Failed to encode certificate: %v", err) | |||||
} | |||||
err = certOut.Close() | |||||
if err != nil { | |||||
log.Fatalf("Failed to write cert: %v", err) | |||||
} | |||||
log.Println("Written cert.pem") | log.Println("Written cert.pem") | ||||
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) | keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) | ||||
if err != nil { | if err != nil { | ||||
log.Fatalf("Failed to open key.pem for writing: %v", err) | log.Fatalf("Failed to open key.pem for writing: %v", err) | ||||
} | } | ||||
pem.Encode(keyOut, pemBlockForKey(priv)) | |||||
keyOut.Close() | |||||
err = pem.Encode(keyOut, pemBlockForKey(priv)) | |||||
if err != nil { | |||||
log.Fatalf("Failed to encode key: %v", err) | |||||
} | |||||
err = keyOut.Close() | |||||
if err != nil { | |||||
log.Fatalf("Failed to write key: %v", err) | |||||
} | |||||
log.Println("Written key.pem") | log.Println("Written key.pem") | ||||
return nil | return nil | ||||
} | } |
) | ) | ||||
const ( | const ( | ||||
accessDenied = "Repository does not exist or you do not have access" | |||||
lfsAuthenticateVerb = "git-lfs-authenticate" | lfsAuthenticateVerb = "git-lfs-authenticate" | ||||
) | ) | ||||
} | } | ||||
func setup(logPath string) { | func setup(logPath string) { | ||||
log.DelLogger("console") | |||||
_ = log.DelLogger("console") | |||||
setting.NewContext() | setting.NewContext() | ||||
checkLFSVersion() | checkLFSVersion() | ||||
} | } | ||||
} | } | ||||
if len(c.Args()) < 1 { | if len(c.Args()) < 1 { | ||||
cli.ShowSubcommandHelp(c) | |||||
if err := cli.ShowSubcommandHelp(c); err != nil { | |||||
fmt.Printf("error showing subcommand help: %v\n", err) | |||||
} | |||||
return nil | return nil | ||||
} | } | ||||
} | } | ||||
err = runHTTPS(listenAddr, setting.CertFile, setting.KeyFile, context2.ClearHandler(m)) | err = runHTTPS(listenAddr, setting.CertFile, setting.KeyFile, context2.ClearHandler(m)) | ||||
case setting.FCGI: | case setting.FCGI: | ||||
listener, err := net.Listen("tcp", listenAddr) | |||||
var listener net.Listener | |||||
listener, err = net.Listen("tcp", listenAddr) | |||||
if err != nil { | if err != nil { | ||||
log.Fatal("Failed to bind %s: %v", listenAddr, err) | log.Fatal("Failed to bind %s: %v", listenAddr, err) | ||||
} | } | ||||
defer listener.Close() | |||||
defer func() { | |||||
if err := listener.Close(); err != nil { | |||||
log.Fatal("Failed to stop server: %v", err) | |||||
} | |||||
}() | |||||
err = fcgi.Serve(listener, context2.ClearHandler(m)) | err = fcgi.Serve(listener, context2.ClearHandler(m)) | ||||
case setting.UnixSocket: | case setting.UnixSocket: | ||||
if err := os.Remove(listenAddr); err != nil && !os.IsNotExist(err) { | if err := os.Remove(listenAddr); err != nil && !os.IsNotExist(err) { |
routers.NewServices() | routers.NewServices() | ||||
//x, err = xorm.NewEngine("sqlite3", "file::memory:?cache=shared") | //x, err = xorm.NewEngine("sqlite3", "file::memory:?cache=shared") | ||||
var helper testfixtures.Helper | |||||
helper = &testfixtures.SQLite{} | |||||
var helper testfixtures.Helper = &testfixtures.SQLite{} | |||||
models.NewEngine(func(_ *xorm.Engine) error { | models.NewEngine(func(_ *xorm.Engine) error { | ||||
return nil | return nil | ||||
}) | }) |
req = NewRequestWithValues(t, "POST", link, map[string]string{ | req = NewRequestWithValues(t, "POST", link, map[string]string{ | ||||
"_csrf": getCsrf(t, htmlDoc.doc), | "_csrf": getCsrf(t, htmlDoc.doc), | ||||
}) | }) | ||||
resp = session.MakeRequest(t, req, http.StatusOK) | |||||
session.MakeRequest(t, req, http.StatusOK) | |||||
url, err := url.Parse(link) | url, err := url.Parse(link) | ||||
assert.NoError(t, err) | assert.NoError(t, err) |
"content": "Content", | "content": "Content", | ||||
"commit_choice": "direct", | "commit_choice": "direct", | ||||
}) | }) | ||||
resp = session.MakeRequest(t, req, http.StatusFound) | |||||
session.MakeRequest(t, req, http.StatusFound) | |||||
}) | }) | ||||
} | } | ||||
"_csrf": csrf, | "_csrf": csrf, | ||||
"protected": "on", | "protected": "on", | ||||
}) | }) | ||||
resp := session.MakeRequest(t, req, http.StatusFound) | |||||
session.MakeRequest(t, req, http.StatusFound) | |||||
// Check if master branch has been locked successfully | // Check if master branch has been locked successfully | ||||
flashCookie := session.GetCookie("macaron_flash") | flashCookie := session.GetCookie("macaron_flash") | ||||
assert.NotNil(t, flashCookie) | assert.NotNil(t, flashCookie) | ||||
// Request editor page | // Request editor page | ||||
req = NewRequest(t, "GET", "/user2/repo1/_new/master/") | req = NewRequest(t, "GET", "/user2/repo1/_new/master/") | ||||
resp = session.MakeRequest(t, req, http.StatusOK) | |||||
resp := session.MakeRequest(t, req, http.StatusOK) | |||||
doc := NewHTMLParser(t, resp.Body) | doc := NewHTMLParser(t, resp.Body) | ||||
lastCommit := doc.GetInputValueByName("last_commit") | lastCommit := doc.GetInputValueByName("last_commit") |
} | } | ||||
func (n *NilResponseRecorder) Write(b []byte) (int, error) { | func (n *NilResponseRecorder) Write(b []byte) (int, error) { | ||||
n.Length = n.Length + len(b) | |||||
n.Length += len(b) | |||||
return len(b), nil | return len(b), nil | ||||
} | } | ||||
if err != nil { | if err != nil { | ||||
log.Fatalf("sql.Open: %v", err) | log.Fatalf("sql.Open: %v", err) | ||||
} | } | ||||
rows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'", | |||||
models.DbCfg.Name)) | |||||
rows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'", models.DbCfg.Name)) | |||||
if err != nil { | if err != nil { | ||||
log.Fatalf("db.Query: %v", err) | log.Fatalf("db.Query: %v", err) | ||||
} | } | ||||
resp := MakeRequest(t, req, expectedStatus) | resp := MakeRequest(t, req, expectedStatus) | ||||
ch := http.Header{} | ch := http.Header{} | ||||
ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";")) | |||||
ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";")) | |||||
cr := http.Request{Header: ch} | cr := http.Request{Header: ch} | ||||
s.jar.SetCookies(baseURL, cr.Cookies()) | s.jar.SetCookies(baseURL, cr.Cookies()) | ||||
resp := MakeRequestNilResponseRecorder(t, req, expectedStatus) | resp := MakeRequestNilResponseRecorder(t, req, expectedStatus) | ||||
ch := http.Header{} | ch := http.Header{} | ||||
ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";")) | |||||
ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";")) | |||||
cr := http.Request{Header: ch} | cr := http.Request{Header: ch} | ||||
s.jar.SetCookies(baseURL, cr.Cookies()) | s.jar.SetCookies(baseURL, cr.Cookies()) | ||||
resp = MakeRequest(t, req, http.StatusFound) | resp = MakeRequest(t, req, http.StatusFound) | ||||
ch := http.Header{} | ch := http.Header{} | ||||
ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";")) | |||||
ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";")) | |||||
cr := http.Request{Header: ch} | cr := http.Request{Header: ch} | ||||
session := emptyTestSession(t) | session := emptyTestSession(t) |
lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID} | lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID} | ||||
} | } | ||||
lfsID = lfsID + 1 | |||||
lfsID++ | |||||
lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject) | lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject) | ||||
assert.NoError(t, err) | assert.NoError(t, err) | ||||
contentStore := &lfs.ContentStore{BasePath: setting.LFS.ContentPath} | contentStore := &lfs.ContentStore{BasePath: setting.LFS.ContentPath} |
setting.NewLogServices(true) | setting.NewLogServices(true) | ||||
} | } | ||||
func getDialect() string { | |||||
dialect := "sqlite" | |||||
switch { | |||||
case setting.UseSQLite3: | |||||
dialect = "sqlite" | |||||
case setting.UseMySQL: | |||||
dialect = "mysql" | |||||
case setting.UsePostgreSQL: | |||||
dialect = "pgsql" | |||||
case setting.UseMSSQL: | |||||
dialect = "mssql" | |||||
} | |||||
return dialect | |||||
} | |||||
func availableVersions() ([]string, error) { | func availableVersions() ([]string, error) { | ||||
migrationsDir, err := os.Open("integrations/migration-test") | migrationsDir, err := os.Open("integrations/migration-test") | ||||
if err != nil { | if err != nil { |
_, filename, line, _ := runtime.Caller(actualSkip) | _, filename, line, _ := runtime.Caller(actualSkip) | ||||
if log.CanColorStdout { | if log.CanColorStdout { | ||||
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", log.NewColoredValue(t.Name()), strings.TrimPrefix(filename, prefix), line) | |||||
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line) | |||||
} else { | } else { | ||||
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line) | fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line) | ||||
} | } |
func init() { | func init() { | ||||
setting.AppVer = Version | setting.AppVer = Version | ||||
setting.AppBuiltWith = formatBuiltWith(Tags) | |||||
setting.AppBuiltWith = formatBuiltWith() | |||||
// Grab the original help templates | // Grab the original help templates | ||||
originalAppHelpTemplate = cli.AppHelpTemplate | originalAppHelpTemplate = cli.AppHelpTemplate | ||||
app.Usage = "A painless self-hosted Git service" | app.Usage = "A painless self-hosted Git service" | ||||
app.Description = `By default, gitea will start serving using the webserver with no | app.Description = `By default, gitea will start serving using the webserver with no | ||||
arguments - which can alternatively be run by running the subcommand web.` | arguments - which can alternatively be run by running the subcommand web.` | ||||
app.Version = Version + formatBuiltWith(Tags) | |||||
app.Version = Version + formatBuiltWith() | |||||
app.Commands = []cli.Command{ | app.Commands = []cli.Command{ | ||||
cmd.CmdWeb, | cmd.CmdWeb, | ||||
cmd.CmdServ, | cmd.CmdServ, | ||||
`, originalTemplate, setting.CustomPath, overrided, setting.CustomConf, setting.AppPath, setting.AppWorkPath) | `, originalTemplate, setting.CustomPath, overrided, setting.CustomConf, setting.AppPath, setting.AppWorkPath) | ||||
} | } | ||||
func formatBuiltWith(makeTags string) string { | |||||
func formatBuiltWith() string { | |||||
var version = runtime.Version() | var version = runtime.Version() | ||||
if len(MakeVersion) > 0 { | if len(MakeVersion) > 0 { | ||||
version = MakeVersion + ", " + runtime.Version() | version = MakeVersion + ", " + runtime.Version() |
"github.com/stretchr/testify/assert" | "github.com/stretchr/testify/assert" | ||||
) | ) | ||||
var accessModes = []AccessMode{ | |||||
AccessModeRead, | |||||
AccessModeWrite, | |||||
AccessModeAdmin, | |||||
AccessModeOwner, | |||||
} | |||||
func TestAccessLevel(t *testing.T) { | func TestAccessLevel(t *testing.T) { | ||||
assert.NoError(t, PrepareTestDatabase()) | assert.NoError(t, PrepareTestDatabase()) | ||||
} | } | ||||
// GetProtectedBranchByRepoID getting protected branch by repo ID | // GetProtectedBranchByRepoID getting protected branch by repo ID | ||||
func GetProtectedBranchByRepoID(RepoID int64) ([]*ProtectedBranch, error) { | |||||
func GetProtectedBranchByRepoID(repoID int64) ([]*ProtectedBranch, error) { | |||||
protectedBranches := make([]*ProtectedBranch, 0) | protectedBranches := make([]*ProtectedBranch, 0) | ||||
return protectedBranches, x.Where("repo_id = ?", RepoID).Desc("updated_unix").Find(&protectedBranches) | |||||
return protectedBranches, x.Where("repo_id = ?", repoID).Desc("updated_unix").Find(&protectedBranches) | |||||
} | } | ||||
// GetProtectedBranchBy getting protected branch by ID/Name | // GetProtectedBranchBy getting protected branch by ID/Name | ||||
func GetProtectedBranchBy(repoID int64, BranchName string) (*ProtectedBranch, error) { | |||||
rel := &ProtectedBranch{RepoID: repoID, BranchName: BranchName} | |||||
func GetProtectedBranchBy(repoID int64, branchName string) (*ProtectedBranch, error) { | |||||
rel := &ProtectedBranch{RepoID: repoID, BranchName: branchName} | |||||
has, err := x.Get(rel) | has, err := x.Get(rel) | ||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err |
scanner := r.scanner | scanner := r.scanner | ||||
if r.lastSha != nil { | if r.lastSha != nil { | ||||
blamePart = &BlamePart{*r.lastSha, make([]string, 0, 0)} | |||||
blamePart = &BlamePart{*r.lastSha, make([]string, 0)} | |||||
} | } | ||||
for scanner.Scan() { | for scanner.Scan() { | ||||
sha1 := lines[1] | sha1 := lines[1] | ||||
if blamePart == nil { | if blamePart == nil { | ||||
blamePart = &BlamePart{sha1, make([]string, 0, 0)} | |||||
blamePart = &BlamePart{sha1, make([]string, 0)} | |||||
} | } | ||||
if blamePart.Sha != sha1 { | if blamePart.Sha != sha1 { |
// headers + hunk header | // headers + hunk header | ||||
newHunk := make([]string, headerLines) | newHunk := make([]string, headerLines) | ||||
// transfer existing headers | // transfer existing headers | ||||
for idx, lof := range hunk[:headerLines] { | |||||
newHunk[idx] = lof | |||||
} | |||||
copy(newHunk, hunk[:headerLines]) | |||||
// transfer last n lines | // transfer last n lines | ||||
for _, lof := range hunk[len(hunk)-numbersOfLine-1:] { | |||||
newHunk = append(newHunk, lof) | |||||
} | |||||
newHunk = append(newHunk, hunk[len(hunk)-numbersOfLine-1:]...) | |||||
// calculate newBegin, ... by counting lines | // calculate newBegin, ... by counting lines | ||||
for i := len(hunk) - 1; i >= len(hunk)-numbersOfLine; i-- { | for i := len(hunk) - 1; i >= len(hunk)-numbersOfLine; i-- { | ||||
switch hunk[i][0] { | switch hunk[i][0] { | ||||
diff.Files = append(diff.Files, curFile) | diff.Files = append(diff.Files, curFile) | ||||
if len(diff.Files) >= maxFiles { | if len(diff.Files) >= maxFiles { | ||||
diff.IsIncomplete = true | diff.IsIncomplete = true | ||||
io.Copy(ioutil.Discard, reader) | |||||
_, err := io.Copy(ioutil.Discard, reader) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("Copy: %v", err) | |||||
} | |||||
break | break | ||||
} | } | ||||
curFileLinesCount = 0 | curFileLinesCount = 0 |
} | } | ||||
} | } | ||||
func assertLineEqual(t *testing.T, d1 *DiffLine, d2 *DiffLine) { | |||||
if d1 != d2 { | |||||
t.Errorf("%v should be equal %v", d1, d2) | |||||
} | |||||
} | |||||
func TestDiffToHTML(t *testing.T) { | func TestDiffToHTML(t *testing.T) { | ||||
assertEqual(t, "+foo <span class=\"added-code\">bar</span> biz", diffToHTML([]dmp.Diff{ | assertEqual(t, "+foo <span class=\"added-code\">bar</span> biz", diffToHTML([]dmp.Diff{ | ||||
{Type: dmp.DiffEqual, Text: "foo "}, | {Type: dmp.DiffEqual, Text: "foo "}, |
} | } | ||||
} | } | ||||
func (opts *IssuesOptions) setupSession(sess *xorm.Session) error { | |||||
func (opts *IssuesOptions) setupSession(sess *xorm.Session) { | |||||
if opts.Page >= 0 && opts.PageSize > 0 { | if opts.Page >= 0 && opts.PageSize > 0 { | ||||
var start int | var start int | ||||
if opts.Page == 0 { | if opts.Page == 0 { | ||||
fmt.Sprintf("issue.id = il%[1]d.issue_id AND il%[1]d.label_id = %[2]d", i, labelID)) | fmt.Sprintf("issue.id = il%[1]d.issue_id AND il%[1]d.label_id = %[2]d", i, labelID)) | ||||
} | } | ||||
} | } | ||||
return nil | |||||
} | } | ||||
// CountIssuesByRepo map from repoID to number of issues matching the options | // CountIssuesByRepo map from repoID to number of issues matching the options | ||||
sess := x.NewSession() | sess := x.NewSession() | ||||
defer sess.Close() | defer sess.Close() | ||||
if err := opts.setupSession(sess); err != nil { | |||||
return nil, err | |||||
} | |||||
opts.setupSession(sess) | |||||
countsSlice := make([]*struct { | countsSlice := make([]*struct { | ||||
RepoID int64 | RepoID int64 | ||||
sess := x.NewSession() | sess := x.NewSession() | ||||
defer sess.Close() | defer sess.Close() | ||||
if err := opts.setupSession(sess); err != nil { | |||||
return nil, err | |||||
} | |||||
opts.setupSession(sess) | |||||
sortIssuesSession(sess, opts.SortType) | sortIssuesSession(sess, opts.SortType) | ||||
issues := make([]*Issue, 0, setting.UI.IssuePagingNum) | issues := make([]*Issue, 0, setting.UI.IssuePagingNum) |
return err | return err | ||||
} | } | ||||
func (c *Comment) loadAttachments(e Engine) (err error) { | |||||
if len(c.Attachments) > 0 { | |||||
return | |||||
} | |||||
c.Attachments, err = getAttachmentsByCommentID(e, c.ID) | |||||
if err != nil { | |||||
log.Error("getAttachmentsByCommentID[%d]: %v", c.ID, err) | |||||
} | |||||
return err | |||||
} | |||||
// AfterDelete is invoked from XORM after the object is deleted. | // AfterDelete is invoked from XORM after the object is deleted. | ||||
func (c *Comment) AfterDelete() { | func (c *Comment) AfterDelete() { | ||||
if c.ID <= 0 { | if c.ID <= 0 { | ||||
return c.loadReview(x) | return c.loadReview(x) | ||||
} | } | ||||
func (c *Comment) checkInvalidation(e Engine, doer *User, repo *git.Repository, branch string) error { | |||||
func (c *Comment) checkInvalidation(doer *User, repo *git.Repository, branch string) error { | |||||
// FIXME differentiate between previous and proposed line | // FIXME differentiate between previous and proposed line | ||||
commit, err := repo.LineBlame(branch, repo.Path, c.TreePath, uint(c.UnsignedLine())) | commit, err := repo.LineBlame(branch, repo.Path, c.TreePath, uint(c.UnsignedLine())) | ||||
if err != nil { | if err != nil { | ||||
// CheckInvalidation checks if the line of code comment got changed by another commit. | // CheckInvalidation checks if the line of code comment got changed by another commit. | ||||
// If the line got changed the comment is going to be invalidated. | // If the line got changed the comment is going to be invalidated. | ||||
func (c *Comment) CheckInvalidation(repo *git.Repository, doer *User, branch string) error { | func (c *Comment) CheckInvalidation(repo *git.Repository, doer *User, branch string) error { | ||||
return c.checkInvalidation(x, doer, repo, branch) | |||||
return c.checkInvalidation(doer, repo, branch) | |||||
} | } | ||||
// DiffSide returns "previous" if Comment.Line is a LOC of the previous changes and "proposed" if it is a LOC of the proposed changes. | // DiffSide returns "previous" if Comment.Line is a LOC of the previous changes and "proposed" if it is a LOC of the proposed changes. | ||||
commit, err := gitRepo.LineBlame(pr.GetGitRefName(), gitRepo.Path, treePath, uint(line)) | commit, err := gitRepo.LineBlame(pr.GetGitRefName(), gitRepo.Path, treePath, uint(line)) | ||||
if err == nil { | if err == nil { | ||||
commitID = commit.ID.String() | commitID = commit.ID.String() | ||||
} else if err != nil && !strings.Contains(err.Error(), "exit status 128 - fatal: no such path") { | |||||
} else if !strings.Contains(err.Error(), "exit status 128 - fatal: no such path") { | |||||
return nil, fmt.Errorf("LineBlame[%s, %s, %s, %d]: %v", pr.GetGitRefName(), gitRepo.Path, treePath, line, err) | return nil, fmt.Errorf("LineBlame[%s, %s, %s, %d]: %v", pr.GetGitRefName(), gitRepo.Path, treePath, line, err) | ||||
} | } | ||||
} | } |
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
left = left - limit | |||||
left -= limit | |||||
posterIDs = posterIDs[limit:] | posterIDs = posterIDs[limit:] | ||||
} | } | ||||
var label Label | var label Label | ||||
err = rows.Scan(&label) | err = rows.Scan(&label) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
return err | return err | ||||
} | } | ||||
commentLabels[label.ID] = &label | commentLabels[label.ID] = &label | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
_ = rows.Close() | |||||
left -= limit | |||||
labelIDs = labelIDs[limit:] | labelIDs = labelIDs[limit:] | ||||
} | } | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
left = left - limit | |||||
left -= limit | |||||
milestoneIDs = milestoneIDs[limit:] | milestoneIDs = milestoneIDs[limit:] | ||||
} | } | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
left = left - limit | |||||
left -= limit | |||||
milestoneIDs = milestoneIDs[limit:] | milestoneIDs = milestoneIDs[limit:] | ||||
} | } | ||||
assignees[user.ID] = &user | assignees[user.ID] = &user | ||||
} | } | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
left = left - limit | |||||
left -= limit | |||||
assigneeIDs = assigneeIDs[limit:] | assigneeIDs = assigneeIDs[limit:] | ||||
} | } | ||||
issues[issue.ID] = &issue | issues[issue.ID] = &issue | ||||
} | } | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
left = left - limit | |||||
left -= limit | |||||
issueIDs = issueIDs[limit:] | issueIDs = issueIDs[limit:] | ||||
} | } | ||||
var issue Issue | var issue Issue | ||||
err = rows.Scan(&issue) | err = rows.Scan(&issue) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
return err | return err | ||||
} | } | ||||
issues[issue.ID] = &issue | issues[issue.ID] = &issue | ||||
} | } | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
left = left - limit | |||||
left -= limit | |||||
issueIDs = issueIDs[limit:] | issueIDs = issueIDs[limit:] | ||||
} | } | ||||
var attachment Attachment | var attachment Attachment | ||||
err = rows.Scan(&attachment) | err = rows.Scan(&attachment) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
return err | return err | ||||
} | } | ||||
attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment) | attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment) | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
_ = rows.Close() | |||||
left -= limit | |||||
commentsIDs = commentsIDs[limit:] | commentsIDs = commentsIDs[limit:] | ||||
} | } | ||||
var review Review | var review Review | ||||
err = rows.Scan(&review) | err = rows.Scan(&review) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
return err | return err | ||||
} | } | ||||
reviews[review.ID] = &review | reviews[review.ID] = &review | ||||
} | } | ||||
rows.Close() | |||||
_ = rows.Close() | |||||
left = left - limit | |||||
left -= limit | |||||
reviewIDs = reviewIDs[limit:] | reviewIDs = reviewIDs[limit:] | ||||
} | } | ||||
return sess.Commit() | return sess.Commit() | ||||
} | } | ||||
func getIssueLabels(e Engine, issueID int64) ([]*IssueLabel, error) { | |||||
issueLabels := make([]*IssueLabel, 0, 10) | |||||
return issueLabels, e. | |||||
Where("issue_id=?", issueID). | |||||
Asc("label_id"). | |||||
Find(&issueLabels) | |||||
} | |||||
func deleteIssueLabel(e *xorm.Session, issue *Issue, label *Label, doer *User) (err error) { | func deleteIssueLabel(e *xorm.Session, issue *Issue, label *Label, doer *User) (err error) { | ||||
if count, err := e.Delete(&IssueLabel{ | if count, err := e.Delete(&IssueLabel{ | ||||
IssueID: issue.ID, | IssueID: issue.ID, |
import ( | import ( | ||||
"fmt" | "fmt" | ||||
"code.gitea.io/gitea/modules/log" | |||||
"github.com/go-xorm/builder" | "github.com/go-xorm/builder" | ||||
) | ) | ||||
if err != nil { | if err != nil { | ||||
return nil, fmt.Errorf("find repository: %v", err) | return nil, fmt.Errorf("find repository: %v", err) | ||||
} | } | ||||
left = left - limit | |||||
left -= limit | |||||
repoIDs = repoIDs[limit:] | repoIDs = repoIDs[limit:] | ||||
} | } | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
left = left - limit | |||||
left -= limit | |||||
posterIDs = posterIDs[limit:] | posterIDs = posterIDs[limit:] | ||||
} | } | ||||
var labelIssue LabelIssue | var labelIssue LabelIssue | ||||
err = rows.Scan(&labelIssue) | err = rows.Scan(&labelIssue) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadLabels: Close: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
issueLabels[labelIssue.IssueLabel.IssueID] = append(issueLabels[labelIssue.IssueLabel.IssueID], labelIssue.Label) | issueLabels[labelIssue.IssueLabel.IssueID] = append(issueLabels[labelIssue.IssueLabel.IssueID], labelIssue.Label) | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadLabels: Close: %v", err) | |||||
} | |||||
left -= limit | |||||
issueIDs = issueIDs[limit:] | issueIDs = issueIDs[limit:] | ||||
} | } | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
left = left - limit | |||||
left -= limit | |||||
milestoneIDs = milestoneIDs[limit:] | milestoneIDs = milestoneIDs[limit:] | ||||
} | } | ||||
var assigneeIssue AssigneeIssue | var assigneeIssue AssigneeIssue | ||||
err = rows.Scan(&assigneeIssue) | err = rows.Scan(&assigneeIssue) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadAssignees: Close: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
assignees[assigneeIssue.IssueAssignee.IssueID] = append(assignees[assigneeIssue.IssueAssignee.IssueID], assigneeIssue.Assignee) | assignees[assigneeIssue.IssueAssignee.IssueID] = append(assignees[assigneeIssue.IssueAssignee.IssueID], assigneeIssue.Assignee) | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadAssignees: Close: %v", err) | |||||
} | |||||
left -= limit | |||||
issueIDs = issueIDs[limit:] | issueIDs = issueIDs[limit:] | ||||
} | } | ||||
var pr PullRequest | var pr PullRequest | ||||
err = rows.Scan(&pr) | err = rows.Scan(&pr) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadPullRequests: Close: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
pullRequestMaps[pr.IssueID] = &pr | pullRequestMaps[pr.IssueID] = &pr | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadPullRequests: Close: %v", err) | |||||
} | |||||
left -= limit | |||||
issuesIDs = issuesIDs[limit:] | issuesIDs = issuesIDs[limit:] | ||||
} | } | ||||
var attachment Attachment | var attachment Attachment | ||||
err = rows.Scan(&attachment) | err = rows.Scan(&attachment) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadAttachments: Close: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment) | attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment) | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadAttachments: Close: %v", err) | |||||
} | |||||
left -= limit | |||||
issuesIDs = issuesIDs[limit:] | issuesIDs = issuesIDs[limit:] | ||||
} | } | ||||
var comment Comment | var comment Comment | ||||
err = rows.Scan(&comment) | err = rows.Scan(&comment) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadComments: Close: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
comments[comment.IssueID] = append(comments[comment.IssueID], &comment) | comments[comment.IssueID] = append(comments[comment.IssueID], &comment) | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadComments: Close: %v", err) | |||||
} | |||||
left -= limit | |||||
issuesIDs = issuesIDs[limit:] | issuesIDs = issuesIDs[limit:] | ||||
} | } | ||||
var totalTime totalTimesByIssue | var totalTime totalTimesByIssue | ||||
err = rows.Scan(&totalTime) | err = rows.Scan(&totalTime) | ||||
if err != nil { | if err != nil { | ||||
rows.Close() | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
trackedTimes[totalTime.IssueID] = totalTime.Time | trackedTimes[totalTime.IssueID] = totalTime.Time | ||||
} | } | ||||
rows.Close() | |||||
left = left - limit | |||||
// When there are no rows left and we try to close it, xorm will complain with an error. | |||||
// Since that is not relevant for us, we can safely ignore it. | |||||
if err := rows.Close(); err != nil { | |||||
log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err) | |||||
} | |||||
left -= limit | |||||
ids = ids[limit:] | ids = ids[limit:] | ||||
} | } | ||||
} | } | ||||
// loadAttributes loads all attributes, expect for attachments and comments | // loadAttributes loads all attributes, expect for attachments and comments | ||||
func (issues IssueList) loadAttributes(e Engine) (err error) { | |||||
if _, err = issues.loadRepositories(e); err != nil { | |||||
return | |||||
func (issues IssueList) loadAttributes(e Engine) error { | |||||
if _, err := issues.loadRepositories(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadRepositories: %v", err) | |||||
} | } | ||||
if err = issues.loadPosters(e); err != nil { | |||||
return | |||||
if err := issues.loadPosters(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadPosters: %v", err) | |||||
} | } | ||||
if err = issues.loadLabels(e); err != nil { | |||||
return | |||||
if err := issues.loadLabels(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadLabels: %v", err) | |||||
} | } | ||||
if err = issues.loadMilestones(e); err != nil { | |||||
return | |||||
if err := issues.loadMilestones(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadMilestones: %v", err) | |||||
} | } | ||||
if err = issues.loadAssignees(e); err != nil { | |||||
return | |||||
if err := issues.loadAssignees(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadAssignees: %v", err) | |||||
} | } | ||||
if err = issues.loadPullRequests(e); err != nil { | |||||
return | |||||
if err := issues.loadPullRequests(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadPullRequests: %v", err) | |||||
} | } | ||||
if err = issues.loadTotalTrackedTimes(e); err != nil { | |||||
return | |||||
if err := issues.loadTotalTrackedTimes(e); err != nil { | |||||
return fmt.Errorf("issue.loadAttributes: loadTotalTrackedTimes: %v", err) | |||||
} | } | ||||
return nil | return nil |
// XORMLogBridge a logger bridge from Logger to xorm | // XORMLogBridge a logger bridge from Logger to xorm | ||||
type XORMLogBridge struct { | type XORMLogBridge struct { | ||||
showSQL bool | showSQL bool | ||||
level core.LogLevel | |||||
logger *log.Logger | logger *log.Logger | ||||
} | } | ||||
// Debug show debug log | // Debug show debug log | ||||
func (l *XORMLogBridge) Debug(v ...interface{}) { | func (l *XORMLogBridge) Debug(v ...interface{}) { | ||||
l.Log(2, log.DEBUG, fmt.Sprint(v...)) | |||||
_ = l.Log(2, log.DEBUG, fmt.Sprint(v...)) | |||||
} | } | ||||
// Debugf show debug log | // Debugf show debug log | ||||
func (l *XORMLogBridge) Debugf(format string, v ...interface{}) { | func (l *XORMLogBridge) Debugf(format string, v ...interface{}) { | ||||
l.Log(2, log.DEBUG, format, v...) | |||||
_ = l.Log(2, log.DEBUG, format, v...) | |||||
} | } | ||||
// Error show error log | // Error show error log | ||||
func (l *XORMLogBridge) Error(v ...interface{}) { | func (l *XORMLogBridge) Error(v ...interface{}) { | ||||
l.Log(2, log.ERROR, fmt.Sprint(v...)) | |||||
_ = l.Log(2, log.ERROR, fmt.Sprint(v...)) | |||||
} | } | ||||
// Errorf show error log | // Errorf show error log | ||||
func (l *XORMLogBridge) Errorf(format string, v ...interface{}) { | func (l *XORMLogBridge) Errorf(format string, v ...interface{}) { | ||||
l.Log(2, log.ERROR, format, v...) | |||||
_ = l.Log(2, log.ERROR, format, v...) | |||||
} | } | ||||
// Info show information level log | // Info show information level log | ||||
func (l *XORMLogBridge) Info(v ...interface{}) { | func (l *XORMLogBridge) Info(v ...interface{}) { | ||||
l.Log(2, log.INFO, fmt.Sprint(v...)) | |||||
_ = l.Log(2, log.INFO, fmt.Sprint(v...)) | |||||
} | } | ||||
// Infof show information level log | // Infof show information level log | ||||
func (l *XORMLogBridge) Infof(format string, v ...interface{}) { | func (l *XORMLogBridge) Infof(format string, v ...interface{}) { | ||||
l.Log(2, log.INFO, format, v...) | |||||
_ = l.Log(2, log.INFO, format, v...) | |||||
} | } | ||||
// Warn show warning log | // Warn show warning log | ||||
func (l *XORMLogBridge) Warn(v ...interface{}) { | func (l *XORMLogBridge) Warn(v ...interface{}) { | ||||
l.Log(2, log.WARN, fmt.Sprint(v...)) | |||||
_ = l.Log(2, log.WARN, fmt.Sprint(v...)) | |||||
} | } | ||||
// Warnf show warnning log | // Warnf show warnning log | ||||
func (l *XORMLogBridge) Warnf(format string, v ...interface{}) { | func (l *XORMLogBridge) Warnf(format string, v ...interface{}) { | ||||
l.Log(2, log.WARN, format, v...) | |||||
_ = l.Log(2, log.WARN, format, v...) | |||||
} | } | ||||
// Level get logger level | // Level get logger level |
// BeforeSet is invoked from XORM before setting the value of a field of this object. | // BeforeSet is invoked from XORM before setting the value of a field of this object. | ||||
func (source *LoginSource) BeforeSet(colName string, val xorm.Cell) { | func (source *LoginSource) BeforeSet(colName string, val xorm.Cell) { | ||||
switch colName { | |||||
case "type": | |||||
if colName == "type" { | |||||
switch LoginType(Cell2Int64(val)) { | switch LoginType(Cell2Int64(val)) { | ||||
case LoginLDAP, LoginDLDAP: | case LoginLDAP, LoginDLDAP: | ||||
source.Cfg = new(LDAPConfig) | source.Cfg = new(LDAPConfig) | ||||
oAuth2Config := source.OAuth2() | oAuth2Config := source.OAuth2() | ||||
err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | ||||
err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config) | err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config) | ||||
if err != nil { | if err != nil { | ||||
// remove the LoginSource in case of errors while registering OAuth2 providers | // remove the LoginSource in case of errors while registering OAuth2 providers | ||||
x.Delete(source) | |||||
if _, err := x.Delete(source); err != nil { | |||||
log.Error("CreateLoginSource: Error while wrapOpenIDConnectInitializeError: %v", err) | |||||
} | |||||
return err | |||||
} | } | ||||
} | } | ||||
return err | return err | ||||
oAuth2Config := source.OAuth2() | oAuth2Config := source.OAuth2() | ||||
err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | ||||
err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config) | err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config) | ||||
if err != nil { | if err != nil { | ||||
// restore original values since we cannot update the provider it self | // restore original values since we cannot update the provider it self | ||||
x.ID(source.ID).AllCols().Update(originalLoginSource) | |||||
if _, err := x.ID(source.ID).AllCols().Update(originalLoginSource); err != nil { | |||||
log.Error("UpdateSource: Error while wrapOpenIDConnectInitializeError: %v", err) | |||||
} | |||||
return err | |||||
} | } | ||||
} | } | ||||
return err | return err | ||||
} | } | ||||
var ( | var ( | ||||
alphaDashDotPattern = regexp.MustCompile("[^\\w-\\.]") | |||||
alphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`) | |||||
) | ) | ||||
// LoginViaLDAP queries if login/password is valid against the LDAP directory pool, | // LoginViaLDAP queries if login/password is valid against the LDAP directory pool, | ||||
if !autoRegister { | if !autoRegister { | ||||
if isAttributeSSHPublicKeySet && synchronizeLdapSSHPublicKeys(user, source, sr.SSHPublicKey) { | if isAttributeSSHPublicKeySet && synchronizeLdapSSHPublicKeys(user, source, sr.SSHPublicKey) { | ||||
RewriteAllPublicKeys() | |||||
return user, RewriteAllPublicKeys() | |||||
} | } | ||||
return user, nil | return user, nil | ||||
err := CreateUser(user) | err := CreateUser(user) | ||||
if err == nil && isAttributeSSHPublicKeySet && addLdapSSHPublicKeys(user, source, sr.SSHPublicKey) { | if err == nil && isAttributeSSHPublicKeySet && addLdapSSHPublicKeys(user, source, sr.SSHPublicKey) { | ||||
RewriteAllPublicKeys() | |||||
err = RewriteAllPublicKeys() | |||||
} | } | ||||
return user, err | return user, err |
func composeIssueCommentMessage(issue *Issue, doer *User, content string, comment *Comment, tplName base.TplName, tos []string, info string) *mailer.Message { | func composeIssueCommentMessage(issue *Issue, doer *User, content string, comment *Comment, tplName base.TplName, tos []string, info string) *mailer.Message { | ||||
subject := issue.mailSubject() | subject := issue.mailSubject() | ||||
issue.LoadRepo() | |||||
err := issue.LoadRepo() | |||||
if err != nil { | |||||
log.Error("LoadRepo: %v", err) | |||||
} | |||||
body := string(markup.RenderByType(markdown.MarkupName, []byte(content), issue.Repo.HTMLURL(), issue.Repo.ComposeMetas())) | body := string(markup.RenderByType(markdown.MarkupName, []byte(content), issue.Repo.HTMLURL(), issue.Repo.ComposeMetas())) | ||||
data := make(map[string]interface{}, 10) | |||||
var data = make(map[string]interface{}, 10) | |||||
if comment != nil { | if comment != nil { | ||||
data = composeTplData(subject, body, issue.HTMLURL()+"#"+comment.HashTag()) | data = composeTplData(subject, body, issue.HTMLURL()+"#"+comment.HashTag()) | ||||
} else { | } else { |
return fmt.Errorf("marshal action content[%d]: %v", actID, err) | return fmt.Errorf("marshal action content[%d]: %v", actID, err) | ||||
} | } | ||||
if _, err = sess.Id(actID).Update(&Action{ | |||||
if _, err = sess.ID(actID).Update(&Action{ | |||||
Content: string(p), | Content: string(p), | ||||
}); err != nil { | }); err != nil { | ||||
return fmt.Errorf("update action[%d]: %v", actID, err) | return fmt.Errorf("update action[%d]: %v", actID, err) | ||||
// Update database first because this is where error happens the most often. | // Update database first because this is where error happens the most often. | ||||
for _, attach := range attachments { | for _, attach := range attachments { | ||||
if _, err = sess.Id(attach.ID).Update(attach); err != nil { | |||||
if _, err = sess.ID(attach.ID).Update(attach); err != nil { | |||||
return err | return err | ||||
} | } | ||||
if pull.Index == 0 { | if pull.Index == 0 { | ||||
continue | continue | ||||
} | } | ||||
if _, err = sess.Id(pull.ID).Update(pull); err != nil { | |||||
if _, err = sess.ID(pull.ID).Update(pull); err != nil { | |||||
return err | return err | ||||
} | } | ||||
} | } | ||||
if org.Salt, err = generate.GetRandomString(10); err != nil { | if org.Salt, err = generate.GetRandomString(10); err != nil { | ||||
return err | return err | ||||
} | } | ||||
if _, err = sess.Id(org.ID).Update(org); err != nil { | |||||
if _, err = sess.ID(org.ID).Update(org); err != nil { | |||||
return err | return err | ||||
} | } | ||||
} | } |
return fmt.Errorf("Query repositories: %v", err) | return fmt.Errorf("Query repositories: %v", err) | ||||
} | } | ||||
for _, mirror := range mirrors { | for _, mirror := range mirrors { | ||||
mirror.Interval = mirror.Interval * time.Hour | |||||
mirror.Interval *= time.Hour | |||||
if mirror.Interval < setting.Mirror.MinInterval { | if mirror.Interval < setting.Mirror.MinInterval { | ||||
log.Info("Mirror interval less than Mirror.MinInterval, setting default interval: repo id %v", mirror.RepoID) | log.Info("Mirror interval less than Mirror.MinInterval, setting default interval: repo id %v", mirror.RepoID) | ||||
mirror.Interval = setting.Mirror.DefaultInterval | mirror.Interval = setting.Mirror.DefaultInterval | ||||
} | } | ||||
log.Debug("Mirror interval set to %v for repo id %v", mirror.Interval, mirror.RepoID) | log.Debug("Mirror interval set to %v for repo id %v", mirror.Interval, mirror.RepoID) | ||||
_, err := sess.Id(mirror.ID).Cols("interval").Update(mirror) | |||||
_, err := sess.ID(mirror.ID).Cols("interval").Update(mirror) | |||||
if err != nil { | if err != nil { | ||||
return fmt.Errorf("update mirror interval failed: %v", err) | return fmt.Errorf("update mirror interval failed: %v", err) | ||||
} | } |
if len(indexes) >= 1 { | if len(indexes) >= 1 { | ||||
_, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository") | _, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository") | ||||
if err != nil { | |||||
return fmt.Errorf("Drop index failed: %v", err) | |||||
} | |||||
} | } | ||||
} else { | } else { | ||||
_, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository") | _, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository") |
if len(indexes) >= 1 { | if len(indexes) >= 1 { | ||||
_, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token") | _, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token") | ||||
if err != nil { | |||||
return err | |||||
} | |||||
} | } | ||||
} else { | } else { | ||||
_, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token") | _, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token") |
Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *xorm.Session | Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *xorm.Session | ||||
SQL(interface{}, ...interface{}) *xorm.Session | SQL(interface{}, ...interface{}) *xorm.Session | ||||
Where(interface{}, ...interface{}) *xorm.Session | Where(interface{}, ...interface{}) *xorm.Session | ||||
Asc(colNames ...string) *xorm.Session | |||||
} | } | ||||
var ( | var ( | ||||
return host, port | return host, port | ||||
} | } | ||||
func getPostgreSQLConnectionString(DBHost, DBUser, DBPasswd, DBName, DBParam, DBSSLMode string) (connStr string) { | |||||
host, port := parsePostgreSQLHostPort(DBHost) | |||||
func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbParam, dbsslMode string) (connStr string) { | |||||
host, port := parsePostgreSQLHostPort(dbHost) | |||||
if host[0] == '/' { // looks like a unix socket | if host[0] == '/' { // looks like a unix socket | ||||
connStr = fmt.Sprintf("postgres://%s:%s@:%s/%s%ssslmode=%s&host=%s", | connStr = fmt.Sprintf("postgres://%s:%s@:%s/%s%ssslmode=%s&host=%s", | ||||
url.PathEscape(DBUser), url.PathEscape(DBPasswd), port, DBName, DBParam, DBSSLMode, host) | |||||
url.PathEscape(dbUser), url.PathEscape(dbPasswd), port, dbName, dbParam, dbsslMode, host) | |||||
} else { | } else { | ||||
connStr = fmt.Sprintf("postgres://%s:%s@%s:%s/%s%ssslmode=%s", | connStr = fmt.Sprintf("postgres://%s:%s@%s:%s/%s%ssslmode=%s", | ||||
url.PathEscape(DBUser), url.PathEscape(DBPasswd), host, port, DBName, DBParam, DBSSLMode) | |||||
url.PathEscape(dbUser), url.PathEscape(dbPasswd), host, port, dbName, dbParam, dbsslMode) | |||||
} | } | ||||
return | return | ||||
} | } |
} | } | ||||
} | } | ||||
issue.loadRepo(e) | |||||
err = issue.loadRepo(e) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
for _, watch := range watches { | for _, watch := range watches { | ||||
issue.Repo.Units = nil | issue.Repo.Units = nil |
for _, source := range loginSources { | for _, source := range loginSources { | ||||
oAuth2Config := source.OAuth2() | oAuth2Config := source.OAuth2() | ||||
oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | |||||
err := oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
} | } | ||||
return nil | return nil | ||||
} | } |
func getOAuth2ApplicationByID(e Engine, id int64) (app *OAuth2Application, err error) { | func getOAuth2ApplicationByID(e Engine, id int64) (app *OAuth2Application, err error) { | ||||
app = new(OAuth2Application) | app = new(OAuth2Application) | ||||
has, err := e.ID(id).Get(app) | has, err := e.ID(id).Get(app) | ||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if !has { | if !has { | ||||
return nil, ErrOAuthApplicationNotFound{ID: id} | return nil, ErrOAuthApplicationNotFound{ID: id} | ||||
} | } | ||||
// ValidateCodeChallenge validates the given verifier against the saved code challenge. This is part of the PKCE implementation. | // ValidateCodeChallenge validates the given verifier against the saved code challenge. This is part of the PKCE implementation. | ||||
func (code *OAuth2AuthorizationCode) ValidateCodeChallenge(verifier string) bool { | func (code *OAuth2AuthorizationCode) ValidateCodeChallenge(verifier string) bool { | ||||
return code.validateCodeChallenge(x, verifier) | |||||
return code.validateCodeChallenge(verifier) | |||||
} | } | ||||
func (code *OAuth2AuthorizationCode) validateCodeChallenge(e Engine, verifier string) bool { | |||||
func (code *OAuth2AuthorizationCode) validateCodeChallenge(verifier string) bool { | |||||
switch code.CodeChallengeMethod { | switch code.CodeChallengeMethod { | ||||
case "S256": | case "S256": | ||||
// base64url(SHA256(verifier)) see https://tools.ietf.org/html/rfc7636#section-4.6 | // base64url(SHA256(verifier)) see https://tools.ietf.org/html/rfc7636#section-4.6 |
} | } | ||||
if _, err = sess.Insert(&units); err != nil { | if _, err = sess.Insert(&units); err != nil { | ||||
sess.Rollback() | |||||
if err := sess.Rollback(); err != nil { | |||||
log.Error("CreateOrganization: sess.Rollback: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
func hasOrgVisible(e Engine, org *User, user *User) bool { | func hasOrgVisible(e Engine, org *User, user *User) bool { | ||||
// Not SignedUser | // Not SignedUser | ||||
if user == nil { | if user == nil { | ||||
if org.Visibility == structs.VisibleTypePublic { | |||||
return true | |||||
} | |||||
return false | |||||
return org.Visibility == structs.VisibleTypePublic | |||||
} | } | ||||
if user.IsAdmin { | if user.IsAdmin { | ||||
} | } | ||||
if _, err := sess.Insert(ou); err != nil { | if _, err := sess.Insert(ou); err != nil { | ||||
sess.Rollback() | |||||
if err := sess.Rollback(); err != nil { | |||||
log.Error("AddOrgUser: sess.Rollback: %v", err) | |||||
} | |||||
return err | return err | ||||
} else if _, err = sess.Exec("UPDATE `user` SET num_members = num_members + 1 WHERE id = ?", orgID); err != nil { | } else if _, err = sess.Exec("UPDATE `user` SET num_members = num_members + 1 WHERE id = ?", orgID); err != nil { | ||||
sess.Rollback() | |||||
if err := sess.Rollback(); err != nil { | |||||
log.Error("AddOrgUser: sess.Rollback: %v", err) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
has, err := x.ID(t.OrgID).Get(new(User)) | has, err := x.ID(t.OrgID).Get(new(User)) | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} else if !has { | |||||
} | |||||
if !has { | |||||
return ErrOrgNotExist{t.OrgID, ""} | return ErrOrgNotExist{t.OrgID, ""} | ||||
} | } | ||||
Get(new(Team)) | Get(new(Team)) | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} else if has { | |||||
} | |||||
if has { | |||||
return ErrTeamAlreadyExist{t.OrgID, t.LowerName} | return ErrTeamAlreadyExist{t.OrgID, t.LowerName} | ||||
} | } | ||||
} | } | ||||
if _, err = sess.Insert(t); err != nil { | if _, err = sess.Insert(t); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("NewTeam sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
unit.TeamID = t.ID | unit.TeamID = t.ID | ||||
} | } | ||||
if _, err = sess.Insert(&t.Units); err != nil { | if _, err = sess.Insert(&t.Units); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("NewTeam sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
} | } | ||||
// Update organization number of teams. | // Update organization number of teams. | ||||
if _, err = sess.Exec("UPDATE `user` SET num_teams=num_teams+1 WHERE id = ?", t.OrgID); err != nil { | if _, err = sess.Exec("UPDATE `user` SET num_teams=num_teams+1 WHERE id = ?", t.OrgID); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("NewTeam sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
return sess.Commit() | return sess.Commit() | ||||
} | } | ||||
if _, err = sess.Insert(&t.Units); err != nil { | if _, err = sess.Insert(&t.Units); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("UpdateTeam sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if _, err = sess.Insert(units); err != nil { | if _, err = sess.Insert(units); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("UpdateTeamUnits sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
assert.EqualValues(t, 3, org.ID) | assert.EqualValues(t, 3, org.ID) | ||||
assert.Equal(t, "user3", org.Name) | assert.Equal(t, "user3", org.Name) | ||||
org, err = GetOrgByName("user2") // user2 is an individual | |||||
_, err = GetOrgByName("user2") // user2 is an individual | |||||
assert.True(t, IsErrOrgNotExist(err)) | assert.True(t, IsErrOrgNotExist(err)) | ||||
org, err = GetOrgByName("") // corner case | |||||
_, err = GetOrgByName("") // corner case | |||||
assert.True(t, IsErrOrgNotExist(err)) | assert.True(t, IsErrOrgNotExist(err)) | ||||
} | } | ||||
func TestAccessibleReposEnv_RepoIDs(t *testing.T) { | func TestAccessibleReposEnv_RepoIDs(t *testing.T) { | ||||
assert.NoError(t, PrepareTestDatabase()) | assert.NoError(t, PrepareTestDatabase()) | ||||
org := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User) | org := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User) | ||||
testSuccess := func(userID, page, pageSize int64, expectedRepoIDs []int64) { | |||||
testSuccess := func(userID, _, pageSize int64, expectedRepoIDs []int64) { | |||||
env, err := org.AccessibleReposEnv(userID) | env, err := org.AccessibleReposEnv(userID) | ||||
assert.NoError(t, err) | assert.NoError(t, err) | ||||
repoIDs, err := env.RepoIDs(1, 100) | repoIDs, err := env.RepoIDs(1, 100) |
} | } | ||||
} | } | ||||
if baseBranch, err = pr.BaseRepo.GetBranch(pr.BaseBranch); err != nil { | if baseBranch, err = pr.BaseRepo.GetBranch(pr.BaseBranch); err != nil { | ||||
log.Error("pr.BaseRepo.GetBranch[%d]: %v", pr.BaseBranch, err) | |||||
return nil | return nil | ||||
} | } | ||||
if baseCommit, err = baseBranch.GetCommit(); err != nil { | if baseCommit, err = baseBranch.GetCommit(); err != nil { | ||||
log.Error("baseBranch.GetCommit[%d]: %v", pr.ID, err) | |||||
return nil | return nil | ||||
} | } | ||||
if headBranch, err = pr.HeadRepo.GetBranch(pr.HeadBranch); err != nil { | if headBranch, err = pr.HeadRepo.GetBranch(pr.HeadBranch); err != nil { | ||||
log.Error("pr.HeadRepo.GetBranch[%d]: %v", pr.HeadBranch, err) | |||||
return nil | return nil | ||||
} | } | ||||
if headCommit, err = headBranch.GetCommit(); err != nil { | if headCommit, err = headBranch.GetCommit(); err != nil { | ||||
log.Error("headBranch.GetCommit[%d]: %v", pr.ID, err) | |||||
return nil | return nil | ||||
} | } | ||||
apiBaseBranchInfo := &api.PRBranchInfo{ | apiBaseBranchInfo := &api.PRBranchInfo{ | ||||
Repository: pr.HeadRepo.innerAPIFormat(e, AccessModeNone, false), | Repository: pr.HeadRepo.innerAPIFormat(e, AccessModeNone, false), | ||||
} | } | ||||
pr.Issue.loadRepo(e) | |||||
if err = pr.Issue.loadRepo(e); err != nil { | |||||
log.Error("pr.Issue.loadRepo[%d]: %v", pr.ID, err) | |||||
return nil | |||||
} | |||||
apiPullRequest := &api.PullRequest{ | apiPullRequest := &api.PullRequest{ | ||||
ID: pr.ID, | ID: pr.ID, | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
defer RemoveTemporaryPath(tmpBasePath) | |||||
defer func() { | |||||
if err := RemoveTemporaryPath(tmpBasePath); err != nil { | |||||
log.Error("Merge: RemoveTemporaryPath: %s", err) | |||||
} | |||||
}() | |||||
headRepoPath := RepoPath(pr.HeadUserName, pr.HeadRepo.Name) | headRepoPath := RepoPath(pr.HeadUserName, pr.HeadRepo.Name) | ||||
return fmt.Errorf("AddRemote: %v", err) | return fmt.Errorf("AddRemote: %v", err) | ||||
} | } | ||||
defer func() { | defer func() { | ||||
headGitRepo.RemoveRemote(tmpRemote) | |||||
if err := headGitRepo.RemoveRemote(tmpRemote); err != nil { | |||||
log.Error("UpdatePatch: RemoveRemote: %s", err) | |||||
} | |||||
}() | }() | ||||
pr.MergeBase, _, err = headGitRepo.GetMergeBase(tmpRemote, pr.BaseBranch, pr.HeadBranch) | pr.MergeBase, _, err = headGitRepo.GetMergeBase(tmpRemote, pr.BaseBranch, pr.HeadBranch) | ||||
if err != nil { | if err != nil { | ||||
return fmt.Errorf("headGitRepo.AddRemote: %v", err) | return fmt.Errorf("headGitRepo.AddRemote: %v", err) | ||||
} | } | ||||
// Make sure to remove the remote even if the push fails | // Make sure to remove the remote even if the push fails | ||||
defer headGitRepo.RemoveRemote(tmpRemoteName) | |||||
defer func() { | |||||
if err := headGitRepo.RemoveRemote(tmpRemoteName); err != nil { | |||||
log.Error("PushToBaseRepo: RemoveRemote: %s", err) | |||||
} | |||||
}() | |||||
headFile := pr.GetGitRefName() | headFile := pr.GetGitRefName() | ||||
assert.NoError(t, err) | assert.NoError(t, err) | ||||
assert.Equal(t, int64(2), pr.ID) | assert.Equal(t, int64(2), pr.ID) | ||||
pr, err = GetUnmergedPullRequest(1, 9223372036854775807, "branch1", "master") | |||||
_, err = GetUnmergedPullRequest(1, 9223372036854775807, "branch1", "master") | |||||
assert.Error(t, err) | assert.Error(t, err) | ||||
assert.True(t, IsErrPullRequestNotExist(err)) | assert.True(t, IsErrPullRequestNotExist(err)) | ||||
} | } | ||||
assert.Equal(t, int64(1), pr.BaseRepoID) | assert.Equal(t, int64(1), pr.BaseRepoID) | ||||
assert.Equal(t, int64(2), pr.Index) | assert.Equal(t, int64(2), pr.Index) | ||||
pr, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807) | |||||
_, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807) | |||||
assert.Error(t, err) | assert.Error(t, err) | ||||
assert.True(t, IsErrPullRequestNotExist(err)) | assert.True(t, IsErrPullRequestNotExist(err)) | ||||
} | } | ||||
assert.NoError(t, err) | assert.NoError(t, err) | ||||
assert.Equal(t, int64(2), pr.IssueID) | assert.Equal(t, int64(2), pr.IssueID) | ||||
pr, err = GetPullRequestByIssueID(9223372036854775807) | |||||
_, err = GetPullRequestByIssueID(9223372036854775807) | |||||
assert.Error(t, err) | assert.Error(t, err) | ||||
assert.True(t, IsErrPullRequestNotExist(err)) | assert.True(t, IsErrPullRequestNotExist(err)) | ||||
} | } |
} | } | ||||
} | } | ||||
if r.Publisher == nil { | if r.Publisher == nil { | ||||
r.Publisher, err = GetUserByID(r.PublisherID) | |||||
r.Publisher, err = getUserByID(e, r.PublisherID) | |||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
} | } | ||||
return GetReleaseAttachments(r) | |||||
return getReleaseAttachments(e, r) | |||||
} | } | ||||
// LoadAttributes load repo and publisher attributes for a release | // LoadAttributes load repo and publisher attributes for a release | ||||
// GetReleaseAttachments retrieves the attachments for releases | // GetReleaseAttachments retrieves the attachments for releases | ||||
func GetReleaseAttachments(rels ...*Release) (err error) { | func GetReleaseAttachments(rels ...*Release) (err error) { | ||||
return getReleaseAttachments(x, rels...) | |||||
} | |||||
func getReleaseAttachments(e Engine, rels ...*Release) (err error) { | |||||
if len(rels) == 0 { | if len(rels) == 0 { | ||||
return | return | ||||
} | } | ||||
sort.Sort(sortedRels) | sort.Sort(sortedRels) | ||||
// Select attachments | // Select attachments | ||||
err = x. | |||||
err = e. | |||||
Asc("release_id"). | Asc("release_id"). | ||||
In("release_id", sortedRels.ID). | In("release_id", sortedRels.ID). | ||||
Find(&attachments, Attachment{}) | Find(&attachments, Attachment{}) | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
} | } | ||||
return | return | ||||
} | } | ||||
type releaseSorter struct { | type releaseSorter struct { | ||||
return fmt.Errorf("GetTagCommitID: %v", err) | return fmt.Errorf("GetTagCommitID: %v", err) | ||||
} | } | ||||
if git.IsErrNotExist(err) || commitID != rel.Sha1 { | if git.IsErrNotExist(err) || commitID != rel.Sha1 { | ||||
if err := pushUpdateDeleteTag(repo, gitRepo, rel.TagName); err != nil { | |||||
if err := pushUpdateDeleteTag(repo, rel.TagName); err != nil { | |||||
return fmt.Errorf("pushUpdateDeleteTag: %v", err) | return fmt.Errorf("pushUpdateDeleteTag: %v", err) | ||||
} | } | ||||
} else { | } else { |
"os" | "os" | ||||
"path" | "path" | ||||
"path/filepath" | "path/filepath" | ||||
"regexp" | |||||
"sort" | "sort" | ||||
"strconv" | "strconv" | ||||
"strings" | "strings" | ||||
return users, nil | return users, nil | ||||
} | } | ||||
var ( | |||||
descPattern = regexp.MustCompile(`https?://\S+`) | |||||
) | |||||
// DescriptionHTML does special handles to description and return HTML string. | // DescriptionHTML does special handles to description and return HTML string. | ||||
func (repo *Repository) DescriptionHTML() template.HTML { | func (repo *Repository) DescriptionHTML() template.HTML { | ||||
desc, err := markup.RenderDescriptionHTML([]byte(repo.Description), repo.HTMLURL(), repo.ComposeMetas()) | desc, err := markup.RenderDescriptionHTML([]byte(repo.Description), repo.HTMLURL(), repo.ComposeMetas()) | ||||
return fmt.Errorf("prepareWebhooks: %v", err) | return fmt.Errorf("prepareWebhooks: %v", err) | ||||
} | } | ||||
go HookQueue.Add(repo.ID) | go HookQueue.Add(repo.ID) | ||||
} else { | |||||
} else if err = repo.recalculateAccesses(e); err != nil { | |||||
// Organization automatically called this in addRepository method. | // Organization automatically called this in addRepository method. | ||||
if err = repo.recalculateAccesses(e); err != nil { | |||||
return fmt.Errorf("recalculateAccesses: %v", err) | |||||
} | |||||
return fmt.Errorf("recalculateAccesses: %v", err) | |||||
} | } | ||||
if setting.Service.AutoWatchNewRepos { | if setting.Service.AutoWatchNewRepos { | ||||
} else if err = t.addRepository(sess, repo); err != nil { | } else if err = t.addRepository(sess, repo); err != nil { | ||||
return fmt.Errorf("add to owner team: %v", err) | return fmt.Errorf("add to owner team: %v", err) | ||||
} | } | ||||
} else { | |||||
} else if err = repo.recalculateAccesses(sess); err != nil { | |||||
// Organization called this in addRepository method. | // Organization called this in addRepository method. | ||||
if err = repo.recalculateAccesses(sess); err != nil { | |||||
return fmt.Errorf("recalculateAccesses: %v", err) | |||||
} | |||||
return fmt.Errorf("recalculateAccesses: %v", err) | |||||
} | } | ||||
// Update repository count. | // Update repository count. | ||||
repoPath := repo.repoPath(sess) | repoPath := repo.repoPath(sess) | ||||
removeAllWithNotice(sess, "Delete repository files", repoPath) | removeAllWithNotice(sess, "Delete repository files", repoPath) | ||||
repo.deleteWiki(sess) | |||||
err = repo.deleteWiki(sess) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// Remove attachment files. | // Remove attachment files. | ||||
for i := range attachmentPaths { | for i := range attachmentPaths { | ||||
// CustomAvatarPath returns repository custom avatar file path. | // CustomAvatarPath returns repository custom avatar file path. | ||||
func (repo *Repository) CustomAvatarPath() string { | func (repo *Repository) CustomAvatarPath() string { | ||||
// Avatar empty by default | // Avatar empty by default | ||||
if len(repo.Avatar) <= 0 { | |||||
if len(repo.Avatar) == 0 { | |||||
return "" | return "" | ||||
} | } | ||||
return filepath.Join(setting.RepositoryAvatarUploadPath, repo.Avatar) | return filepath.Join(setting.RepositoryAvatarUploadPath, repo.Avatar) | ||||
// RemoveRandomAvatars removes the randomly generated avatars that were created for repositories | // RemoveRandomAvatars removes the randomly generated avatars that were created for repositories | ||||
func RemoveRandomAvatars() error { | func RemoveRandomAvatars() error { | ||||
var ( | |||||
err error | |||||
) | |||||
err = x. | |||||
return x. | |||||
Where("id > 0").BufferSize(setting.IterateBufferSize). | Where("id > 0").BufferSize(setting.IterateBufferSize). | ||||
Iterate(new(Repository), | Iterate(new(Repository), | ||||
func(idx int, bean interface{}) error { | func(idx int, bean interface{}) error { | ||||
} | } | ||||
return nil | return nil | ||||
}) | }) | ||||
return err | |||||
} | } | ||||
// RelAvatarLink returns a relative link to the repository's avatar. | // RelAvatarLink returns a relative link to the repository's avatar. | ||||
func (repo *Repository) relAvatarLink(e Engine) string { | func (repo *Repository) relAvatarLink(e Engine) string { | ||||
// If no avatar - path is empty | // If no avatar - path is empty | ||||
avatarPath := repo.CustomAvatarPath() | avatarPath := repo.CustomAvatarPath() | ||||
if len(avatarPath) <= 0 || !com.IsFile(avatarPath) { | |||||
if len(avatarPath) == 0 || !com.IsFile(avatarPath) { | |||||
switch mode := setting.RepositoryAvatarFallback; mode { | switch mode := setting.RepositoryAvatarFallback; mode { | ||||
case "image": | case "image": | ||||
return setting.RepositoryAvatarFallbackImage | return setting.RepositoryAvatarFallbackImage |
v = append(v, u) | v = append(v, u) | ||||
} | } | ||||
sort.Slice(v[:], func(i, j int) bool { | |||||
sort.Slice(v, func(i, j int) bool { | |||||
return v[i].Commits < v[j].Commits | return v[i].Commits < v[j].Commits | ||||
}) | }) | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
defer RemoveTemporaryPath(basePath) | |||||
defer func() { | |||||
if err := RemoveTemporaryPath(basePath); err != nil { | |||||
log.Error("CreateNewBranch: RemoveTemporaryPath: %s", err) | |||||
} | |||||
}() | |||||
if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{ | if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{ | ||||
Bare: true, | Bare: true, | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
defer RemoveTemporaryPath(basePath) | |||||
defer func() { | |||||
if err := RemoveTemporaryPath(basePath); err != nil { | |||||
log.Error("CreateNewBranchFromCommit: RemoveTemporaryPath: %s", err) | |||||
} | |||||
}() | |||||
if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{ | if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{ | ||||
Bare: true, | Bare: true, |
} | } | ||||
if _, err = sess. | if _, err = sess. | ||||
Id(collaboration.ID). | |||||
ID(collaboration.ID). | |||||
Cols("mode"). | Cols("mode"). | ||||
Update(collaboration); err != nil { | Update(collaboration); err != nil { | ||||
return fmt.Errorf("update collaboration: %v", err) | return fmt.Errorf("update collaboration: %v", err) |
// Strings for sorting result | // Strings for sorting result | ||||
const ( | const ( | ||||
SearchOrderByAlphabetically SearchOrderBy = "name ASC" | SearchOrderByAlphabetically SearchOrderBy = "name ASC" | ||||
SearchOrderByAlphabeticallyReverse = "name DESC" | |||||
SearchOrderByLeastUpdated = "updated_unix ASC" | |||||
SearchOrderByRecentUpdated = "updated_unix DESC" | |||||
SearchOrderByOldest = "created_unix ASC" | |||||
SearchOrderByNewest = "created_unix DESC" | |||||
SearchOrderBySize = "size ASC" | |||||
SearchOrderBySizeReverse = "size DESC" | |||||
SearchOrderByID = "id ASC" | |||||
SearchOrderByIDReverse = "id DESC" | |||||
SearchOrderByStars = "num_stars ASC" | |||||
SearchOrderByStarsReverse = "num_stars DESC" | |||||
SearchOrderByForks = "num_forks ASC" | |||||
SearchOrderByForksReverse = "num_forks DESC" | |||||
SearchOrderByAlphabeticallyReverse SearchOrderBy = "name DESC" | |||||
SearchOrderByLeastUpdated SearchOrderBy = "updated_unix ASC" | |||||
SearchOrderByRecentUpdated SearchOrderBy = "updated_unix DESC" | |||||
SearchOrderByOldest SearchOrderBy = "created_unix ASC" | |||||
SearchOrderByNewest SearchOrderBy = "created_unix DESC" | |||||
SearchOrderBySize SearchOrderBy = "size ASC" | |||||
SearchOrderBySizeReverse SearchOrderBy = "size DESC" | |||||
SearchOrderByID SearchOrderBy = "id ASC" | |||||
SearchOrderByIDReverse SearchOrderBy = "id DESC" | |||||
SearchOrderByStars SearchOrderBy = "num_stars ASC" | |||||
SearchOrderByStarsReverse SearchOrderBy = "num_stars DESC" | |||||
SearchOrderByForks SearchOrderBy = "num_forks ASC" | |||||
SearchOrderByForksReverse SearchOrderBy = "num_forks DESC" | |||||
) | ) | ||||
// SearchRepositoryByName takes keyword and part of repository name to search, | // SearchRepositoryByName takes keyword and part of repository name to search, |
package models | package models | ||||
import "strings" | |||||
import ( | |||||
"code.gitea.io/gitea/modules/log" | |||||
"strings" | |||||
) | |||||
// RepoRedirect represents that a repo name should be redirected to another | // RepoRedirect represents that a repo name should be redirected to another | ||||
type RepoRedirect struct { | type RepoRedirect struct { | ||||
} | } | ||||
if err := deleteRepoRedirect(sess, ownerID, newRepoName); err != nil { | if err := deleteRepoRedirect(sess, ownerID, newRepoName); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("NewRepoRedirect sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
LowerName: oldRepoName, | LowerName: oldRepoName, | ||||
RedirectRepoID: repoID, | RedirectRepoID: repoID, | ||||
}); err != nil { | }); err != nil { | ||||
sess.Rollback() | |||||
errRollback := sess.Rollback() | |||||
if errRollback != nil { | |||||
log.Error("NewRepoRedirect sess.Rollback: %v", errRollback) | |||||
} | |||||
return err | return err | ||||
} | } | ||||
return sess.Commit() | return sess.Commit() |
if continuationLine || strings.ContainsAny(line, ":-") { | if continuationLine || strings.ContainsAny(line, ":-") { | ||||
continuationLine = strings.HasSuffix(line, "\\") | continuationLine = strings.HasSuffix(line, "\\") | ||||
} else { | } else { | ||||
keyContent = keyContent + line | |||||
keyContent += line | |||||
} | } | ||||
} | } | ||||
} | } | ||||
// AddPublicKey adds new public key to database and authorized_keys file. | // AddPublicKey adds new public key to database and authorized_keys file. | ||||
func AddPublicKey(ownerID int64, name, content string, LoginSourceID int64) (*PublicKey, error) { | |||||
func AddPublicKey(ownerID int64, name, content string, loginSourceID int64) (*PublicKey, error) { | |||||
log.Trace(content) | log.Trace(content) | ||||
fingerprint, err := calcFingerprint(content) | fingerprint, err := calcFingerprint(content) | ||||
Content: content, | Content: content, | ||||
Mode: AccessModeWrite, | Mode: AccessModeWrite, | ||||
Type: KeyTypeUser, | Type: KeyTypeUser, | ||||
LoginSourceID: LoginSourceID, | |||||
LoginSourceID: loginSourceID, | |||||
} | } | ||||
if err = addKey(sess, key); err != nil { | if err = addKey(sess, key); err != nil { | ||||
return nil, fmt.Errorf("addKey: %v", err) | return nil, fmt.Errorf("addKey: %v", err) | ||||
} | } | ||||
// ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source. | // ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source. | ||||
func ListPublicLdapSSHKeys(uid int64, LoginSourceID int64) ([]*PublicKey, error) { | |||||
func ListPublicLdapSSHKeys(uid int64, loginSourceID int64) ([]*PublicKey, error) { | |||||
keys := make([]*PublicKey, 0, 5) | keys := make([]*PublicKey, 0, 5) | ||||
return keys, x. | return keys, x. | ||||
Where("owner_id = ? AND login_source_id = ?", uid, LoginSourceID). | |||||
Where("owner_id = ? AND login_source_id = ?", uid, loginSourceID). | |||||
Find(&keys) | Find(&keys) | ||||
} | } | ||||
// APIURL returns the absolute APIURL to this commit-status. | // APIURL returns the absolute APIURL to this commit-status. | ||||
func (status *CommitStatus) APIURL() string { | func (status *CommitStatus) APIURL() string { | ||||
status.loadRepo(x) | |||||
_ = status.loadRepo(x) | |||||
return fmt.Sprintf("%sapi/v1/%s/statuses/%s", | return fmt.Sprintf("%sapi/v1/%s/statuses/%s", | ||||
setting.AppURL, status.Repo.FullName(), status.SHA) | setting.AppURL, status.Repo.FullName(), status.SHA) | ||||
} | } | ||||
// APIFormat assumes some fields assigned with values: | // APIFormat assumes some fields assigned with values: | ||||
// Required - Repo, Creator | // Required - Repo, Creator | ||||
func (status *CommitStatus) APIFormat() *api.Status { | func (status *CommitStatus) APIFormat() *api.Status { | ||||
status.loadRepo(x) | |||||
_ = status.loadRepo(x) | |||||
apiStatus := &api.Status{ | apiStatus := &api.Status{ | ||||
Created: status.CreatedUnix.AsTime(), | Created: status.CreatedUnix.AsTime(), | ||||
Updated: status.CreatedUnix.AsTime(), | Updated: status.CreatedUnix.AsTime(), | ||||
} | } | ||||
has, err := sess.Desc("index").Limit(1).Get(lastCommitStatus) | has, err := sess.Desc("index").Limit(1).Get(lastCommitStatus) | ||||
if err != nil { | if err != nil { | ||||
sess.Rollback() | |||||
if err := sess.Rollback(); err != nil { | |||||
log.Error("newCommitStatus: sess.Rollback: %v", err) | |||||
} | |||||
return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err) | return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err) | ||||
} | } | ||||
if has { | if has { | ||||
// Insert new CommitStatus | // Insert new CommitStatus | ||||
if _, err = sess.Insert(opts.CommitStatus); err != nil { | if _, err = sess.Insert(opts.CommitStatus); err != nil { | ||||
sess.Rollback() | |||||
if err := sess.Rollback(); err != nil { | |||||
log.Error("newCommitStatus: sess.Rollback: %v", err) | |||||
} | |||||
return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err) | return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err) | ||||
} | } | ||||
assert.Equal(t, "2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f", token.TokenHash) | assert.Equal(t, "2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f", token.TokenHash) | ||||
assert.Equal(t, "e4efbf36", token.TokenLastEight) | assert.Equal(t, "e4efbf36", token.TokenLastEight) | ||||
token, err = GetAccessTokenBySHA("notahash") | |||||
_, err = GetAccessTokenBySHA("notahash") | |||||
assert.Error(t, err) | assert.Error(t, err) | ||||
assert.True(t, IsErrAccessTokenNotExist(err)) | assert.True(t, IsErrAccessTokenNotExist(err)) | ||||
token, err = GetAccessTokenBySHA("") | |||||
_, err = GetAccessTokenBySHA("") | |||||
assert.Error(t, err) | assert.Error(t, err) | ||||
assert.True(t, IsErrAccessTokenEmpty(err)) | assert.True(t, IsErrAccessTokenEmpty(err)) | ||||
} | } |
return nil | return nil | ||||
} | } | ||||
func pushUpdateDeleteTag(repo *Repository, gitRepo *git.Repository, tagName string) error { | |||||
func pushUpdateDeleteTag(repo *Repository, tagName string) error { | |||||
rel, err := GetRelease(repo.ID, tagName) | rel, err := GetRelease(repo.ID, tagName) | ||||
if err != nil { | if err != nil { | ||||
if IsErrReleaseNotExist(err) { | if IsErrReleaseNotExist(err) { | ||||
// If is tag reference | // If is tag reference | ||||
tagName := opts.RefFullName[len(git.TagPrefix):] | tagName := opts.RefFullName[len(git.TagPrefix):] | ||||
if isDelRef { | if isDelRef { | ||||
err = pushUpdateDeleteTag(repo, gitRepo, tagName) | |||||
err = pushUpdateDeleteTag(repo, tagName) | |||||
if err != nil { | if err != nil { | ||||
return nil, fmt.Errorf("pushUpdateDeleteTag: %v", err) | return nil, fmt.Errorf("pushUpdateDeleteTag: %v", err) | ||||
} | } |
if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil { | if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil { | ||||
return fmt.Errorf("deletePublicKeys: %v", err) | return fmt.Errorf("deletePublicKeys: %v", err) | ||||
} | } | ||||
rewriteAllPublicKeys(e) | |||||
err = rewriteAllPublicKeys(e) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
// ***** END: PublicKey ***** | // ***** END: PublicKey ***** | ||||
// ***** START: GPGPublicKey ***** | // ***** START: GPGPublicKey ***** | ||||
} else { | } else { | ||||
exprCond = builder.Expr("org_user.org_id = \"user\".id") | exprCond = builder.Expr("org_user.org_id = \"user\".id") | ||||
} | } | ||||
var accessCond = builder.NewCond() | |||||
accessCond = builder.Or( | |||||
accessCond := builder.Or( | |||||
builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.OwnerID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))), | builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.OwnerID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))), | ||||
builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited)) | builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited)) | ||||
cond = cond.And(accessCond) | cond = cond.And(accessCond) | ||||
} | } | ||||
// addLdapSSHPublicKeys add a users public keys. Returns true if there are changes. | // addLdapSSHPublicKeys add a users public keys. Returns true if there are changes. | ||||
func addLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) bool { | |||||
func addLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool { | |||||
var sshKeysNeedUpdate bool | var sshKeysNeedUpdate bool | ||||
for _, sshKey := range SSHPublicKeys { | |||||
for _, sshKey := range sshPublicKeys { | |||||
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(sshKey)) | _, _, _, _, err := ssh.ParseAuthorizedKey([]byte(sshKey)) | ||||
if err == nil { | if err == nil { | ||||
sshKeyName := fmt.Sprintf("%s-%s", s.Name, sshKey[0:40]) | sshKeyName := fmt.Sprintf("%s-%s", s.Name, sshKey[0:40]) | ||||
} | } | ||||
// synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes. | // synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes. | ||||
func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) bool { | |||||
func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool { | |||||
var sshKeysNeedUpdate bool | var sshKeysNeedUpdate bool | ||||
log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name) | log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name) | ||||
// Get Public Keys from LDAP and skip duplicate keys | // Get Public Keys from LDAP and skip duplicate keys | ||||
var ldapKeys []string | var ldapKeys []string | ||||
for _, v := range SSHPublicKeys { | |||||
for _, v := range sshPublicKeys { | |||||
sshKeySplit := strings.Split(v, " ") | sshKeySplit := strings.Split(v, " ") | ||||
if len(sshKeySplit) > 1 { | if len(sshKeySplit) > 1 { | ||||
ldapKey := strings.Join(sshKeySplit[:2], " ") | ldapKey := strings.Join(sshKeySplit[:2], " ") | ||||
// Find all users with this login type | // Find all users with this login type | ||||
var users []*User | var users []*User | ||||
x.Where("login_type = ?", LoginLDAP). | |||||
err = x.Where("login_type = ?", LoginLDAP). | |||||
And("login_source = ?", s.ID). | And("login_source = ?", s.ID). | ||||
Find(&users) | Find(&users) | ||||
if err != nil { | |||||
log.Error("SyncExternalUsers: %v", err) | |||||
return | |||||
} | |||||
sr := s.LDAP().SearchEntries() | sr := s.LDAP().SearchEntries() | ||||
for _, su := range sr { | for _, su := range sr { | ||||
// Check if user data has changed | // Check if user data has changed | ||||
if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) || | if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) || | ||||
strings.ToLower(usr.Email) != strings.ToLower(su.Mail) || | |||||
!strings.EqualFold(usr.Email, su.Mail) || | |||||
usr.FullName != fullName || | usr.FullName != fullName || | ||||
!usr.IsActive { | !usr.IsActive { | ||||
// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed | // Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed | ||||
if sshKeysNeedUpdate { | if sshKeysNeedUpdate { | ||||
RewriteAllPublicKeys() | |||||
err = RewriteAllPublicKeys() | |||||
if err != nil { | |||||
log.Error("RewriteAllPublicKeys: %v", err) | |||||
} | |||||
} | } | ||||
// Deactivate users not present in LDAP | // Deactivate users not present in LDAP |
email.IsActivated = true | email.IsActivated = true | ||||
if _, err := sess. | if _, err := sess. | ||||
Id(email.ID). | |||||
ID(email.ID). | |||||
Cols("is_activated"). | Cols("is_activated"). | ||||
Update(email); err != nil { | Update(email); err != nil { | ||||
return err | return err |
func TestGetUserByOpenID(t *testing.T) { | func TestGetUserByOpenID(t *testing.T) { | ||||
assert.NoError(t, PrepareTestDatabase()) | assert.NoError(t, PrepareTestDatabase()) | ||||
user, err := GetUserByOpenID("https://unknown") | |||||
_, err := GetUserByOpenID("https://unknown") | |||||
if assert.Error(t, err) { | if assert.Error(t, err) { | ||||
assert.True(t, IsErrUserNotExist(err)) | assert.True(t, IsErrUserNotExist(err)) | ||||
} | } | ||||
user, err = GetUserByOpenID("https://user1.domain1.tld") | |||||
user, err := GetUserByOpenID("https://user1.domain1.tld") | |||||
if assert.NoError(t, err) { | if assert.NoError(t, err) { | ||||
assert.Equal(t, user.ID, int64(1)) | assert.Equal(t, user.ID, int64(1)) | ||||
} | } |
log.Error("prepareWebhooks.JSONPayload: %v", err) | log.Error("prepareWebhooks.JSONPayload: %v", err) | ||||
} | } | ||||
sig := hmac.New(sha256.New, []byte(w.Secret)) | sig := hmac.New(sha256.New, []byte(w.Secret)) | ||||
sig.Write(data) | |||||
_, err = sig.Write(data) | |||||
if err != nil { | |||||
log.Error("prepareWebhooks.sigWrite: %v", err) | |||||
} | |||||
signature = hex.EncodeToString(sig.Sum(nil)) | signature = hex.EncodeToString(sig.Sum(nil)) | ||||
} | } | ||||
return nil, err | return nil, err | ||||
} | } | ||||
conn.SetDeadline(time.Now().Add(timeout)) | |||||
return conn, nil | |||||
return conn, conn.SetDeadline(time.Now().Add(timeout)) | |||||
}, | }, | ||||
}, | }, |
Embeds: []DiscordEmbed{ | Embeds: []DiscordEmbed{ | ||||
{ | { | ||||
Title: title, | Title: title, | ||||
Description: fmt.Sprintf("%s", p.Release.Note), | |||||
Description: p.Release.Note, | |||||
URL: url, | URL: url, | ||||
Color: color, | Color: color, | ||||
Author: DiscordEmbedAuthor{ | Author: DiscordEmbedAuthor{ |
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
defer RemoveTemporaryPath(basePath) | |||||
defer func() { | |||||
if err := RemoveTemporaryPath(basePath); err != nil { | |||||
log.Error("Merge: RemoveTemporaryPath: %s", err) | |||||
} | |||||
}() | |||||
cloneOpts := git.CloneRepoOptions{ | cloneOpts := git.CloneRepoOptions{ | ||||
Bare: true, | Bare: true, | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
defer RemoveTemporaryPath(basePath) | |||||
defer func() { | |||||
if err := RemoveTemporaryPath(basePath); err != nil { | |||||
log.Error("Merge: RemoveTemporaryPath: %s", err) | |||||
} | |||||
}() | |||||
if err := git.Clone(repo.WikiPath(), basePath, git.CloneRepoOptions{ | if err := git.Clone(repo.WikiPath(), basePath, git.CloneRepoOptions{ | ||||
Bare: true, | Bare: true, |
if err = models.UpdateAccessToken(token); err != nil { | if err = models.UpdateAccessToken(token); err != nil { | ||||
log.Error("UpdateAccessToken: %v", err) | log.Error("UpdateAccessToken: %v", err) | ||||
} | } | ||||
} else { | |||||
if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) { | |||||
log.Error("GetAccessTokenBySha: %v", err) | |||||
} | |||||
} else if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) { | |||||
log.Error("GetAccessTokenBySha: %v", err) | |||||
} | } | ||||
if u == nil { | if u == nil { | ||||
return getRuleBody(field, "Include(") | return getRuleBody(field, "Include(") | ||||
} | } | ||||
// FIXME: struct contains a struct | |||||
func validateStruct(obj interface{}) binding.Errors { | |||||
return nil | |||||
} | |||||
func validate(errs binding.Errors, data map[string]interface{}, f Form, l macaron.Locale) binding.Errors { | func validate(errs binding.Errors, data map[string]interface{}, f Form, l macaron.Locale) binding.Errors { | ||||
if errs.Len() == 0 { | if errs.Len() == 0 { | ||||
return errs | return errs |
// GetDefaultEmailURL return the default email url for the given provider | // GetDefaultEmailURL return the default email url for the given provider | ||||
func GetDefaultEmailURL(provider string) string { | func GetDefaultEmailURL(provider string) string { | ||||
switch provider { | |||||
case "github": | |||||
if provider == "github" { | |||||
return github.EmailURL | return github.EmailURL | ||||
} | } | ||||
return "" | return "" |
t.Errorf("Expected nil, got %v", di) | t.Errorf("Expected nil, got %v", di) | ||||
} | } | ||||
// Sleep one second and try retrive again | |||||
// Sleep one second and try retrieve again | |||||
time.Sleep(1 * time.Second) | time.Sleep(1 * time.Second) | ||||
if di := dc.Get("foo"); di != nil { | if di := dc.Get("foo"); di != nil { |
var exists bool | var exists bool | ||||
for _, v := range setting.UI.Themes { | for _, v := range setting.UI.Themes { | ||||
if strings.ToLower(v) == strings.ToLower(f.Theme) { | |||||
if strings.EqualFold(v, f.Theme) { | |||||
exists = true | exists = true | ||||
break | break | ||||
} | } |
// EncodeMD5 encodes string to md5 hex value.
func EncodeMD5(str string) string {
	// md5.Sum replaces the hash.Hash{New,Write,Sum} dance; a Write on a
	// hash never fails, but errcheck would still flag the ignored error.
	sum := md5.Sum([]byte(str))
	return hex.EncodeToString(sum[:])
}
// EncodeSha1 string to sha1 hex value.
func EncodeSha1(str string) string {
	// sha1.Sum avoids the explicit Write whose error must otherwise be
	// discarded to satisfy errcheck.
	sum := sha1.Sum([]byte(str))
	return hex.EncodeToString(sum[:])
}
// EncodeSha256 string to sha256 hex value.
func EncodeSha256(str string) string {
	// sha256.Sum256 avoids the explicit Write whose error must otherwise
	// be discarded to satisfy errcheck. (Doc typo fixed: said "sha1".)
	sum := sha256.Sum256([]byte(str))
	return hex.EncodeToString(sum[:])
}
// create sha1 encode string | // create sha1 encode string | ||||
sh := sha1.New() | sh := sha1.New() | ||||
sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes))) | |||||
_, _ = sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes))) | |||||
encoded := hex.EncodeToString(sh.Sum(nil)) | encoded := hex.EncodeToString(sh.Sum(nil)) | ||||
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded) | code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded) | ||||
EByte = PByte * 1024 | EByte = PByte * 1024 | ||||
) | ) | ||||
var bytesSizeTable = map[string]uint64{ | |||||
"b": Byte, | |||||
"kb": KByte, | |||||
"mb": MByte, | |||||
"gb": GByte, | |||||
"tb": TByte, | |||||
"pb": PByte, | |||||
"eb": EByte, | |||||
} | |||||
// logn returns the logarithm of n in base b, via the change-of-base rule.
func logn(n, b float64) float64 {
	numerator := math.Log(n)
	denominator := math.Log(b)
	return numerator / denominator
}
if len(data) == 0 { | if len(data) == 0 { | ||||
return true | return true | ||||
} | } | ||||
return strings.Index(http.DetectContentType(data), "text/") != -1 | |||||
return strings.Contains(http.DetectContentType(data), "text/") | |||||
} | } | ||||
// IsImageFile detects if data is an image format
func IsImageFile(data []byte) bool {
	contentType := http.DetectContentType(data)
	return strings.Contains(contentType, "image/")
}
// IsPDFFile detects if data is a pdf format
func IsPDFFile(data []byte) bool {
	contentType := http.DetectContentType(data)
	return strings.Contains(contentType, "application/pdf")
}
// IsVideoFile detects if data is a video format
// (relies on net/http MIME sniffing of the leading bytes).
func IsVideoFile(data []byte) bool {
	return strings.Contains(http.DetectContentType(data), "video/")
}
// IsAudioFile detects if data is an video format | // IsAudioFile detects if data is an video format | ||||
func IsAudioFile(data []byte) bool { | func IsAudioFile(data []byte) bool { | ||||
return strings.Index(http.DetectContentType(data), "audio/") != -1 | |||||
return strings.Contains(http.DetectContentType(data), "audio/") | |||||
} | } | ||||
// EntryIcon returns the octicon class for displaying files/directories | // EntryIcon returns the octicon class for displaying files/directories |
} | } | ||||
func TestFileSize(t *testing.T) { | func TestFileSize(t *testing.T) { | ||||
var size int64 | |||||
size = 512 | |||||
var size int64 = 512 | |||||
assert.Equal(t, "512B", FileSize(size)) | assert.Equal(t, "512B", FileSize(size)) | ||||
size = size * 1024 | |||||
size *= 1024 | |||||
assert.Equal(t, "512KB", FileSize(size)) | assert.Equal(t, "512KB", FileSize(size)) | ||||
size = size * 1024 | |||||
size *= 1024 | |||||
assert.Equal(t, "512MB", FileSize(size)) | assert.Equal(t, "512MB", FileSize(size)) | ||||
size = size * 1024 | |||||
size *= 1024 | |||||
assert.Equal(t, "512GB", FileSize(size)) | assert.Equal(t, "512GB", FileSize(size)) | ||||
size = size * 1024 | |||||
size *= 1024 | |||||
assert.Equal(t, "512TB", FileSize(size)) | assert.Equal(t, "512TB", FileSize(size)) | ||||
size = size * 1024 | |||||
size *= 1024 | |||||
assert.Equal(t, "512PB", FileSize(size)) | assert.Equal(t, "512PB", FileSize(size)) | ||||
size = size * 4 | |||||
size *= 4 | |||||
assert.Equal(t, "2.0EB", FileSize(size)) | assert.Equal(t, "2.0EB", FileSize(size)) | ||||
} | } | ||||
if value, err = getFunc(); err != nil { | if value, err = getFunc(); err != nil { | ||||
return value, err | return value, err | ||||
} | } | ||||
conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||||
err = conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||||
if err != nil { | |||||
return 0, err | |||||
} | |||||
} | } | ||||
switch value := conn.Get(key).(type) { | switch value := conn.Get(key).(type) { | ||||
case int: | case int: | ||||
if value, err = getFunc(); err != nil { | if value, err = getFunc(); err != nil { | ||||
return value, err | return value, err | ||||
} | } | ||||
conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||||
err = conn.Put(key, value, int64(setting.CacheService.TTL.Seconds())) | |||||
if err != nil { | |||||
return 0, err | |||||
} | |||||
} | } | ||||
switch value := conn.Get(key).(type) { | switch value := conn.Get(key).(type) { | ||||
case int64: | case int64: | ||||
if conn == nil { | if conn == nil { | ||||
return | return | ||||
} | } | ||||
conn.Delete(key) | |||||
_ = conn.Delete(key) | |||||
} | } |
} | } | ||||
ctx.Redirect(setting.AppSubURL + "/") | ctx.Redirect(setting.AppSubURL + "/") | ||||
return | |||||
} | } | ||||
// HTML calls Context.HTML and converts template name to string. | // HTML calls Context.HTML and converts template name to string. | ||||
} | } | ||||
c.Header().Set("Content-Type", "text/html") | c.Header().Set("Content-Type", "text/html") | ||||
c.WriteHeader(http.StatusOK) | c.WriteHeader(http.StatusOK) | ||||
c.Write([]byte(com.Expand(`<!doctype html> | |||||
_, _ = c.Write([]byte(com.Expand(`<!doctype html> | |||||
<html> | <html> | ||||
<head> | <head> | ||||
<meta name="go-import" content="{GoGetImport} git {CloneLink}"> | <meta name="go-import" content="{GoGetImport} git {CloneLink}"> |
// GetParams returns the configured URL params | // GetParams returns the configured URL params | ||||
func (p *Pagination) GetParams() template.URL { | func (p *Pagination) GetParams() template.URL { | ||||
return template.URL(strings.Join(p.urlParams[:], "&")) | |||||
return template.URL(strings.Join(p.urlParams, "&")) | |||||
} | } | ||||
// SetDefaultParams sets common pagination params that are often used | // SetDefaultParams sets common pagination params that are often used |
ctx.Repo.PullRequest.BaseRepo = repo.BaseRepo | ctx.Repo.PullRequest.BaseRepo = repo.BaseRepo | ||||
ctx.Repo.PullRequest.Allowed = true | ctx.Repo.PullRequest.Allowed = true | ||||
ctx.Repo.PullRequest.HeadInfo = ctx.Repo.Owner.Name + ":" + ctx.Repo.BranchName | ctx.Repo.PullRequest.HeadInfo = ctx.Repo.Owner.Name + ":" + ctx.Repo.BranchName | ||||
} else { | |||||
} else if repo.AllowsPulls() { | |||||
// Or, this is repository accepts pull requests between branches. | // Or, this is repository accepts pull requests between branches. | ||||
if repo.AllowsPulls() { | |||||
ctx.Data["BaseRepo"] = repo | |||||
ctx.Repo.PullRequest.BaseRepo = repo | |||||
ctx.Repo.PullRequest.Allowed = true | |||||
ctx.Repo.PullRequest.SameRepo = true | |||||
ctx.Repo.PullRequest.HeadInfo = ctx.Repo.BranchName | |||||
} | |||||
ctx.Data["BaseRepo"] = repo | |||||
ctx.Repo.PullRequest.BaseRepo = repo | |||||
ctx.Repo.PullRequest.Allowed = true | |||||
ctx.Repo.PullRequest.SameRepo = true | |||||
ctx.Repo.PullRequest.HeadInfo = ctx.Repo.BranchName | |||||
} | } | ||||
} | } | ||||
ctx.Data["PullRequestCtx"] = ctx.Repo.PullRequest | ctx.Data["PullRequestCtx"] = ctx.Repo.PullRequest |
go func() { | go func() { | ||||
_, err := io.Copy(encoder, dataRc) | _, err := io.Copy(encoder, dataRc) | ||||
encoder.Close() | |||||
_ = encoder.Close() | |||||
if err != nil { | if err != nil { | ||||
pw.CloseWithError(err) | |||||
_ = pw.CloseWithError(err) | |||||
} else { | } else { | ||||
pw.Close() | |||||
_ = pw.Close() | |||||
} | } | ||||
}() | }() | ||||
func isImageFile(data []byte) (string, bool) { | func isImageFile(data []byte) (string, bool) { | ||||
contentType := http.DetectContentType(data) | contentType := http.DetectContentType(data) | ||||
if strings.Index(contentType, "image/") != -1 { | |||||
if strings.Contains(contentType, "image/") { | |||||
return contentType, true | return contentType, true | ||||
} | } | ||||
return contentType, false | return contentType, false | ||||
} | } | ||||
func commitsCount(repoPath, revision, relpath string) (int64, error) { | func commitsCount(repoPath, revision, relpath string) (int64, error) { | ||||
var cmd *Command | |||||
cmd = NewCommand("rev-list", "--count") | |||||
cmd := NewCommand("rev-list", "--count") | |||||
cmd.AddArguments(revision) | cmd.AddArguments(revision) | ||||
if len(relpath) > 0 { | if len(relpath) > 0 { | ||||
cmd.AddArguments("--", relpath) | cmd.AddArguments("--", relpath) | ||||
All bool | All bool | ||||
} | } | ||||
// NewSearchCommitsOptions contruct a SearchCommitsOption from a space-delimited search string | |||||
// NewSearchCommitsOptions construct a SearchCommitsOption from a space-delimited search string | |||||
func NewSearchCommitsOptions(searchString string, forAllRefs bool) SearchCommitsOptions { | func NewSearchCommitsOptions(searchString string, forAllRefs bool) SearchCommitsOptions { | ||||
var keywords, authors, committers []string | var keywords, authors, committers []string | ||||
var after, before string | var after, before string |
return tree, nil | return tree, nil | ||||
} | } | ||||
func getFullPath(treePath, path string) string { | |||||
if treePath != "" { | |||||
if path != "" { | |||||
return treePath + "/" + path | |||||
} | |||||
return treePath | |||||
} | |||||
return path | |||||
} | |||||
func getFileHashes(c *object.Commit, treePath string, paths []string) (map[string]plumbing.Hash, error) { | func getFileHashes(c *object.Commit, treePath string, paths []string) (map[string]plumbing.Hash, error) { | ||||
tree, err := getCommitTree(c, treePath) | tree, err := getCommitTree(c, treePath) | ||||
if err == object.ErrDirectoryNotFound { | if err == object.ErrDirectoryNotFound { |
// IsRepoURLAccessible checks if given repository URL is accessible. | // IsRepoURLAccessible checks if given repository URL is accessible. | ||||
func IsRepoURLAccessible(url string) bool { | func IsRepoURLAccessible(url string) bool { | ||||
_, err := NewCommand("ls-remote", "-q", "-h", url, "HEAD").Run() | _, err := NewCommand("ls-remote", "-q", "-h", url, "HEAD").Run() | ||||
if err != nil { | |||||
return false | |||||
} | |||||
return true | |||||
return err == nil | |||||
} | } | ||||
// InitRepository initializes a new Git repository. | // InitRepository initializes a new Git repository. | ||||
func InitRepository(repoPath string, bare bool) error { | func InitRepository(repoPath string, bare bool) error { | ||||
os.MkdirAll(repoPath, os.ModePerm) | |||||
err := os.MkdirAll(repoPath, os.ModePerm) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
cmd := NewCommand("init") | cmd := NewCommand("init") | ||||
if bare { | if bare { | ||||
cmd.AddArguments("--bare") | cmd.AddArguments("--bare") | ||||
} | } | ||||
_, err := cmd.RunInDir(repoPath) | |||||
_, err = cmd.RunInDir(repoPath) | |||||
return err | return err | ||||
} | } | ||||
// IsBranchExist returns true if given branch exists in current repository. | // IsBranchExist returns true if given branch exists in current repository. | ||||
func (repo *Repository) IsBranchExist(name string) bool { | func (repo *Repository) IsBranchExist(name string) bool { | ||||
_, err := repo.gogitRepo.Reference(plumbing.ReferenceName(BranchPrefix+name), true) | _, err := repo.gogitRepo.Reference(plumbing.ReferenceName(BranchPrefix+name), true) | ||||
if err != nil { | |||||
return false | |||||
} | |||||
return true | |||||
return err == nil | |||||
} | } | ||||
// Branch represents a Git branch. | // Branch represents a Git branch. | ||||
return nil, err | return nil, err | ||||
} | } | ||||
branches.ForEach(func(branch *plumbing.Reference) error { | |||||
_ = branches.ForEach(func(branch *plumbing.Reference) error { | |||||
branchNames = append(branchNames, strings.TrimPrefix(branch.Name().String(), BranchPrefix)) | branchNames = append(branchNames, strings.TrimPrefix(branch.Name().String(), BranchPrefix)) | ||||
return nil | return nil | ||||
}) | }) |
func (repo *Repository) IsCommitExist(name string) bool { | func (repo *Repository) IsCommitExist(name string) bool { | ||||
hash := plumbing.NewHash(name) | hash := plumbing.NewHash(name) | ||||
_, err := repo.gogitRepo.CommitObject(hash) | _, err := repo.gogitRepo.CommitObject(hash) | ||||
if err != nil { | |||||
return false | |||||
} | |||||
return true | |||||
return err == nil | |||||
} | } | ||||
// GetBranchCommitID returns last commit ID string of given branch. | // GetBranchCommitID returns last commit ID string of given branch. |
"strconv" | "strconv" | ||||
"strings" | "strings" | ||||
"time" | "time" | ||||
logger "code.gitea.io/gitea/modules/log" | |||||
) | ) | ||||
// CompareInfo represents needed information for comparing references. | // CompareInfo represents needed information for comparing references. | ||||
if err = repo.AddRemote(tmpRemote, basePath, true); err != nil { | if err = repo.AddRemote(tmpRemote, basePath, true); err != nil { | ||||
return nil, fmt.Errorf("AddRemote: %v", err) | return nil, fmt.Errorf("AddRemote: %v", err) | ||||
} | } | ||||
defer repo.RemoveRemote(tmpRemote) | |||||
defer func() { | |||||
if err := repo.RemoveRemote(tmpRemote); err != nil { | |||||
logger.Error("GetPullRequestInfo: RemoveRemote: %v", err) | |||||
} | |||||
}() | |||||
} | } | ||||
compareInfo := new(CompareInfo) | compareInfo := new(CompareInfo) |
// IsTagExist returns true if given tag exists in the repository. | // IsTagExist returns true if given tag exists in the repository. | ||||
func (repo *Repository) IsTagExist(name string) bool { | func (repo *Repository) IsTagExist(name string) bool { | ||||
_, err := repo.gogitRepo.Reference(plumbing.ReferenceName(TagPrefix+name), true) | _, err := repo.gogitRepo.Reference(plumbing.ReferenceName(TagPrefix+name), true) | ||||
if err != nil { | |||||
return false | |||||
} | |||||
return true | |||||
return err == nil | |||||
} | } | ||||
// CreateTag create one tag in the repository | // CreateTag create one tag in the repository | ||||
return nil, err | return nil, err | ||||
} | } | ||||
tags.ForEach(func(tag *plumbing.Reference) error { | |||||
_ = tags.ForEach(func(tag *plumbing.Reference) error { | |||||
tagNames = append(tagNames, strings.TrimPrefix(tag.Name().String(), TagPrefix)) | tagNames = append(tagNames, strings.TrimPrefix(tag.Name().String(), TagPrefix)) | ||||
return nil | return nil | ||||
}) | }) |
import ( | import ( | ||||
"fmt" | "fmt" | ||||
"os" | "os" | ||||
"path/filepath" | |||||
"strings" | "strings" | ||||
"sync" | "sync" | ||||
) | ) | ||||
return fmt.Errorf("%v - %s", err, stderr) | return fmt.Errorf("%v - %s", err, stderr) | ||||
} | } | ||||
// If the object is stored in its own file (i.e not in a pack file), | |||||
// this function returns the full path to the object file. | |||||
// It does not test if the file exists. | |||||
func filepathFromSHA1(rootdir, sha1 string) string { | |||||
return filepath.Join(rootdir, "objects", sha1[:2], sha1[2:]) | |||||
} | |||||
// RefEndName return the end name of a ref name | // RefEndName return the end name of a ref name | ||||
func RefEndName(refStr string) string { | func RefEndName(refStr string) string { | ||||
if strings.HasPrefix(refStr, BranchPrefix) { | if strings.HasPrefix(refStr, BranchPrefix) { |
} | } | ||||
var writerPool WriterPool | var writerPool WriterPool | ||||
var regex regexp.Regexp | |||||
// Options represents the configuration for the gzip middleware | // Options represents the configuration for the gzip middleware | ||||
type Options struct { | type Options struct { | ||||
if rangeHdr := ctx.Req.Header.Get(rangeHeader); rangeHdr != "" { | if rangeHdr := ctx.Req.Header.Get(rangeHeader); rangeHdr != "" { | ||||
match := regex.FindStringSubmatch(rangeHdr) | match := regex.FindStringSubmatch(rangeHdr) | ||||
if match != nil && len(match) > 1 { | |||||
if len(match) > 1 { | |||||
return | return | ||||
} | } | ||||
} | } | ||||
if proxy.writer == nil { | if proxy.writer == nil { | ||||
err := proxy.startPlain() | err := proxy.startPlain() | ||||
if err != nil { | if err != nil { | ||||
err = fmt.Errorf("GzipMiddleware: write to regular responseWriter at close gets error: %q", err.Error()) | |||||
return fmt.Errorf("GzipMiddleware: write to regular responseWriter at close gets error: %q", err.Error()) | |||||
} | } | ||||
} | } | ||||
} | } | ||||
if r.req.Method == "GET" && len(paramBody) > 0 { | if r.req.Method == "GET" && len(paramBody) > 0 { | ||||
if strings.Index(r.url, "?") != -1 { | |||||
if strings.Contains(r.url, "?") { | |||||
r.url += "&" + paramBody | r.url += "&" + paramBody | ||||
} else { | } else { | ||||
r.url = r.url + "?" + paramBody | r.url = r.url + "?" + paramBody | ||||
} | } | ||||
} | } | ||||
for k, v := range r.params { | for k, v := range r.params { | ||||
bodyWriter.WriteField(k, v) | |||||
err := bodyWriter.WriteField(k, v) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
} | } | ||||
bodyWriter.Close() | |||||
pw.Close() | |||||
_ = bodyWriter.Close() | |||||
_ = pw.Close() | |||||
}() | }() | ||||
r.Header("Content-Type", bodyWriter.FormDataContentType()) | r.Header("Content-Type", bodyWriter.FormDataContentType()) | ||||
r.req.Body = ioutil.NopCloser(pr) | r.req.Body = ioutil.NopCloser(pr) | ||||
Proxy: proxy, | Proxy: proxy, | ||||
Dial: TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout), | Dial: TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout), | ||||
} | } | ||||
} else { | |||||
// if r.transport is *http.Transport then set the settings. | |||||
if t, ok := trans.(*http.Transport); ok { | |||||
if t.TLSClientConfig == nil { | |||||
t.TLSClientConfig = r.setting.TLSClientConfig | |||||
} | |||||
if t.Proxy == nil { | |||||
t.Proxy = r.setting.Proxy | |||||
} | |||||
if t.Dial == nil { | |||||
t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout) | |||||
} | |||||
} else if t, ok := trans.(*http.Transport); ok { | |||||
if t.TLSClientConfig == nil { | |||||
t.TLSClientConfig = r.setting.TLSClientConfig | |||||
} | |||||
if t.Proxy == nil { | |||||
t.Proxy = r.setting.Proxy | |||||
} | |||||
if t.Dial == nil { | |||||
t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout) | |||||
} | } | ||||
} | } | ||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
conn.SetDeadline(time.Now().Add(rwTimeout)) | |||||
return conn, nil | |||||
return conn, conn.SetDeadline(time.Now().Add(rwTimeout)) | |||||
} | } | ||||
} | } |
package indexer | package indexer | ||||
import ( | import ( | ||||
"fmt" | |||||
"os" | "os" | ||||
"strconv" | "strconv" | ||||
return strconv.FormatInt(id, 36) | return strconv.FormatInt(id, 36) | ||||
} | } | ||||
// idOfIndexerID the integer id associated with an indexer id | |||||
func idOfIndexerID(indexerID string) (int64, error) { | |||||
id, err := strconv.ParseInt(indexerID, 36, 64) | |||||
if err != nil { | |||||
return 0, fmt.Errorf("Unexpected indexer ID %s: %v", indexerID, err) | |||||
} | |||||
return id, nil | |||||
} | |||||
// numericEqualityQuery a numeric equality query for the given value and field | // numericEqualityQuery a numeric equality query for the given value and field | ||||
func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery { | func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery { | ||||
f := float64(value) | f := float64(value) | ||||
return q | return q | ||||
} | } | ||||
func newMatchPhraseQuery(matchPhrase, field, analyzer string) *query.MatchPhraseQuery { | |||||
q := bleve.NewMatchPhraseQuery(matchPhrase) | |||||
q.FieldVal = field | |||||
q.Analyzer = analyzer | |||||
return q | |||||
} | |||||
const unicodeNormalizeName = "unicodeNormalize" | const unicodeNormalizeName = "unicodeNormalize" | ||||
func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error { | func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error { |
return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueQueueType) | return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueQueueType) | ||||
} | } | ||||
go issueIndexerQueue.Run() | |||||
go func() { | |||||
err = issueIndexerQueue.Run() | |||||
if err != nil { | |||||
log.Error("issueIndexerQueue.Run: %v", err) | |||||
} | |||||
}() | |||||
if populate { | if populate { | ||||
if syncReindex { | if syncReindex { | ||||
comments = append(comments, comment.Content) | comments = append(comments, comment.Content) | ||||
} | } | ||||
} | } | ||||
issueIndexerQueue.Push(&IndexerData{ | |||||
_ = issueIndexerQueue.Push(&IndexerData{ | |||||
ID: issue.ID, | ID: issue.ID, | ||||
RepoID: issue.RepoID, | RepoID: issue.RepoID, | ||||
Title: issue.Title, | Title: issue.Title, | ||||
return | return | ||||
} | } | ||||
if len(ids) <= 0 { | |||||
if len(ids) == 0 { | |||||
return | return | ||||
} | } | ||||
issueIndexerQueue.Push(&IndexerData{ | |||||
_ = issueIndexerQueue.Push(&IndexerData{ | |||||
IDs: ids, | IDs: ids, | ||||
IsDelete: true, | IsDelete: true, | ||||
}) | }) |
select { | select { | ||||
case data := <-c.queue: | case data := <-c.queue: | ||||
if data.IsDelete { | if data.IsDelete { | ||||
c.indexer.Delete(data.IDs...) | |||||
_ = c.indexer.Delete(data.IDs...) | |||||
continue | continue | ||||
} | } | ||||
datas = append(datas, data) | datas = append(datas, data) | ||||
if len(datas) >= c.batchNumber { | if len(datas) >= c.batchNumber { | ||||
c.indexer.Index(datas) | |||||
_ = c.indexer.Index(datas) | |||||
// TODO: save the point | // TODO: save the point | ||||
datas = make([]*IndexerData, 0, c.batchNumber) | datas = make([]*IndexerData, 0, c.batchNumber) | ||||
} | } | ||||
case <-time.After(time.Millisecond * 100): | case <-time.After(time.Millisecond * 100): | ||||
i++ | i++ | ||||
if i >= 3 && len(datas) > 0 { | if i >= 3 && len(datas) > 0 { | ||||
c.indexer.Index(datas) | |||||
_ = c.indexer.Index(datas) | |||||
// TODO: save the point | // TODO: save the point | ||||
datas = make([]*IndexerData, 0, c.batchNumber) | datas = make([]*IndexerData, 0, c.batchNumber) | ||||
} | } |
for { | for { | ||||
i++ | i++ | ||||
if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) { | if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) { | ||||
l.indexer.Index(datas) | |||||
_ = l.indexer.Index(datas) | |||||
datas = make([]*IndexerData, 0, l.batchNumber) | datas = make([]*IndexerData, 0, l.batchNumber) | ||||
i = 0 | i = 0 | ||||
continue | continue | ||||
continue | continue | ||||
} | } | ||||
if len(bs) <= 0 { | |||||
if len(bs) == 0 { | |||||
time.Sleep(time.Millisecond * 100) | time.Sleep(time.Millisecond * 100) | ||||
continue | continue | ||||
} | } |
i++ | i++ | ||||
if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) { | if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) { | ||||
r.indexer.Index(datas) | |||||
_ = r.indexer.Index(datas) | |||||
datas = make([]*IndexerData, 0, r.batchNumber) | datas = make([]*IndexerData, 0, r.batchNumber) | ||||
i = 0 | i = 0 | ||||
} | } | ||||
if len(bs) <= 0 { | |||||
if len(bs) == 0 { | |||||
time.Sleep(time.Millisecond * 100) | time.Sleep(time.Millisecond * 100) | ||||
continue | continue | ||||
} | } |
) | ) | ||||
//checkIsValidRequest check if it a valid request in case of bad request it write the response to ctx. | //checkIsValidRequest check if it a valid request in case of bad request it write the response to ctx. | ||||
func checkIsValidRequest(ctx *context.Context, post bool) bool { | |||||
func checkIsValidRequest(ctx *context.Context) bool { | |||||
if !setting.LFS.StartServer { | if !setting.LFS.StartServer { | ||||
writeStatus(ctx, 404) | writeStatus(ctx, 404) | ||||
return false | return false | ||||
} | } | ||||
ctx.User = user | ctx.User = user | ||||
} | } | ||||
if post { | |||||
mediaParts := strings.Split(ctx.Req.Header.Get("Content-Type"), ";") | |||||
if mediaParts[0] != metaMediaType { | |||||
writeStatus(ctx, 400) | |||||
return false | |||||
} | |||||
} | |||||
return true | return true | ||||
} | } | ||||
// GetListLockHandler list locks | // GetListLockHandler list locks | ||||
func GetListLockHandler(ctx *context.Context) { | func GetListLockHandler(ctx *context.Context) { | ||||
if !checkIsValidRequest(ctx, false) { | |||||
if !checkIsValidRequest(ctx) { | |||||
return | return | ||||
} | } | ||||
ctx.Resp.Header().Set("Content-Type", metaMediaType) | ctx.Resp.Header().Set("Content-Type", metaMediaType) | ||||
// PostLockHandler create lock | // PostLockHandler create lock | ||||
func PostLockHandler(ctx *context.Context) { | func PostLockHandler(ctx *context.Context) { | ||||
if !checkIsValidRequest(ctx, false) { | |||||
if !checkIsValidRequest(ctx) { | |||||
return | return | ||||
} | } | ||||
ctx.Resp.Header().Set("Content-Type", metaMediaType) | ctx.Resp.Header().Set("Content-Type", metaMediaType) | ||||
// VerifyLockHandler list locks for verification | // VerifyLockHandler list locks for verification | ||||
func VerifyLockHandler(ctx *context.Context) { | func VerifyLockHandler(ctx *context.Context) { | ||||
if !checkIsValidRequest(ctx, false) { | |||||
if !checkIsValidRequest(ctx) { | |||||
return | return | ||||
} | } | ||||
ctx.Resp.Header().Set("Content-Type", metaMediaType) | ctx.Resp.Header().Set("Content-Type", metaMediaType) | ||||
// UnLockHandler delete locks | // UnLockHandler delete locks | ||||
func UnLockHandler(ctx *context.Context) { | func UnLockHandler(ctx *context.Context) { | ||||
if !checkIsValidRequest(ctx, false) { | |||||
if !checkIsValidRequest(ctx) { | |||||
return | return | ||||
} | } | ||||
ctx.Resp.Header().Set("Content-Type", metaMediaType) | ctx.Resp.Header().Set("Content-Type", metaMediaType) |
if rangeHdr := ctx.Req.Header.Get("Range"); rangeHdr != "" { | if rangeHdr := ctx.Req.Header.Get("Range"); rangeHdr != "" { | ||||
regex := regexp.MustCompile(`bytes=(\d+)\-.*`) | regex := regexp.MustCompile(`bytes=(\d+)\-.*`) | ||||
match := regex.FindStringSubmatch(rangeHdr) | match := regex.FindStringSubmatch(rangeHdr) | ||||
if match != nil && len(match) > 1 { | |||||
if len(match) > 1 { | |||||
statusCode = 206 | statusCode = 206 | ||||
fromByte, _ = strconv.ParseInt(match[1], 10, 32) | fromByte, _ = strconv.ParseInt(match[1], 10, 32) | ||||
ctx.Resp.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", fromByte, meta.Size-1, meta.Size-fromByte)) | ctx.Resp.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", fromByte, meta.Size-1, meta.Size-fromByte)) | ||||
} | } | ||||
ctx.Resp.WriteHeader(statusCode) | ctx.Resp.WriteHeader(statusCode) | ||||
io.Copy(ctx.Resp, content) | |||||
content.Close() | |||||
_, _ = io.Copy(ctx.Resp, content) | |||||
_ = content.Close() | |||||
logRequest(ctx.Req, statusCode) | logRequest(ctx.Req, statusCode) | ||||
} | } | ||||
if ctx.Req.Method == "GET" { | if ctx.Req.Method == "GET" { | ||||
enc := json.NewEncoder(ctx.Resp) | enc := json.NewEncoder(ctx.Resp) | ||||
enc.Encode(Represent(rv, meta, true, false)) | |||||
_ = enc.Encode(Represent(rv, meta, true, false)) | |||||
} | } | ||||
logRequest(ctx.Req, 200) | logRequest(ctx.Req, 200) | ||||
ctx.Resp.WriteHeader(sentStatus) | ctx.Resp.WriteHeader(sentStatus) | ||||
enc := json.NewEncoder(ctx.Resp) | enc := json.NewEncoder(ctx.Resp) | ||||
enc.Encode(Represent(rv, meta, meta.Existing, true)) | |||||
_ = enc.Encode(Represent(rv, meta, meta.Existing, true)) | |||||
logRequest(ctx.Req, sentStatus) | logRequest(ctx.Req, sentStatus) | ||||
} | } | ||||
respobj := &BatchResponse{Objects: responseObjects} | respobj := &BatchResponse{Objects: responseObjects} | ||||
enc := json.NewEncoder(ctx.Resp) | enc := json.NewEncoder(ctx.Resp) | ||||
enc.Encode(respobj) | |||||
_ = enc.Encode(respobj) | |||||
logRequest(ctx.Req, 200) | logRequest(ctx.Req, 200) | ||||
} | } | ||||
if i > lasti { | if i > lasti { | ||||
written, err := c.w.Write(bytes[lasti:i]) | written, err := c.w.Write(bytes[lasti:i]) | ||||
totalWritten = totalWritten + written | |||||
totalWritten += written | |||||
if err != nil { | if err != nil { | ||||
return totalWritten, err | return totalWritten, err | ||||
} | } | ||||
if bytes[j] == 'm' { | if bytes[j] == 'm' { | ||||
if c.mode == allowColor { | if c.mode == allowColor { | ||||
written, err := c.w.Write(bytes[i : j+1]) | written, err := c.w.Write(bytes[i : j+1]) | ||||
totalWritten = totalWritten + written | |||||
totalWritten += written | |||||
if err != nil { | if err != nil { | ||||
return totalWritten, err | return totalWritten, err | ||||
} | } | ||||
} | } | ||||
return fmt.Sprintf(format, v...) | return fmt.Sprintf(format, v...) | ||||
} | } | ||||
return fmt.Sprintf(format) | |||||
return format | |||||
} | } | ||||
// ColorFprintf will write to the provided writer similar to ColorSprintf | // ColorFprintf will write to the provided writer similar to ColorSprintf | ||||
} | } | ||||
return fmt.Fprintf(w, format, v...) | return fmt.Fprintf(w, format, v...) | ||||
} | } | ||||
return fmt.Fprintf(w, format) | |||||
return fmt.Fprint(w, format) | |||||
} | } | ||||
// ColorFormatted structs provide their own colored string when formatted with ColorSprintf | // ColorFormatted structs provide their own colored string when formatted with ColorSprintf |
} | } | ||||
if tcpConn, ok := conn.(*net.TCPConn); ok { | if tcpConn, ok := conn.(*net.TCPConn); ok { | ||||
tcpConn.SetKeepAlive(true) | |||||
err = tcpConn.SetKeepAlive(true) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
} | } | ||||
i.innerWriter = conn | i.innerWriter = conn |
assert.NoError(t, err) | assert.NoError(t, err) | ||||
assert.Equal(t, expected, string(written)) | assert.Equal(t, expected, string(written)) | ||||
return | |||||
} | } | ||||
func TestConnLogger(t *testing.T) { | func TestConnLogger(t *testing.T) { |
return | return | ||||
} | } | ||||
l.loggerProvider.Flush() | l.loggerProvider.Flush() | ||||
case _, _ = <-l.close: | |||||
case <-l.close: | |||||
l.closeLogger() | l.closeLogger() | ||||
return | return | ||||
} | } | ||||
l.loggerProvider.Flush() | l.loggerProvider.Flush() | ||||
l.loggerProvider.Close() | l.loggerProvider.Close() | ||||
l.closed <- true | l.closed <- true | ||||
return | |||||
} | } | ||||
// Close this ChannelledLog | // Close this ChannelledLog | ||||
} | } | ||||
m.mutex.Unlock() | m.mutex.Unlock() | ||||
m.closed <- true | m.closed <- true | ||||
return | |||||
} | } | ||||
// Start processing the MultiChannelledLog | // Start processing the MultiChannelledLog |
func (log *FileLogger) deleteOldLog() { | func (log *FileLogger) deleteOldLog() { | ||||
dir := filepath.Dir(log.Filename) | dir := filepath.Dir(log.Filename) | ||||
filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { | |||||
_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { | |||||
defer func() { | defer func() { | ||||
if r := recover(); r != nil { | if r := recover(); r != nil { | ||||
returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r) | returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r) | ||||
// there are no buffering messages in file logger in memory. | // there are no buffering messages in file logger in memory. | ||||
// flush file means sync file from disk. | // flush file means sync file from disk. | ||||
func (log *FileLogger) Flush() { | func (log *FileLogger) Flush() { | ||||
log.mw.fd.Sync() | |||||
_ = log.mw.fd.Sync() | |||||
} | } | ||||
// GetName returns the default name for this implementation | // GetName returns the default name for this implementation |
assert.Equal(t, expected, string(logData)) | assert.Equal(t, expected, string(logData)) | ||||
event.level = WARN | event.level = WARN | ||||
expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
fileLogger.LogEvent(&event) | fileLogger.LogEvent(&event) | ||||
fileLogger.Flush() | fileLogger.Flush() | ||||
logData, err = ioutil.ReadFile(filename) | logData, err = ioutil.ReadFile(filename) | ||||
err = realFileLogger.DoRotate() | err = realFileLogger.DoRotate() | ||||
assert.Error(t, err) | assert.Error(t, err) | ||||
expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
fileLogger.LogEvent(&event) | fileLogger.LogEvent(&event) | ||||
fileLogger.Flush() | fileLogger.Flush() | ||||
logData, err = ioutil.ReadFile(filename) | logData, err = ioutil.ReadFile(filename) | ||||
assert.Equal(t, expected, string(logData)) | assert.Equal(t, expected, string(logData)) | ||||
// Should fail to rotate | // Should fail to rotate | ||||
expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
fileLogger.LogEvent(&event) | fileLogger.LogEvent(&event) | ||||
fileLogger.Flush() | fileLogger.Flush() | ||||
logData, err = ioutil.ReadFile(filename) | logData, err = ioutil.ReadFile(filename) | ||||
assert.Equal(t, expected, string(logData)) | assert.Equal(t, expected, string(logData)) | ||||
event.level = WARN | event.level = WARN | ||||
expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg) | |||||
fileLogger.LogEvent(&event) | fileLogger.LogEvent(&event) | ||||
fileLogger.Flush() | fileLogger.Flush() | ||||
logData, err = ioutil.ReadFile(filename) | logData, err = ioutil.ReadFile(filename) |
for _, flag := range strings.Split(strings.ToLower(from), ",") { | for _, flag := range strings.Split(strings.ToLower(from), ",") { | ||||
f, ok := flagFromString[strings.TrimSpace(flag)] | f, ok := flagFromString[strings.TrimSpace(flag)] | ||||
if ok { | if ok { | ||||
flags = flags | f | |||||
flags |= f | |||||
} | } | ||||
} | } | ||||
return flags | return flags |
func (l *LoggerAsWriter) Log(msg string) { | func (l *LoggerAsWriter) Log(msg string) { | ||||
for _, logger := range l.ourLoggers { | for _, logger := range l.ourLoggers { | ||||
// Set the skip to reference the call just above this | // Set the skip to reference the call just above this | ||||
logger.Log(1, l.level, msg) | |||||
_ = logger.Log(1, l.level, msg) | |||||
} | } | ||||
} | } | ||||
"strings" | "strings" | ||||
) | ) | ||||
const ( | |||||
subjectPhrase = "Diagnostic message from server" | |||||
) | |||||
type smtpWriter struct { | type smtpWriter struct { | ||||
owner *SMTPLogger | owner *SMTPLogger | ||||
} | } |
mode: removeColor, | mode: removeColor, | ||||
}).Write([]byte(event.msg)) | }).Write([]byte(event.msg)) | ||||
msg = baw | msg = baw | ||||
if logger.regexp.Match(msg) { | |||||
return true | |||||
} | |||||
return false | |||||
return logger.regexp.Match(msg) | |||||
} | } | ||||
// Close the base logger | // Close the base logger |
} | } | ||||
func processMailQueue() { | func processMailQueue() { | ||||
for { | |||||
select { | |||||
case msg := <-mailQueue: | |||||
log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info) | |||||
if err := gomail.Send(Sender, msg.Message); err != nil { | |||||
log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err) | |||||
} else { | |||||
log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info) | |||||
} | |||||
for msg := range mailQueue { | |||||
log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info) | |||||
if err := gomail.Send(Sender, msg.Message); err != nil { | |||||
log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err) | |||||
} else { | |||||
log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info) | |||||
} | } | ||||
} | } | ||||
} | } |
return ret | return ret | ||||
} | } | ||||
// cutoutVerbosePrefix cutouts URL prefix including sub-path to | |||||
// return a clean unified string of request URL path. | |||||
func cutoutVerbosePrefix(prefix string) string { | |||||
if len(prefix) == 0 || prefix[0] != '/' { | |||||
return prefix | |||||
} | |||||
count := 0 | |||||
for i := 0; i < len(prefix); i++ { | |||||
if prefix[i] == '/' { | |||||
count++ | |||||
} | |||||
if count >= 3+setting.AppSubURLDepth { | |||||
return prefix[:i] | |||||
} | |||||
} | |||||
return prefix | |||||
} | |||||
// IsSameDomain checks if given url string has the same hostname as current Gitea instance | // IsSameDomain checks if given url string has the same hostname as current Gitea instance | ||||
func IsSameDomain(s string) bool { | func IsSameDomain(s string) bool { | ||||
if strings.HasPrefix(s, "/") { | if strings.HasPrefix(s, "/") { | ||||
} | } | ||||
func (p *postProcessError) Error() string { | func (p *postProcessError) Error() string { | ||||
return "PostProcess: " + p.context + ", " + p.Error() | |||||
return "PostProcess: " + p.context + ", " + p.err.Error() | |||||
} | } | ||||
type processor func(ctx *postProcessCtx, node *html.Node) | type processor func(ctx *postProcessCtx, node *html.Node) | ||||
// ignore everything else | // ignore everything else | ||||
} | } | ||||
func (ctx *postProcessCtx) visitNodeForShortLinks(node *html.Node) { | |||||
switch node.Type { | |||||
case html.TextNode: | |||||
shortLinkProcessorFull(ctx, node, true) | |||||
case html.ElementNode: | |||||
if node.Data == "code" || node.Data == "pre" || node.Data == "a" { | |||||
return | |||||
} | |||||
for n := node.FirstChild; n != nil; n = n.NextSibling { | |||||
ctx.visitNodeForShortLinks(n) | |||||
} | |||||
} | |||||
} | |||||
// textNode runs the passed node through various processors, in order to handle | // textNode runs the passed node through various processors, in order to handle | ||||
// all kinds of special links handled by the post-processing. | // all kinds of special links handled by the post-processing. | ||||
func (ctx *postProcessCtx) textNode(node *html.Node) { | func (ctx *postProcessCtx) textNode(node *html.Node) { |
return link(util.URLJoin(baseURL, strconv.Itoa(index)), fmt.Sprintf("#%d", index)) | return link(util.URLJoin(baseURL, strconv.Itoa(index)), fmt.Sprintf("#%d", index)) | ||||
} | } | ||||
// urlContentsLink an HTML link whose contents is the target URL | |||||
func urlContentsLink(href string) string { | |||||
return link(href, href) | |||||
} | |||||
// link an HTML link | // link an HTML link | ||||
func link(href, contents string) string { | func link(href, contents string) string { | ||||
return fmt.Sprintf("<a href=\"%s\">%s</a>", href, contents) | return fmt.Sprintf("<a href=\"%s\">%s</a>", href, contents) |