Diffstat (limited to 'modules')
-rw-r--r--  modules/auth/auth.go | 22
-rw-r--r--  modules/auth/ldap/ldap.go | 14
-rw-r--r--  modules/base/tool.go | 6
-rw-r--r--  modules/context/api.go | 2
-rw-r--r--  modules/context/context.go | 6
-rw-r--r--  modules/context/panic.go | 76
-rw-r--r--  modules/context/repo.go | 4
-rw-r--r--  modules/cron/cron.go | 12
-rw-r--r--  modules/indexer/issues/indexer.go | 8
-rw-r--r--  modules/indexer/issues/queue_disk.go | 8
-rw-r--r--  modules/indexer/repo.go | 10
-rw-r--r--  modules/lfs/server.go | 2
-rw-r--r--  modules/log/base.go | 328
-rw-r--r--  modules/log/base_test.go | 277
-rw-r--r--  modules/log/colors.go | 348
-rw-r--r--  modules/log/conn.go | 122
-rw-r--r--  modules/log/conn_test.go | 240
-rw-r--r--  modules/log/console.go | 88
-rw-r--r--  modules/log/console_test.go | 137
-rw-r--r--  modules/log/console_windows.go | 43
-rw-r--r--  modules/log/errors.go | 62
-rw-r--r--  modules/log/event.go | 335
-rw-r--r--  modules/log/file.go | 209
-rw-r--r--  modules/log/file_test.go | 247
-rw-r--r--  modules/log/level.go | 111
-rw-r--r--  modules/log/level_test.go | 55
-rw-r--r--  modules/log/log.go | 451
-rw-r--r--  modules/log/log_test.go | 154
-rw-r--r--  modules/log/logger.go | 156
-rw-r--r--  modules/log/provider.go | 26
-rw-r--r--  modules/log/router.go | 103
-rw-r--r--  modules/log/smtp.go | 82
-rw-r--r--  modules/log/smtp_test.go | 86
-rw-r--r--  modules/log/stack.go | 83
-rw-r--r--  modules/log/xorm.go | 91
-rw-r--r--  modules/mailer/mailer.go | 2
-rw-r--r--  modules/markup/external/external.go | 8
-rw-r--r--  modules/markup/markup.go | 2
-rw-r--r--  modules/markup/orgmode/orgmode.go | 2
-rw-r--r--  modules/notification/indexer/indexer.go | 6
-rw-r--r--  modules/notification/mail/mail.go | 10
-rw-r--r--  modules/notification/ui/ui.go | 2
-rw-r--r--  modules/pprof/pprof.go | 6
-rw-r--r--  modules/setting/cache.go | 2
-rw-r--r--  modules/setting/cron.go | 2
-rw-r--r--  modules/setting/git.go | 4
-rw-r--r--  modules/setting/log.go | 375
-rw-r--r--  modules/setting/mailer.go | 4
-rw-r--r--  modules/setting/repository.go | 12
-rw-r--r--  modules/setting/setting.go | 110
-rw-r--r--  modules/ssh/ssh.go | 30
-rw-r--r--  modules/templates/helper.go | 6
-rw-r--r--  modules/util/url.go | 4
53 files changed, 3659 insertions, 932 deletions
diff --git a/modules/auth/auth.go b/modules/auth/auth.go
index 4be358b737..c9e5c44da5 100644
--- a/modules/auth/auth.go
+++ b/modules/auth/auth.go
@@ -63,13 +63,13 @@ func SignedInID(ctx *macaron.Context, sess session.Store) int64 {
t, err := models.GetAccessTokenBySHA(tokenSHA)
if err != nil {
if models.IsErrAccessTokenNotExist(err) || models.IsErrAccessTokenEmpty(err) {
- log.Error(4, "GetAccessTokenBySHA: %v", err)
+ log.Error("GetAccessTokenBySHA: %v", err)
}
return 0
}
t.UpdatedUnix = util.TimeStampNow()
if err = models.UpdateAccessToken(t); err != nil {
- log.Error(4, "UpdateAccessToken: %v", err)
+ log.Error("UpdateAccessToken: %v", err)
}
ctx.Data["IsApiToken"] = true
return t.UID
@@ -92,7 +92,7 @@ func checkOAuthAccessToken(accessToken string) int64 {
}
token, err := models.ParseOAuth2Token(accessToken)
if err != nil {
- log.Trace("ParseOAuth2Token", err)
+ log.Trace("ParseOAuth2Token: %v", err)
return 0
}
var grant *models.OAuth2Grant
@@ -120,7 +120,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
if err == nil {
return user, false
} else if !models.IsErrUserNotExist(err) {
- log.Error(4, "GetUserById: %v", err)
+ log.Error("GetUserById: %v", err)
}
}
@@ -130,7 +130,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
u, err := models.GetUserByName(webAuthUser)
if err != nil {
if !models.IsErrUserNotExist(err) {
- log.Error(4, "GetUserByName: %v", err)
+ log.Error("GetUserByName: %v", err)
return nil, false
}
@@ -151,7 +151,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
}
if err = models.CreateUser(u); err != nil {
// FIXME: should I create a system notice?
- log.Error(4, "CreateUser: %v", err)
+ log.Error("CreateUser: %v", err)
return nil, false
}
return u, false
@@ -183,13 +183,13 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
if isUsernameToken {
u, err = models.GetUserByID(token.UID)
if err != nil {
- log.Error(4, "GetUserByID: %v", err)
+ log.Error("GetUserByID: %v", err)
return nil, false
}
} else {
u, err = models.GetUserByName(uname)
if err != nil {
- log.Error(4, "GetUserByID: %v", err)
+ log.Error("GetUserByID: %v", err)
return nil, false
}
if u.ID != token.UID {
@@ -198,11 +198,11 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
}
token.UpdatedUnix = util.TimeStampNow()
if err = models.UpdateAccessToken(token); err != nil {
- log.Error(4, "UpdateAccessToken: %v", err)
+ log.Error("UpdateAccessToken: %v", err)
}
} else {
if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) {
- log.Error(4, "GetAccessTokenBySha: %v", err)
+ log.Error("GetAccessTokenBySha: %v", err)
}
}
@@ -210,7 +210,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
u, err = models.UserSignIn(uname, passwd)
if err != nil {
if !models.IsErrUserNotExist(err) {
- log.Error(4, "UserSignIn: %v", err)
+ log.Error("UserSignIn: %v", err)
}
return nil, false
}
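
Note: the change running through this file, and through most of the call sites below, is mechanical: the old logging API took a leading integer (a call-depth/level hint) as its first argument, while the new API takes only the format string and arguments and derives caller information itself. A minimal before/after sketch of the call-site change:

    // old API: explicit numeric first argument
    log.Error(4, "UpdateAccessToken: %v", err)

    // new API: format string and arguments only
    log.Error("UpdateAccessToken: %v", err)
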
diff --git a/modules/auth/ldap/ldap.go b/modules/auth/ldap/ldap.go
index f202f94086..ddeaf12430 100644
--- a/modules/auth/ldap/ldap.go
+++ b/modules/auth/ldap/ldap.go
@@ -107,7 +107,7 @@ func (ls *Source) findUserDN(l *ldap.Conn, name string) (string, bool) {
userDN := sr.Entries[0].DN
if userDN == "" {
- log.Error(4, "LDAP search was successful, but found no DN!")
+ log.Error("LDAP search was successful, but found no DN!")
return "", false
}
@@ -162,7 +162,7 @@ func checkAdmin(l *ldap.Conn, ls *Source, userDN string) bool {
sr, err := l.Search(search)
if err != nil {
- log.Error(4, "LDAP Admin Search failed unexpectedly! (%v)", err)
+ log.Error("LDAP Admin Search failed unexpectedly! (%v)", err)
} else if len(sr.Entries) < 1 {
log.Trace("LDAP Admin Search found no matching entries.")
} else {
@@ -176,12 +176,12 @@ func checkAdmin(l *ldap.Conn, ls *Source, userDN string) bool {
func (ls *Source) SearchEntry(name, passwd string, directBind bool) *SearchResult {
// See https://tools.ietf.org/search/rfc4513#section-5.1.2
if len(passwd) == 0 {
- log.Debug("Auth. failed for %s, password cannot be empty")
+ log.Debug("Auth. failed for %s, password cannot be empty", name)
return nil
}
l, err := dial(ls)
if err != nil {
- log.Error(4, "LDAP Connect error, %s:%v", ls.Host, err)
+ log.Error("LDAP Connect error, %s:%v", ls.Host, err)
ls.Enabled = false
return nil
}
@@ -261,7 +261,7 @@ func (ls *Source) SearchEntry(name, passwd string, directBind bool) *SearchResul
sr, err := l.Search(search)
if err != nil {
- log.Error(4, "LDAP Search failed unexpectedly! (%v)", err)
+ log.Error("LDAP Search failed unexpectedly! (%v)", err)
return nil
} else if len(sr.Entries) < 1 {
if directBind {
@@ -311,7 +311,7 @@ func (ls *Source) UsePagedSearch() bool {
func (ls *Source) SearchEntries() []*SearchResult {
l, err := dial(ls)
if err != nil {
- log.Error(4, "LDAP Connect error, %s:%v", ls.Host, err)
+ log.Error("LDAP Connect error, %s:%v", ls.Host, err)
ls.Enabled = false
return nil
}
@@ -349,7 +349,7 @@ func (ls *Source) SearchEntries() []*SearchResult {
sr, err = l.Search(search)
}
if err != nil {
- log.Error(4, "LDAP Search failed unexpectedly! (%v)", err)
+ log.Error("LDAP Search failed unexpectedly! (%v)", err)
return nil
}
diff --git a/modules/base/tool.go b/modules/base/tool.go
index 359cf87d77..681577b76f 100644
--- a/modules/base/tool.go
+++ b/modules/base/tool.go
@@ -110,7 +110,7 @@ func GetRandomBytesAsBase64(n int) string {
_, err := io.ReadFull(rand.Reader, bytes)
if err != nil {
- log.Fatal(4, "Error reading random bytes: %v", err)
+ log.Fatal("Error reading random bytes: %v", err)
}
return base64.RawURLEncoding.EncodeToString(bytes)
@@ -197,12 +197,12 @@ const DefaultAvatarSize = -1
func libravatarURL(email string) (*url.URL, error) {
urlStr, err := setting.LibravatarService.FromEmail(email)
if err != nil {
- log.Error(4, "LibravatarService.FromEmail(email=%s): error %v", email, err)
+ log.Error("LibravatarService.FromEmail(email=%s): error %v", email, err)
return nil, err
}
u, err := url.Parse(urlStr)
if err != nil {
- log.Error(4, "Failed to parse libravatar url(%s): error %v", urlStr, err)
+ log.Error("Failed to parse libravatar url(%s): error %v", urlStr, err)
return nil, err
}
return u, nil
diff --git a/modules/context/api.go b/modules/context/api.go
index 04a472b382..89b0bb749a 100644
--- a/modules/context/api.go
+++ b/modules/context/api.go
@@ -72,7 +72,7 @@ func (ctx *APIContext) Error(status int, title string, obj interface{}) {
}
if status == 500 {
- log.Error(4, "%s: %s", title, message)
+ log.Error("%s: %s", title, message)
}
ctx.JSON(status, APIError{
diff --git a/modules/context/context.go b/modules/context/context.go
index 5c7254de22..770d42e2c7 100644
--- a/modules/context/context.go
+++ b/modules/context/context.go
@@ -116,7 +116,7 @@ func (ctx *Context) RenderWithErr(msg string, tpl base.TplName, form interface{}
// NotFound displays a 404 (Not Found) page and prints the given error, if any.
func (ctx *Context) NotFound(title string, err error) {
if err != nil {
- log.Error(4, "%s: %v", title, err)
+ log.Error("%s: %v", title, err)
if macaron.Env != macaron.PROD {
ctx.Data["ErrorMsg"] = err
}
@@ -131,7 +131,7 @@ func (ctx *Context) NotFound(title string, err error) {
// error, if any.
func (ctx *Context) ServerError(title string, err error) {
if err != nil {
- log.Error(4, "%s: %v", title, err)
+ log.Error("%s: %v", title, err)
if macaron.Env != macaron.PROD {
ctx.Data["ErrorMsg"] = err
}
@@ -156,7 +156,7 @@ func (ctx *Context) NotFoundOrServerError(title string, errck func(error) bool,
// HandleText handles HTTP status code
func (ctx *Context) HandleText(status int, title string) {
if (status/100 == 4) || (status/100 == 5) {
- log.Error(4, "%s", title)
+ log.Error("%s", title)
}
ctx.PlainText(status, []byte(title))
}
diff --git a/modules/context/panic.go b/modules/context/panic.go
index 810c31a065..8ed91dea65 100644
--- a/modules/context/panic.go
+++ b/modules/context/panic.go
@@ -17,11 +17,9 @@
package context
import (
- "bytes"
"fmt"
- "io/ioutil"
- "runtime"
+ "code.gitea.io/gitea/modules/log"
macaron "gopkg.in/macaron.v1"
)
@@ -32,7 +30,7 @@ func Recovery() macaron.Handler {
return func(ctx *Context) {
defer func() {
if err := recover(); err != nil {
- combinedErr := fmt.Errorf("%s\n%s", err, string(stack(3)))
+ combinedErr := fmt.Errorf("%s\n%s", err, string(log.Stack(2)))
ctx.ServerError("PANIC:", combinedErr)
}
}()
@@ -40,73 +38,3 @@ func Recovery() macaron.Handler {
ctx.Next()
}
}
-
-var (
- unknown = []byte("???")
-)
-
-// Although we could just use debug.Stack(), this routine will return the source code
-// skip the provided number of frames - i.e. allowing us to ignore this function call
-// and the preceding function call.
-// If the problem is a lack of memory of course all this is not going to work...
-func stack(skip int) []byte {
- buf := new(bytes.Buffer)
-
- // Store the last file we opened as its probable that the preceding stack frame
- // will be in the same file
- var lines [][]byte
- var lastFilename string
- for i := skip; ; i++ { // Skip over frames
- programCounter, filename, lineNumber, ok := runtime.Caller(i)
- // If we can't retrieve the information break - basically we're into go internals at this point.
- if !ok {
- break
- }
-
- // Print equivalent of debug.Stack()
- fmt.Fprintf(buf, "%s:%d (0x%x)\n", filename, lineNumber, programCounter)
- // Now try to print the offending line
- if filename != lastFilename {
- data, err := ioutil.ReadFile(filename)
- if err != nil {
- // can't read this sourcefile
- // likely we don't have the sourcecode available
- continue
- }
- lines = bytes.Split(data, []byte{'\n'})
- lastFilename = filename
- }
- fmt.Fprintf(buf, "\t%s: %s\n", functionName(programCounter), source(lines, lineNumber))
- }
- return buf.Bytes()
-}
-
-// functionName converts the provided programCounter into a function name
-func functionName(programCounter uintptr) []byte {
- function := runtime.FuncForPC(programCounter)
- if function == nil {
- return unknown
- }
- name := []byte(function.Name())
-
- // Because we provide the filename we can drop the preceding package name.
- if lastslash := bytes.LastIndex(name, []byte("/")); lastslash >= 0 {
- name = name[lastslash+1:]
- }
- // And the current package name.
- if period := bytes.Index(name, []byte(".")); period >= 0 {
- name = name[period+1:]
- }
- // And we should just replace the interpunct with a dot
- name = bytes.Replace(name, []byte("·"), []byte("."), -1)
- return name
-}
-
-// source returns a space-trimmed slice of the n'th line.
-func source(lines [][]byte, n int) []byte {
- n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
- if n < 0 || n >= len(lines) {
- return unknown
- }
- return bytes.TrimSpace(lines[n])
-}
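
Note: the hand-rolled stack() helper and its companions are deleted here in favour of a shared implementation in the new log module (modules/log/stack.go in the diffstat above), which Recovery() now calls in the hunk at the top of this file. A short sketch of that call, with the skip argument controlling how many of the caller's own frames are omitted:

    // skip=2 drops the deferred closure and the recovery handler itself;
    // the result is a source-annotated stack trace, rendered as a string
    combinedErr := fmt.Errorf("%s\n%s", err, string(log.Stack(2)))
    ctx.ServerError("PANIC:", combinedErr)
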
diff --git a/modules/context/repo.go b/modules/context/repo.go
index 20a3d19428..0cbd89a75f 100644
--- a/modules/context/repo.go
+++ b/modules/context/repo.go
@@ -124,7 +124,7 @@ func (r *Repository) BranchNameSubURL() string {
case r.IsViewCommit:
return "commit/" + r.BranchName
}
- log.Error(4, "Unknown view type for repo: %v", r)
+ log.Error("Unknown view type for repo: %v", r)
return ""
}
@@ -536,7 +536,7 @@ func getRefName(ctx *Context, pathType RepoRefType) string {
}
return path
default:
- log.Error(4, "Unrecognized path type: %v", path)
+ log.Error("Unrecognized path type: %v", path)
}
return ""
}
diff --git a/modules/cron/cron.go b/modules/cron/cron.go
index ede73e0666..24457f5013 100644
--- a/modules/cron/cron.go
+++ b/modules/cron/cron.go
@@ -25,7 +25,7 @@ func NewContext() {
if setting.Cron.UpdateMirror.Enabled {
entry, err = c.AddFunc("Update mirrors", setting.Cron.UpdateMirror.Schedule, models.MirrorUpdate)
if err != nil {
- log.Fatal(4, "Cron[Update mirrors]: %v", err)
+ log.Fatal("Cron[Update mirrors]: %v", err)
}
if setting.Cron.UpdateMirror.RunAtStart {
entry.Prev = time.Now()
@@ -36,7 +36,7 @@ func NewContext() {
if setting.Cron.RepoHealthCheck.Enabled {
entry, err = c.AddFunc("Repository health check", setting.Cron.RepoHealthCheck.Schedule, models.GitFsck)
if err != nil {
- log.Fatal(4, "Cron[Repository health check]: %v", err)
+ log.Fatal("Cron[Repository health check]: %v", err)
}
if setting.Cron.RepoHealthCheck.RunAtStart {
entry.Prev = time.Now()
@@ -47,7 +47,7 @@ func NewContext() {
if setting.Cron.CheckRepoStats.Enabled {
entry, err = c.AddFunc("Check repository statistics", setting.Cron.CheckRepoStats.Schedule, models.CheckRepoStats)
if err != nil {
- log.Fatal(4, "Cron[Check repository statistics]: %v", err)
+ log.Fatal("Cron[Check repository statistics]: %v", err)
}
if setting.Cron.CheckRepoStats.RunAtStart {
entry.Prev = time.Now()
@@ -58,7 +58,7 @@ func NewContext() {
if setting.Cron.ArchiveCleanup.Enabled {
entry, err = c.AddFunc("Clean up old repository archives", setting.Cron.ArchiveCleanup.Schedule, models.DeleteOldRepositoryArchives)
if err != nil {
- log.Fatal(4, "Cron[Clean up old repository archives]: %v", err)
+ log.Fatal("Cron[Clean up old repository archives]: %v", err)
}
if setting.Cron.ArchiveCleanup.RunAtStart {
entry.Prev = time.Now()
@@ -69,7 +69,7 @@ func NewContext() {
if setting.Cron.SyncExternalUsers.Enabled {
entry, err = c.AddFunc("Synchronize external users", setting.Cron.SyncExternalUsers.Schedule, models.SyncExternalUsers)
if err != nil {
- log.Fatal(4, "Cron[Synchronize external users]: %v", err)
+ log.Fatal("Cron[Synchronize external users]: %v", err)
}
if setting.Cron.SyncExternalUsers.RunAtStart {
entry.Prev = time.Now()
@@ -80,7 +80,7 @@ func NewContext() {
if setting.Cron.DeletedBranchesCleanup.Enabled {
entry, err = c.AddFunc("Remove old deleted branches", setting.Cron.DeletedBranchesCleanup.Schedule, models.RemoveOldDeletedBranches)
if err != nil {
- log.Fatal(4, "Cron[Remove old deleted branches]: %v", err)
+ log.Fatal("Cron[Remove old deleted branches]: %v", err)
}
if setting.Cron.DeletedBranchesCleanup.RunAtStart {
entry.Prev = time.Now()
diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go
index 4d7e64adc7..3d066ca3fa 100644
--- a/modules/indexer/issues/indexer.go
+++ b/modules/indexer/issues/indexer.go
@@ -116,7 +116,7 @@ func populateIssueIndexer() {
Collaborate: util.OptionalBoolFalse,
})
if err != nil {
- log.Error(4, "SearchRepositoryByName: %v", err)
+ log.Error("SearchRepositoryByName: %v", err)
continue
}
if len(repos) == 0 {
@@ -130,11 +130,11 @@ func populateIssueIndexer() {
IsPull: util.OptionalBoolNone,
})
if err != nil {
- log.Error(4, "Issues: %v", err)
+ log.Error("Issues: %v", err)
continue
}
if err = models.IssueList(is).LoadDiscussComments(); err != nil {
- log.Error(4, "LoadComments: %v", err)
+ log.Error("LoadComments: %v", err)
continue
}
for _, issue := range is {
@@ -166,7 +166,7 @@ func DeleteRepoIssueIndexer(repo *models.Repository) {
var ids []int64
ids, err := models.GetIssueIDsByRepoID(repo.ID)
if err != nil {
- log.Error(4, "getIssueIDsByRepoID failed: %v", err)
+ log.Error("getIssueIDsByRepoID failed: %v", err)
return
}
diff --git a/modules/indexer/issues/queue_disk.go b/modules/indexer/issues/queue_disk.go
index be6867f9ca..cf9e6aee22 100644
--- a/modules/indexer/issues/queue_disk.go
+++ b/modules/indexer/issues/queue_disk.go
@@ -53,7 +53,7 @@ func (l *LevelQueue) Run() error {
bs, err := l.queue.RPop()
if err != nil {
if err != levelqueue.ErrNotFound {
- log.Error(4, "RPop: %v", err)
+ log.Error("RPop: %v", err)
}
time.Sleep(time.Millisecond * 100)
continue
@@ -67,7 +67,7 @@ func (l *LevelQueue) Run() error {
var data IndexerData
err = json.Unmarshal(bs, &data)
if err != nil {
- log.Error(4, "Unmarshal: %v", err)
+ log.Error("Unmarshal: %v", err)
time.Sleep(time.Millisecond * 100)
continue
}
@@ -77,11 +77,11 @@ func (l *LevelQueue) Run() error {
if data.IsDelete {
if data.ID > 0 {
if err = l.indexer.Delete(data.ID); err != nil {
- log.Error(4, "indexer.Delete: %v", err)
+ log.Error("indexer.Delete: %v", err)
}
} else if len(data.IDs) > 0 {
if err = l.indexer.Delete(data.IDs...); err != nil {
- log.Error(4, "indexer.Delete: %v", err)
+ log.Error("indexer.Delete: %v", err)
}
}
time.Sleep(time.Millisecond * 10)
diff --git a/modules/indexer/repo.go b/modules/indexer/repo.go
index 20e0fa6b18..287d23854b 100644
--- a/modules/indexer/repo.go
+++ b/modules/indexer/repo.go
@@ -68,7 +68,7 @@ func (update RepoIndexerUpdate) AddToFlushingBatch(batch rupture.FlushingBatch)
case RepoIndexerOpDelete:
return batch.Delete(id)
default:
- log.Error(4, "Unrecognized repo indexer op: %d", update.Op)
+ log.Error("Unrecognized repo indexer op: %d", update.Op)
}
return nil
}
@@ -78,17 +78,17 @@ func InitRepoIndexer(populateIndexer func() error) {
var err error
repoIndexer, err = openIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion)
if err != nil {
- log.Fatal(4, "InitRepoIndexer: %v", err)
+ log.Fatal("InitRepoIndexer: %v", err)
}
if repoIndexer != nil {
return
}
if err = createRepoIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion); err != nil {
- log.Fatal(4, "CreateRepoIndexer: %v", err)
+ log.Fatal("CreateRepoIndexer: %v", err)
}
if err = populateIndexer(); err != nil {
- log.Fatal(4, "PopulateRepoIndex: %v", err)
+ log.Fatal("PopulateRepoIndex: %v", err)
}
}
@@ -135,7 +135,7 @@ func filenameIndexerID(repoID int64, filename string) string {
func filenameOfIndexerID(indexerID string) string {
index := strings.IndexByte(indexerID, '_')
if index == -1 {
- log.Error(4, "Unexpected ID in repo indexer: %s", indexerID)
+ log.Error("Unexpected ID in repo indexer: %s", indexerID)
}
return indexerID[index+1:]
}
diff --git a/modules/lfs/server.go b/modules/lfs/server.go
index 188a6b8d57..8ae6326842 100644
--- a/modules/lfs/server.go
+++ b/modules/lfs/server.go
@@ -333,7 +333,7 @@ func PutHandler(ctx *context.Context) {
ctx.Resp.WriteHeader(500)
fmt.Fprintf(ctx.Resp, `{"message":"%s"}`, err)
if err = repository.RemoveLFSMetaObjectByOid(rv.Oid); err != nil {
- log.Error(4, "RemoveLFSMetaObjectByOid: %v", err)
+ log.Error("RemoveLFSMetaObjectByOid: %v", err)
}
return
}
diff --git a/modules/log/base.go b/modules/log/base.go
new file mode 100644
index 0000000000..5577737e04
--- /dev/null
+++ b/modules/log/base.go
@@ -0,0 +1,328 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+ "sync"
+)
+
+// These flags define which text to prefix to each log entry generated
+// by the Logger. Bits are or'ed together to control what's printed.
+// There is no control over the order they appear (the order listed
+// here) or the format they present (as described in the comments).
+// The prefix is followed by a colon only if more than time is stated
+// is specified. For example, flags Ldate | Ltime
+// produce, 2009/01/23 01:23:23 message.
+// The standard is:
+// 2009/01/23 01:23:23 ...a/b/c/d.go:23:runtime.Caller() [I]: message
+const (
+ Ldate = 1 << iota // the date in the local time zone: 2009/01/23
+ Ltime // the time in the local time zone: 01:23:23
+ Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
+ Llongfile // full file name and line number: /a/b/c/d.go:23
+ Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
+ Lfuncname // function name of the caller: runtime.Caller()
+ Lshortfuncname // last part of the function name
+ LUTC // if Ldate or Ltime is set, use UTC rather than the local time zone
+ Llevelinitial // Initial character of the provided level in brackets eg. [I] for info
+ Llevel // Provided level in brackets [INFO]
+
+ // Last 20 characters of the filename
+ Lmedfile = Lshortfile | Llongfile
+
+ // LstdFlags is the initial value for the standard logger
+ LstdFlags = Ldate | Ltime | Lmedfile | Lshortfuncname | Llevelinitial
+)
+
+var flagFromString = map[string]int{
+ "none": 0,
+ "date": Ldate,
+ "time": Ltime,
+ "microseconds": Lmicroseconds,
+ "longfile": Llongfile,
+ "shortfile": Lshortfile,
+ "funcname": Lfuncname,
+ "shortfuncname": Lshortfuncname,
+ "utc": LUTC,
+ "levelinitial": Llevelinitial,
+ "level": Llevel,
+ "medfile": Lmedfile,
+ "stdflags": LstdFlags,
+}
+
+// FlagsFromString takes a comma separated list of flags and returns
+// the flags for this string
+func FlagsFromString(from string) int {
+ flags := 0
+ for _, flag := range strings.Split(strings.ToLower(from), ",") {
+ f, ok := flagFromString[strings.TrimSpace(flag)]
+ if ok {
+ flags = flags | f
+ }
+ }
+ return flags
+}
+
+type byteArrayWriter []byte
+
+func (b *byteArrayWriter) Write(p []byte) (int, error) {
+ *b = append(*b, p...)
+ return len(p), nil
+}
+
+// BaseLogger represent a basic logger for Gitea
+type BaseLogger struct {
+ out io.WriteCloser
+ mu sync.Mutex
+
+ Level Level `json:"level"`
+ StacktraceLevel Level `json:"stacktraceLevel"`
+ Flags int `json:"flags"`
+ Prefix string `json:"prefix"`
+ Colorize bool `json:"colorize"`
+ Expression string `json:"expression"`
+ regexp *regexp.Regexp
+}
+
+func (b *BaseLogger) createLogger(out io.WriteCloser, level ...Level) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.out = out
+ switch b.Flags {
+ case 0:
+ b.Flags = LstdFlags
+ case -1:
+ b.Flags = 0
+ }
+ if len(level) > 0 {
+ b.Level = level[0]
+ }
+ b.createExpression()
+}
+
+func (b *BaseLogger) createExpression() {
+ if len(b.Expression) > 0 {
+ var err error
+ b.regexp, err = regexp.Compile(b.Expression)
+ if err != nil {
+ b.regexp = nil
+ }
+ }
+}
+
+// GetLevel returns the logging level for this logger
+func (b *BaseLogger) GetLevel() Level {
+ return b.Level
+}
+
+// GetStacktraceLevel returns the stacktrace logging level for this logger
+func (b *BaseLogger) GetStacktraceLevel() Level {
+ return b.StacktraceLevel
+}
+
+// Copy of cheap integer to fixed-width decimal to ascii from logger.
+func itoa(buf *[]byte, i int, wid int) {
+ var b [20]byte
+ bp := len(b) - 1
+ for i >= 10 || wid > 1 {
+ wid--
+ q := i / 10
+ b[bp] = byte('0' + i - q*10)
+ bp--
+ i = q
+ }
+ // i < 10
+ b[bp] = byte('0' + i)
+ *buf = append(*buf, b[bp:]...)
+}
+
+func (b *BaseLogger) createMsg(buf *[]byte, event *Event) {
+ *buf = append(*buf, b.Prefix...)
+ t := event.time
+ if b.Flags&(Ldate|Ltime|Lmicroseconds) != 0 {
+ if b.Colorize {
+ *buf = append(*buf, fgCyanBytes...)
+ }
+ if b.Flags&LUTC != 0 {
+ t = t.UTC()
+ }
+ if b.Flags&Ldate != 0 {
+ year, month, day := t.Date()
+ itoa(buf, year, 4)
+ *buf = append(*buf, '/')
+ itoa(buf, int(month), 2)
+ *buf = append(*buf, '/')
+ itoa(buf, day, 2)
+ *buf = append(*buf, ' ')
+ }
+ if b.Flags&(Ltime|Lmicroseconds) != 0 {
+ hour, min, sec := t.Clock()
+ itoa(buf, hour, 2)
+ *buf = append(*buf, ':')
+ itoa(buf, min, 2)
+ *buf = append(*buf, ':')
+ itoa(buf, sec, 2)
+ if b.Flags&Lmicroseconds != 0 {
+ *buf = append(*buf, '.')
+ itoa(buf, t.Nanosecond()/1e3, 6)
+ }
+ *buf = append(*buf, ' ')
+ }
+ if b.Colorize {
+ *buf = append(*buf, resetBytes...)
+ }
+
+ }
+ if b.Flags&(Lshortfile|Llongfile) != 0 {
+ if b.Colorize {
+ *buf = append(*buf, fgGreenBytes...)
+ }
+ file := event.filename
+ if b.Flags&Lmedfile == Lmedfile {
+ startIndex := len(file) - 20
+ if startIndex > 0 {
+ file = "..." + file[startIndex:]
+ }
+ } else if b.Flags&Lshortfile != 0 {
+ startIndex := strings.LastIndexByte(file, '/')
+ if startIndex > 0 && startIndex < len(file) {
+ file = file[startIndex+1:]
+ }
+ }
+ *buf = append(*buf, file...)
+ *buf = append(*buf, ':')
+ itoa(buf, event.line, -1)
+ if b.Flags&(Lfuncname|Lshortfuncname) != 0 {
+ *buf = append(*buf, ':')
+ } else {
+ if b.Colorize {
+ *buf = append(*buf, resetBytes...)
+ }
+ *buf = append(*buf, ' ')
+ }
+ }
+ if b.Flags&(Lfuncname|Lshortfuncname) != 0 {
+ if b.Colorize {
+ *buf = append(*buf, fgGreenBytes...)
+ }
+ funcname := event.caller
+ if b.Flags&Lshortfuncname != 0 {
+ lastIndex := strings.LastIndexByte(funcname, '.')
+ if lastIndex > 0 && len(funcname) > lastIndex+1 {
+ funcname = funcname[lastIndex+1:]
+ }
+ }
+ *buf = append(*buf, funcname...)
+ if b.Colorize {
+ *buf = append(*buf, resetBytes...)
+ }
+ *buf = append(*buf, ' ')
+
+ }
+ if b.Flags&(Llevel|Llevelinitial) != 0 {
+ level := strings.ToUpper(event.level.String())
+ if b.Colorize {
+ *buf = append(*buf, levelToColor[event.level]...)
+ }
+ *buf = append(*buf, '[')
+ if b.Flags&Llevelinitial != 0 {
+ *buf = append(*buf, level[0])
+ } else {
+ *buf = append(*buf, level...)
+ }
+ *buf = append(*buf, ']')
+ if b.Colorize {
+ *buf = append(*buf, resetBytes...)
+ }
+ *buf = append(*buf, ' ')
+ }
+
+ var msg = []byte(event.msg)
+ if len(msg) > 0 && msg[len(msg)-1] == '\n' {
+ msg = msg[:len(msg)-1]
+ }
+
+ pawMode := allowColor
+ if !b.Colorize {
+ pawMode = removeColor
+ }
+
+ baw := byteArrayWriter(*buf)
+ (&protectedANSIWriter{
+ w: &baw,
+ mode: pawMode,
+ }).Write([]byte(msg))
+ *buf = baw
+
+ if event.stacktrace != "" && b.StacktraceLevel <= event.level {
+ lines := bytes.Split([]byte(event.stacktrace), []byte("\n"))
+ if len(lines) > 1 {
+ for _, line := range lines {
+ *buf = append(*buf, "\n\t"...)
+ *buf = append(*buf, line...)
+ }
+ }
+ *buf = append(*buf, '\n')
+ }
+ *buf = append(*buf, '\n')
+}
+
+// LogEvent logs the event to the internal writer
+func (b *BaseLogger) LogEvent(event *Event) error {
+ if b.Level > event.level {
+ return nil
+ }
+
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.Match(event) {
+ return nil
+ }
+ var buf []byte
+ b.createMsg(&buf, event)
+ _, err := b.out.Write(buf)
+ return err
+}
+
+// Match checks if the given event matches the logger's regexp expression
+func (b *BaseLogger) Match(event *Event) bool {
+ if b.regexp == nil {
+ return true
+ }
+ if b.regexp.Match([]byte(fmt.Sprintf("%s:%d:%s", event.filename, event.line, event.caller))) {
+ return true
+ }
+ // Match on the non-colored msg - therefore strip out colors
+ var msg []byte
+ baw := byteArrayWriter(msg)
+ (&protectedANSIWriter{
+ w: &baw,
+ mode: removeColor,
+ }).Write([]byte(event.msg))
+ msg = baw
+ if b.regexp.Match(msg) {
+ return true
+ }
+ return false
+}
+
+// Close the base logger
+func (b *BaseLogger) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.out != nil {
+ b.out.Close()
+ }
+}
+
+// GetName returns empty for these provider loggers
+func (b *BaseLogger) GetName() string {
+ return ""
+}
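
Note: BaseLogger is the shared core that the concrete providers in this PR (conn, console, file, smtp and so on) embed; they supply a writer plus the JSON-configured fields and get level filtering, message formatting, and expression matching for free. A minimal in-package sketch, modelled on the tests that follow; the writer and field values are illustrative, not part of this diff:

    b := BaseLogger{
        Level:           INFO,
        StacktraceLevel: ERROR,
        Flags:           FlagsFromString("date,time,shortfile,levelinitial"),
        Prefix:          "gitea ",
        // only events whose "filename:line:caller" or (color-stripped) message
        // matches this regexp are written
        Expression: "routers/.*",
    }
    // any io.WriteCloser works; real providers pass their connection, file, etc.
    b.createLogger(writer)
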
diff --git a/modules/log/base_test.go b/modules/log/base_test.go
new file mode 100644
index 0000000000..3686c26ce8
--- /dev/null
+++ b/modules/log/base_test.go
@@ -0,0 +1,277 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type CallbackWriteCloser struct {
+ callback func([]byte, bool)
+}
+
+func (c CallbackWriteCloser) Write(p []byte) (int, error) {
+ c.callback(p, false)
+ return len(p), nil
+}
+
+func (c CallbackWriteCloser) Close() error {
+ c.callback(nil, true)
+ return nil
+}
+
+func TestBaseLogger(t *testing.T) {
+ var written []byte
+ var closed bool
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ written = p
+ closed = close
+ },
+ }
+ prefix := "TestPrefix "
+ b := BaseLogger{
+ out: c,
+ Level: INFO,
+ Flags: LstdFlags | LUTC,
+ Prefix: prefix,
+ }
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ assert.Equal(t, INFO, b.GetLevel())
+
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = DEBUG
+ expected = ""
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+
+ event.level = TRACE
+ expected = ""
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+
+ event.level = WARN
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = ERROR
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = CRITICAL
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ b.Close()
+ assert.Equal(t, true, closed)
+}
+
+func TestBaseLoggerDated(t *testing.T) {
+ var written []byte
+ var closed bool
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ written = p
+ closed = close
+ },
+ }
+ prefix := ""
+ b := BaseLogger{
+ out: c,
+ Level: WARN,
+ Flags: Ldate | Ltime | Lmicroseconds | Lshortfile | Llevel,
+ Prefix: prefix,
+ }
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 115, location)
+
+ dateString := date.Format("2006/01/02 15:04:05.000000")
+
+ event := Event{
+ level: WARN,
+ msg: "TEST MESSAGE TEST\n",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ assert.Equal(t, WARN, b.GetLevel())
+
+ expected := fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = INFO
+ expected = ""
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = ERROR
+ expected = fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = DEBUG
+ expected = ""
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = CRITICAL
+ expected = fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = TRACE
+ expected = ""
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ b.Close()
+ assert.Equal(t, true, closed)
+}
+
+func TestBaseLoggerMultiLineNoFlagsRegexp(t *testing.T) {
+ var written []byte
+ var closed bool
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ written = p
+ closed = close
+ },
+ }
+ prefix := ""
+ b := BaseLogger{
+ Level: DEBUG,
+ StacktraceLevel: ERROR,
+ Flags: -1,
+ Prefix: prefix,
+ Expression: "FILENAME",
+ }
+ b.createLogger(c)
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 115, location)
+
+ event := Event{
+ level: DEBUG,
+ msg: "TEST\nMESSAGE\nTEST",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ assert.Equal(t, DEBUG, b.GetLevel())
+
+ expected := "TEST\n\tMESSAGE\n\tTEST\n"
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.filename = "ELSEWHERE"
+
+ b.LogEvent(&event)
+ assert.Equal(t, "", string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.caller = "FILENAME"
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event = Event{
+ level: DEBUG,
+ msg: "TEST\nFILENAME\nTEST",
+ caller: "CALLER",
+ filename: "FULL/ELSEWHERE",
+ line: 1,
+ time: date,
+ }
+ expected = "TEST\n\tFILENAME\n\tTEST\n"
+ b.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+}
+
+func TestBrokenRegexp(t *testing.T) {
+ var closed bool
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ closed = close
+ },
+ }
+
+ b := BaseLogger{
+ Level: DEBUG,
+ StacktraceLevel: ERROR,
+ Flags: -1,
+ Prefix: prefix,
+ Expression: "\\",
+ }
+ b.createLogger(c)
+ assert.Empty(t, b.regexp)
+ b.Close()
+ assert.Equal(t, true, closed)
+}
diff --git a/modules/log/colors.go b/modules/log/colors.go
new file mode 100644
index 0000000000..fa8a664f08
--- /dev/null
+++ b/modules/log/colors.go
@@ -0,0 +1,348 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const escape = "\033"
+
+// ColorAttribute defines a single SGR Code
+type ColorAttribute int
+
+// Base ColorAttributes
+const (
+ Reset ColorAttribute = iota
+ Bold
+ Faint
+ Italic
+ Underline
+ BlinkSlow
+ BlinkRapid
+ ReverseVideo
+ Concealed
+ CrossedOut
+)
+
+// Foreground text colors
+const (
+ FgBlack ColorAttribute = iota + 30
+ FgRed
+ FgGreen
+ FgYellow
+ FgBlue
+ FgMagenta
+ FgCyan
+ FgWhite
+)
+
+// Foreground Hi-Intensity text colors
+const (
+ FgHiBlack ColorAttribute = iota + 90
+ FgHiRed
+ FgHiGreen
+ FgHiYellow
+ FgHiBlue
+ FgHiMagenta
+ FgHiCyan
+ FgHiWhite
+)
+
+// Background text colors
+const (
+ BgBlack ColorAttribute = iota + 40
+ BgRed
+ BgGreen
+ BgYellow
+ BgBlue
+ BgMagenta
+ BgCyan
+ BgWhite
+)
+
+// Background Hi-Intensity text colors
+const (
+ BgHiBlack ColorAttribute = iota + 100
+ BgHiRed
+ BgHiGreen
+ BgHiYellow
+ BgHiBlue
+ BgHiMagenta
+ BgHiCyan
+ BgHiWhite
+)
+
+var colorAttributeToString = map[ColorAttribute]string{
+ Reset: "Reset",
+ Bold: "Bold",
+ Faint: "Faint",
+ Italic: "Italic",
+ Underline: "Underline",
+ BlinkSlow: "BlinkSlow",
+ BlinkRapid: "BlinkRapid",
+ ReverseVideo: "ReverseVideo",
+ Concealed: "Concealed",
+ CrossedOut: "CrossedOut",
+ FgBlack: "FgBlack",
+ FgRed: "FgRed",
+ FgGreen: "FgGreen",
+ FgYellow: "FgYellow",
+ FgBlue: "FgBlue",
+ FgMagenta: "FgMagenta",
+ FgCyan: "FgCyan",
+ FgWhite: "FgWhite",
+ FgHiBlack: "FgHiBlack",
+ FgHiRed: "FgHiRed",
+ FgHiGreen: "FgHiGreen",
+ FgHiYellow: "FgHiYellow",
+ FgHiBlue: "FgHiBlue",
+ FgHiMagenta: "FgHiMagenta",
+ FgHiCyan: "FgHiCyan",
+ FgHiWhite: "FgHiWhite",
+ BgBlack: "BgBlack",
+ BgRed: "BgRed",
+ BgGreen: "BgGreen",
+ BgYellow: "BgYellow",
+ BgBlue: "BgBlue",
+ BgMagenta: "BgMagenta",
+ BgCyan: "BgCyan",
+ BgWhite: "BgWhite",
+ BgHiBlack: "BgHiBlack",
+ BgHiRed: "BgHiRed",
+ BgHiGreen: "BgHiGreen",
+ BgHiYellow: "BgHiYellow",
+ BgHiBlue: "BgHiBlue",
+ BgHiMagenta: "BgHiMagenta",
+ BgHiCyan: "BgHiCyan",
+ BgHiWhite: "BgHiWhite",
+}
+
+func (c *ColorAttribute) String() string {
+ return colorAttributeToString[*c]
+}
+
+var colorAttributeFromString = map[string]ColorAttribute{}
+
+// ColorAttributeFromString will return a ColorAttribute given a string
+func ColorAttributeFromString(from string) ColorAttribute {
+ lowerFrom := strings.TrimSpace(strings.ToLower(from))
+ return colorAttributeFromString[lowerFrom]
+}
+
+// ColorString converts a list of ColorAttributes to a color string
+func ColorString(attrs ...ColorAttribute) string {
+ return string(ColorBytes(attrs...))
+}
+
+// ColorBytes converts a list of ColorAttributes to a byte array
+func ColorBytes(attrs ...ColorAttribute) []byte {
+ bytes := make([]byte, 0, 20)
+ bytes = append(bytes, escape[0], '[')
+ if len(attrs) > 0 {
+ bytes = append(bytes, strconv.Itoa(int(attrs[0]))...)
+ for _, a := range attrs[1:] {
+ bytes = append(bytes, ';')
+ bytes = append(bytes, strconv.Itoa(int(a))...)
+ }
+ } else {
+ bytes = append(bytes, strconv.Itoa(int(Bold))...)
+ }
+ bytes = append(bytes, 'm')
+ return bytes
+}
+
+var levelToColor = map[Level]string{
+ TRACE: ColorString(Bold, FgCyan),
+ DEBUG: ColorString(Bold, FgBlue),
+ INFO: ColorString(Bold, FgGreen),
+ WARN: ColorString(Bold, FgYellow),
+ ERROR: ColorString(Bold, FgRed),
+ CRITICAL: ColorString(Bold, BgMagenta),
+ FATAL: ColorString(Bold, BgRed),
+ NONE: ColorString(Reset),
+}
+
+var resetBytes = ColorBytes(Reset)
+var fgCyanBytes = ColorBytes(FgCyan)
+var fgGreenBytes = ColorBytes(FgGreen)
+var fgBoldBytes = ColorBytes(Bold)
+
+type protectedANSIWriterMode int
+
+const (
+ escapeAll protectedANSIWriterMode = iota
+ allowColor
+ removeColor
+)
+
+type protectedANSIWriter struct {
+ w io.Writer
+ mode protectedANSIWriterMode
+}
+
+// Write will protect against unusual characters
+func (c *protectedANSIWriter) Write(bytes []byte) (int, error) {
+ end := len(bytes)
+ totalWritten := 0
+normalLoop:
+ for i := 0; i < end; {
+ lasti := i
+
+ if c.mode == escapeAll {
+ for i < end && (bytes[i] >= ' ' || bytes[i] == '\n') {
+ i++
+ }
+ } else {
+ for i < end && bytes[i] >= ' ' {
+ i++
+ }
+ }
+
+ if i > lasti {
+ written, err := c.w.Write(bytes[lasti:i])
+ totalWritten = totalWritten + written
+ if err != nil {
+ return totalWritten, err
+ }
+
+ }
+ if i >= end {
+ break
+ }
+
+ // If we're not just escaping all we should prefix all newlines with a \t
+ if c.mode != escapeAll {
+ if bytes[i] == '\n' {
+ written, err := c.w.Write([]byte{'\n', '\t'})
+ if written > 0 {
+ totalWritten++
+ }
+ if err != nil {
+ return totalWritten, err
+ }
+ i++
+ continue normalLoop
+ }
+
+ if bytes[i] == escape[0] && i+1 < end && bytes[i+1] == '[' {
+ for j := i + 2; j < end; j++ {
+ if bytes[j] >= '0' && bytes[j] <= '9' {
+ continue
+ }
+ if bytes[j] == ';' {
+ continue
+ }
+ if bytes[j] == 'm' {
+ if c.mode == allowColor {
+ written, err := c.w.Write(bytes[i : j+1])
+ totalWritten = totalWritten + written
+ if err != nil {
+ return totalWritten, err
+ }
+ } else {
+ totalWritten = j
+ }
+ i = j + 1
+ continue normalLoop
+ }
+ break
+ }
+ }
+ }
+
+ // Process naughty character
+ if _, err := fmt.Fprintf(c.w, `\%#o03d`, bytes[i]); err != nil {
+ return totalWritten, err
+ }
+ i++
+ totalWritten++
+ }
+ return totalWritten, nil
+}
+
+// ColoredValue will Color the provided value
+type ColoredValue struct {
+ ColorBytes *[]byte
+ ResetBytes *[]byte
+ Value *interface{}
+}
+
+// NewColoredValue is a helper function to create a ColoredValue from a Value
+// If no color is provided it defaults to Bold with standard Reset
+// If a ColoredValue is provided it is not changed
+func NewColoredValue(value interface{}, color ...ColorAttribute) *ColoredValue {
+ return NewColoredValuePointer(&value, color...)
+}
+
+// NewColoredValuePointer is a helper function to create a ColoredValue from a Value Pointer
+// If no color is provided it defaults to Bold with standard Reset
+// If a ColoredValue is provided it is not changed
+func NewColoredValuePointer(value *interface{}, color ...ColorAttribute) *ColoredValue {
+ if val, ok := (*value).(*ColoredValue); ok {
+ return val
+ }
+ if len(color) > 0 {
+ bytes := ColorBytes(color...)
+ return &ColoredValue{
+ ColorBytes: &bytes,
+ ResetBytes: &resetBytes,
+ Value: value,
+ }
+ }
+ return &ColoredValue{
+ ColorBytes: &fgBoldBytes,
+ ResetBytes: &resetBytes,
+ Value: value,
+ }
+
+}
+
+// NewColoredValueBytes creates a value from the provided value with color bytes
+// If a ColoredValue is provided it is not changed
+func NewColoredValueBytes(value interface{}, colorBytes *[]byte) *ColoredValue {
+ if val, ok := value.(*ColoredValue); ok {
+ return val
+ }
+ return &ColoredValue{
+ ColorBytes: colorBytes,
+ ResetBytes: &resetBytes,
+ Value: &value,
+ }
+}
+
+// Format will format the provided value and protect against ANSI spoofing within the value
+func (cv *ColoredValue) Format(s fmt.State, c rune) {
+ s.Write([]byte(*cv.ColorBytes))
+ fmt.Fprintf(&protectedANSIWriter{w: s}, fmtString(s, c), *(cv.Value))
+ s.Write([]byte(*cv.ResetBytes))
+}
+
+func fmtString(s fmt.State, c rune) string {
+ var width, precision string
+ base := make([]byte, 0, 8)
+ base = append(base, '%')
+ for _, c := range []byte(" +-#0") {
+ if s.Flag(int(c)) {
+ base = append(base, c)
+ }
+ }
+ if w, ok := s.Width(); ok {
+ width = strconv.Itoa(w)
+ }
+ if p, ok := s.Precision(); ok {
+ precision = "." + strconv.Itoa(p)
+ }
+ return fmt.Sprintf("%s%s%s%c", base, width, precision, c)
+}
+
+func init() {
+ for attr, from := range colorAttributeToString {
+ colorAttributeFromString[strings.ToLower(from)] = attr
+ }
+}
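
Note: because ColoredValue implements fmt.Formatter, a value can be coloured inline in an ordinary Printf-style call, and the protectedANSIWriter above strips or escapes any ANSI sequences smuggled inside the value itself. A small usage sketch (the message and repository name are illustrative):

    // wraps the value in Bold+FgGreen SGR codes and resets afterwards;
    // on a non-colorizing logger the codes are stripped again on output
    log.Info("created repository %v", log.NewColoredValue("my-repo", log.Bold, log.FgGreen))
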
diff --git a/modules/log/conn.go b/modules/log/conn.go
index edb149dfa5..d48bb4b334 100644
--- a/modules/log/conn.go
+++ b/modules/log/conn.go
@@ -1,4 +1,5 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
@@ -7,73 +8,60 @@ package log
import (
"encoding/json"
"io"
- "log"
"net"
)
-// ConnWriter implements LoggerInterface.
-// it writes messages in keep-live tcp connection.
-type ConnWriter struct {
- lg *log.Logger
+type connWriter struct {
innerWriter io.WriteCloser
ReconnectOnMsg bool `json:"reconnectOnMsg"`
Reconnect bool `json:"reconnect"`
Net string `json:"net"`
Addr string `json:"addr"`
- Level int `json:"level"`
-}
-
-// NewConn creates new ConnWrite returning as LoggerInterface.
-func NewConn() LoggerInterface {
- conn := new(ConnWriter)
- conn.Level = TRACE
- return conn
}
-// Init inits connection writer with json config.
-// json config only need key "level".
-func (cw *ConnWriter) Init(jsonconfig string) error {
- return json.Unmarshal([]byte(jsonconfig), cw)
+// Close the inner writer
+func (i *connWriter) Close() error {
+ if i.innerWriter != nil {
+ return i.innerWriter.Close()
+ }
+ return nil
}
-// WriteMsg writes message in connection.
-// if connection is down, try to re-connect.
-func (cw *ConnWriter) WriteMsg(msg string, skip, level int) error {
- if cw.Level > level {
- return nil
- }
- if cw.neededConnectOnMsg() {
- if err := cw.connect(); err != nil {
- return err
+// Write the data to the connection
+func (i *connWriter) Write(p []byte) (int, error) {
+ if i.neededConnectOnMsg() {
+ if err := i.connect(); err != nil {
+ return 0, err
}
}
- if cw.ReconnectOnMsg {
- defer cw.innerWriter.Close()
+ if i.ReconnectOnMsg {
+ defer i.innerWriter.Close()
}
- cw.lg.Println(msg)
- return nil
-}
-// Flush no things for this implementation
-func (cw *ConnWriter) Flush() {
+ return i.innerWriter.Write(p)
}
-// Destroy destroy connection writer and close tcp listener.
-func (cw *ConnWriter) Destroy() {
- if cw.innerWriter == nil {
- return
+func (i *connWriter) neededConnectOnMsg() bool {
+ if i.Reconnect {
+ i.Reconnect = false
+ return true
}
- cw.innerWriter.Close()
+
+ if i.innerWriter == nil {
+ return true
+ }
+
+ return i.ReconnectOnMsg
}
-func (cw *ConnWriter) connect() error {
- if cw.innerWriter != nil {
- cw.innerWriter.Close()
- cw.innerWriter = nil
+func (i *connWriter) connect() error {
+ if i.innerWriter != nil {
+ i.innerWriter.Close()
+ i.innerWriter = nil
}
- conn, err := net.Dial(cw.Net, cw.Addr)
+ conn, err := net.Dial(i.Net, i.Addr)
if err != nil {
return err
}
@@ -82,22 +70,50 @@ func (cw *ConnWriter) connect() error {
tcpConn.SetKeepAlive(true)
}
- cw.innerWriter = conn
- cw.lg = log.New(conn, "", log.Ldate|log.Ltime)
+ i.innerWriter = conn
return nil
}
-func (cw *ConnWriter) neededConnectOnMsg() bool {
- if cw.Reconnect {
- cw.Reconnect = false
- return true
- }
+// ConnLogger implements LoggerProvider.
+// it writes messages in keep-live tcp connection.
+type ConnLogger struct {
+ BaseLogger
+ ReconnectOnMsg bool `json:"reconnectOnMsg"`
+ Reconnect bool `json:"reconnect"`
+ Net string `json:"net"`
+ Addr string `json:"addr"`
+}
- if cw.innerWriter == nil {
- return true
+// NewConn creates new ConnLogger returning as LoggerProvider.
+func NewConn() LoggerProvider {
+ conn := new(ConnLogger)
+ conn.Level = TRACE
+ return conn
+}
+
+// Init inits connection writer with json config.
+// json config only need key "level".
+func (log *ConnLogger) Init(jsonconfig string) error {
+ err := json.Unmarshal([]byte(jsonconfig), log)
+ if err != nil {
+ return err
}
+ log.createLogger(&connWriter{
+ ReconnectOnMsg: log.ReconnectOnMsg,
+ Reconnect: log.Reconnect,
+ Net: log.Net,
+ Addr: log.Addr,
+ }, log.Level)
+ return nil
+}
+
+// Flush does nothing for this implementation
+func (log *ConnLogger) Flush() {
+}
- return cw.ReconnectOnMsg
+// GetName returns the default name for this implementation
+func (log *ConnLogger) GetName() string {
+ return "conn"
}
func init() {
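
Note: the provider keeps its "conn" registration name but is now configured through the same JSON document as the other providers, with the level given as a string and the writing handled by the embedded BaseLogger. A hedged sketch mirroring the Init calls in the tests below; the address and values are illustrative:

    logger := NewConn()
    err := logger.Init(`{"net":"tcp","addr":":7020","level":"info","prefix":"gitea ","reconnect":true,"reconnectOnMsg":false}`)
    // a malformed JSON document is returned to the caller as the unmarshalling error
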
diff --git a/modules/log/conn_test.go b/modules/log/conn_test.go
new file mode 100644
index 0000000000..380a115d96
--- /dev/null
+++ b/modules/log/conn_test.go
@@ -0,0 +1,240 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func listenReadAndClose(t *testing.T, l net.Listener, expected string) {
+ conn, err := l.Accept()
+ assert.NoError(t, err)
+ defer conn.Close()
+ written, err := ioutil.ReadAll(conn)
+
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(written))
+ return
+}
+
+func TestConnLogger(t *testing.T) {
+ var written []byte
+
+ protocol := "tcp"
+ address := ":3099"
+
+ l, err := net.Listen(protocol, address)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+
+ logger := NewConn()
+ connLogger := logger.(*ConnLogger)
+
+ logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, true, true, protocol, address))
+
+ assert.Equal(t, flags, connLogger.Flags)
+ assert.Equal(t, level, connLogger.Level)
+ assert.Equal(t, level, logger.GetLevel())
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ listenReadAndClose(t, l, expected)
+ }()
+ go func() {
+ defer wg.Done()
+ err := logger.LogEvent(&event)
+ assert.NoError(t, err)
+ }()
+ wg.Wait()
+
+ written = written[:0]
+
+ event.level = WARN
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ listenReadAndClose(t, l, expected)
+ }()
+ go func() {
+ defer wg.Done()
+ err := logger.LogEvent(&event)
+ assert.NoError(t, err)
+ }()
+ wg.Wait()
+
+ logger.Close()
+}
+
+func TestConnLoggerBadConfig(t *testing.T) {
+ logger := NewConn()
+
+ err := logger.Init("{")
+ assert.Equal(t, "unexpected end of JSON input", err.Error())
+ logger.Close()
+}
+
+func TestConnLoggerCloseBeforeSend(t *testing.T) {
+ protocol := "tcp"
+ address := ":3099"
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+
+ logger := NewConn()
+
+ logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
+ logger.Close()
+}
+
+func TestConnLoggerFailConnect(t *testing.T) {
+ protocol := "tcp"
+ address := ":3099"
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+
+ logger := NewConn()
+
+ logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
+
+ assert.Equal(t, level, logger.GetLevel())
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ //dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ err := logger.LogEvent(&event)
+ assert.Error(t, err)
+
+ logger.Close()
+}
+
+func TestConnLoggerClose(t *testing.T) {
+ var written []byte
+
+ protocol := "tcp"
+ address := ":3099"
+
+ l, err := net.Listen(protocol, address)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+
+ logger := NewConn()
+ connLogger := logger.(*ConnLogger)
+
+ logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
+
+ assert.Equal(t, flags, connLogger.Flags)
+ assert.Equal(t, level, connLogger.Level)
+ assert.Equal(t, level, logger.GetLevel())
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ err := logger.LogEvent(&event)
+ assert.NoError(t, err)
+ logger.Close()
+ }()
+ go func() {
+ defer wg.Done()
+ listenReadAndClose(t, l, expected)
+ }()
+ wg.Wait()
+
+ logger = NewConn()
+ connLogger = logger.(*ConnLogger)
+
+ logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, true, protocol, address))
+
+ assert.Equal(t, flags, connLogger.Flags)
+ assert.Equal(t, level, connLogger.Level)
+ assert.Equal(t, level, logger.GetLevel())
+
+ written = written[:0]
+
+ event.level = WARN
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ listenReadAndClose(t, l, expected)
+ }()
+ go func() {
+ defer wg.Done()
+ err := logger.LogEvent(&event)
+ assert.NoError(t, err)
+ logger.Close()
+
+ }()
+ wg.Wait()
+ logger.Flush()
+ logger.Close()
+}
diff --git a/modules/log/console.go b/modules/log/console.go
index 04991243fc..cf2dfa499f 100644
--- a/modules/log/console.go
+++ b/modules/log/console.go
@@ -1,4 +1,5 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
@@ -6,75 +7,72 @@ package log
import (
"encoding/json"
- "log"
+ "io"
"os"
- "runtime"
)
-// Brush brush type
-type Brush func(string) string
+// CanColorStdout reports if we can color the Stdout
+// Although we could do terminal sniffing and the like - in reality
+// most tools on *nix are happy to display ansi colors.
+// We will terminal sniff on Windows in console_windows.go
+var CanColorStdout = true
-// NewBrush create a brush according color
-func NewBrush(color string) Brush {
- pre := "\033["
- reset := "\033[0m"
- return func(text string) string {
- return pre + color + "m" + text + reset
- }
+// CanColorStderr reports if we can color the Stderr
+var CanColorStderr = true
+
+type nopWriteCloser struct {
+ w io.WriteCloser
}
-var colors = []Brush{
- NewBrush("1;36"), // Trace cyan
- NewBrush("1;34"), // Debug blue
- NewBrush("1;32"), // Info green
- NewBrush("1;33"), // Warn yellow
- NewBrush("1;31"), // Error red
- NewBrush("1;35"), // Critical purple
- NewBrush("1;31"), // Fatal red
+func (n *nopWriteCloser) Write(p []byte) (int, error) {
+ return n.w.Write(p)
}
-// ConsoleWriter implements LoggerInterface and writes messages to terminal.
-type ConsoleWriter struct {
- lg *log.Logger
- Level int `json:"level"`
+func (n *nopWriteCloser) Close() error {
+ return nil
}
-// NewConsole create ConsoleWriter returning as LoggerInterface.
-func NewConsole() LoggerInterface {
- return &ConsoleWriter{
- lg: log.New(os.Stdout, "", log.Ldate|log.Ltime),
- Level: TRACE,
- }
+// ConsoleLogger implements LoggerProvider and writes messages to terminal.
+type ConsoleLogger struct {
+ BaseLogger
+ Stderr bool `json:"stderr"`
}
-// Init inits connection writer with json config.
-// json config only need key "level".
-func (cw *ConsoleWriter) Init(config string) error {
- return json.Unmarshal([]byte(config), cw)
+// NewConsoleLogger create ConsoleLogger returning as LoggerProvider.
+func NewConsoleLogger() LoggerProvider {
+ log := &ConsoleLogger{}
+ log.createLogger(&nopWriteCloser{
+ w: os.Stdout,
+ })
+ return log
}
-// WriteMsg writes message in console.
-// if OS is windows, ignore colors.
-func (cw *ConsoleWriter) WriteMsg(msg string, skip, level int) error {
- if cw.Level > level {
- return nil
+// Init inits connection writer with json config.
+// json config only need key "level".
+func (log *ConsoleLogger) Init(config string) error {
+ err := json.Unmarshal([]byte(config), log)
+ if err != nil {
+ return err
}
- if runtime.GOOS == "windows" {
- cw.lg.Println(msg)
+ if log.Stderr {
+ log.createLogger(&nopWriteCloser{
+ w: os.Stderr,
+ })
} else {
- cw.lg.Println(colors[level](msg))
+ log.createLogger(log.out)
}
return nil
}
// Flush when log should be flushed
-func (cw *ConsoleWriter) Flush() {
+func (log *ConsoleLogger) Flush() {
}
-// Destroy when writer is destroy
-func (cw *ConsoleWriter) Destroy() {
+// GetName returns the default name for this implementation
+func (log *ConsoleLogger) GetName() string {
+ return "console"
}
func init() {
- Register("console", NewConsole)
+ Register("console", NewConsoleLogger)
}
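A rough in-package usage sketch for the console provider above (the helper name and config values are illustrative, not part of the patch; the tests that follow exercise the same calls):

package log

import "time"

// exampleConsoleProvider configures the console provider directly;
// "stderr":true routes output to os.Stderr instead of os.Stdout.
func exampleConsoleProvider() error {
	provider := NewConsoleLogger()
	if err := provider.Init(`{"level":"warn","stderr":true}`); err != nil {
		return err
	}
	defer provider.Close()
	// Events below the configured level are dropped by the provider.
	provider.LogEvent(&Event{level: ERROR, msg: "something went wrong", time: time.Now()})
	provider.Flush()
	return nil
}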
diff --git a/modules/log/console_test.go b/modules/log/console_test.go
new file mode 100644
index 0000000000..a028b5b875
--- /dev/null
+++ b/modules/log/console_test.go
@@ -0,0 +1,137 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConsoleLoggerBadConfig(t *testing.T) {
+ logger := NewConsoleLogger()
+
+ err := logger.Init("{")
+ assert.Equal(t, "unexpected end of JSON input", err.Error())
+ logger.Close()
+}
+
+func TestConsoleLoggerMinimalConfig(t *testing.T) {
+ for _, level := range Levels() {
+ var written []byte
+ var closed bool
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ written = p
+ closed = close
+ },
+ }
+ prefix := ""
+ flags := LstdFlags
+
+ cw := NewConsoleLogger()
+ realCW := cw.(*ConsoleLogger)
+ cw.Init(fmt.Sprintf("{\"level\":\"%s\"}", level))
+ nwc := realCW.out.(*nopWriteCloser)
+ nwc.w = c
+
+ assert.Equal(t, flags, realCW.Flags)
+ assert.Equal(t, FromString(level), realCW.Level)
+ assert.Equal(t, FromString(level), cw.GetLevel())
+ assert.Equal(t, prefix, realCW.Prefix)
+ assert.Equal(t, "", string(written))
+ cw.Close()
+ assert.Equal(t, false, closed)
+
+ }
+}
+
+func TestConsoleLogger(t *testing.T) {
+ var written []byte
+ var closed bool
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ written = p
+ closed = close
+ },
+ }
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+
+ cw := NewConsoleLogger()
+ realCW := cw.(*ConsoleLogger)
+ realCW.Colorize = false
+ nwc := realCW.out.(*nopWriteCloser)
+ nwc.w = c
+
+ cw.Init(fmt.Sprintf("{\"expression\":\"FILENAME\",\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d}", prefix, level.String(), flags))
+
+ assert.Equal(t, flags, realCW.Flags)
+ assert.Equal(t, level, realCW.Level)
+ assert.Equal(t, level, cw.GetLevel())
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ cw.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ event.level = DEBUG
+ expected = ""
+ cw.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+
+ event.level = TRACE
+ expected = ""
+ cw.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+
+ nonMatchEvent := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FI_LENAME",
+ line: 1,
+ time: date,
+ }
+ event.level = INFO
+ expected = ""
+ cw.LogEvent(&nonMatchEvent)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+
+ event.level = WARN
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ cw.LogEvent(&event)
+ assert.Equal(t, expected, string(written))
+ assert.Equal(t, false, closed)
+ written = written[:0]
+
+ cw.Close()
+ assert.Equal(t, false, closed)
+}
diff --git a/modules/log/console_windows.go b/modules/log/console_windows.go
new file mode 100644
index 0000000000..61469aacf8
--- /dev/null
+++ b/modules/log/console_windows.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "os"
+
+ "github.com/mattn/go-isatty"
+ "golang.org/x/sys/windows"
+)
+
+func enableVTMode(console windows.Handle) bool {
+ mode := uint32(0)
+ err := windows.GetConsoleMode(console, &mode)
+ if err != nil {
+ return false
+ }
+
+	// EnableVirtualTerminalProcessing is the console mode that allows ANSI code
+	// interpretation on the console. See:
+	// https://docs.microsoft.com/en-us/windows/console/setconsolemode
+	// It only works on Windows 10. Earlier terminals will return an error, which
+	// we handle by not coloring.
+ mode = mode | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ err = windows.SetConsoleMode(console, mode)
+ return err == nil
+}
+
+func init() {
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ CanColorStdout = enableVTMode(windows.Stdout)
+ } else {
+		CanColorStdout = isatty.IsCygwinTerminal(os.Stdout.Fd())
+ }
+
+ if isatty.IsTerminal(os.Stderr.Fd()) {
+ CanColorStderr = enableVTMode(windows.Stderr)
+ } else {
+ CanColorStderr = isatty.IsCygwinTerminal(os.Stderr.Fd())
+ }
+}
diff --git a/modules/log/errors.go b/modules/log/errors.go
new file mode 100644
index 0000000000..1fe54d4265
--- /dev/null
+++ b/modules/log/errors.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import "fmt"
+
+// ErrTimeout represents a "Timeout" kind of error.
+type ErrTimeout struct {
+ Name string
+ Provider string
+}
+
+// IsErrTimeout checks if an error is an ErrTimeout.
+func IsErrTimeout(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(ErrTimeout)
+ return ok
+}
+
+func (err ErrTimeout) Error() string {
+ return fmt.Sprintf("Log Timeout for %s (%s)", err.Name, err.Provider)
+}
+
+// ErrUnknownProvider represents an "Unknown Provider" kind of error.
+type ErrUnknownProvider struct {
+ Provider string
+}
+
+// IsErrUnknownProvider checks if an error is an ErrUnknownProvider.
+func IsErrUnknownProvider(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(ErrUnknownProvider)
+ return ok
+}
+
+func (err ErrUnknownProvider) Error() string {
+ return fmt.Sprintf("Unknown Log Provider \"%s\" (Was it registered?)", err.Provider)
+}
+
+// ErrDuplicateName represents a Duplicate Name error
+type ErrDuplicateName struct {
+ Name string
+}
+
+// IsErrDuplicateName checks if an error is an ErrDuplicateName.
+func IsErrDuplicateName(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(ErrDuplicateName)
+ return ok
+}
+
+func (err ErrDuplicateName) Error() string {
+ return fmt.Sprintf("Duplicate named logger: %s", err.Name)
+}
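A brief sketch of how these typed errors are meant to be matched via their IsErr* helpers (the helper name is illustrative):

package log

import "fmt"

// describeLogError maps the typed errors above to human-readable causes.
func describeLogError(err error) string {
	switch {
	case IsErrTimeout(err):
		return "the logger's event queue stayed full for too long"
	case IsErrUnknownProvider(err):
		return "the requested provider was never registered with Register()"
	case IsErrDuplicateName(err):
		return "a sublogger with this name already exists"
	default:
		return fmt.Sprintf("unexpected error: %v", err)
	}
}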
diff --git a/modules/log/event.go b/modules/log/event.go
new file mode 100644
index 0000000000..2ec1f9587d
--- /dev/null
+++ b/modules/log/event.go
@@ -0,0 +1,335 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+// Event represents a logging event
+type Event struct {
+ level Level
+ msg string
+ caller string
+ filename string
+ line int
+ time time.Time
+ stacktrace string
+}
+
+// EventLogger represents the behaviours of a logger
+type EventLogger interface {
+ LogEvent(event *Event) error
+ Close()
+ Flush()
+ GetLevel() Level
+ GetStacktraceLevel() Level
+ GetName() string
+}
+
+// ChannelledLog represents a cached channel to a LoggerProvider
+type ChannelledLog struct {
+ name string
+ provider string
+ queue chan *Event
+ loggerProvider LoggerProvider
+ flush chan bool
+ close chan bool
+ closed chan bool
+}
+
+// NewChannelledLog creates a new ChannelledLog instance with the given logger provider and config.
+func NewChannelledLog(name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
+ if log, ok := providers[provider]; ok {
+ l := &ChannelledLog{
+ queue: make(chan *Event, bufferLength),
+ flush: make(chan bool),
+ close: make(chan bool),
+ closed: make(chan bool),
+ }
+ l.loggerProvider = log()
+ if err := l.loggerProvider.Init(config); err != nil {
+ return nil, err
+ }
+ l.name = name
+ l.provider = provider
+ go l.Start()
+ return l, nil
+ }
+ return nil, ErrUnknownProvider{provider}
+}
+
+// Start processing the ChannelledLog
+func (l *ChannelledLog) Start() {
+ for {
+ select {
+ case event, ok := <-l.queue:
+ if !ok {
+ l.closeLogger()
+ return
+ }
+ l.loggerProvider.LogEvent(event)
+ case _, ok := <-l.flush:
+ if !ok {
+ l.closeLogger()
+ return
+ }
+ l.loggerProvider.Flush()
+		case <-l.close:
+ l.closeLogger()
+ return
+ }
+ }
+}
+
+// LogEvent logs an event to this ChannelledLog
+func (l *ChannelledLog) LogEvent(event *Event) error {
+ select {
+ case l.queue <- event:
+ return nil
+ case <-time.After(60 * time.Second):
+ // We're blocked!
+ return ErrTimeout{
+ Name: l.name,
+ Provider: l.provider,
+ }
+ }
+}
+
+func (l *ChannelledLog) closeLogger() {
+ l.loggerProvider.Flush()
+ l.loggerProvider.Close()
+ l.closed <- true
+ return
+}
+
+// Close this ChannelledLog
+func (l *ChannelledLog) Close() {
+ l.close <- true
+ <-l.closed
+}
+
+// Flush this ChannelledLog
+func (l *ChannelledLog) Flush() {
+ l.flush <- true
+}
+
+// GetLevel gets the level of this ChannelledLog
+func (l *ChannelledLog) GetLevel() Level {
+ return l.loggerProvider.GetLevel()
+}
+
+// GetStacktraceLevel gets the stacktrace level of this ChannelledLog
+func (l *ChannelledLog) GetStacktraceLevel() Level {
+ return l.loggerProvider.GetStacktraceLevel()
+}
+
+// GetName returns the name of this ChannelledLog
+func (l *ChannelledLog) GetName() string {
+ return l.name
+}
+
+// MultiChannelledLog represents a cached channel to a LoggerProvider
+type MultiChannelledLog struct {
+ name string
+ bufferLength int64
+ queue chan *Event
+ mutex sync.Mutex
+ loggers map[string]EventLogger
+ flush chan bool
+ close chan bool
+ started bool
+ level Level
+ stacktraceLevel Level
+ closed chan bool
+}
+
+// NewMultiChannelledLog creates a new MultiChannelledLog with the given name and buffer length.
+func NewMultiChannelledLog(name string, bufferLength int64) *MultiChannelledLog {
+ m := &MultiChannelledLog{
+ name: name,
+ queue: make(chan *Event, bufferLength),
+ flush: make(chan bool),
+ bufferLength: bufferLength,
+ loggers: make(map[string]EventLogger),
+ level: NONE,
+ stacktraceLevel: NONE,
+ close: make(chan bool),
+ closed: make(chan bool),
+ }
+ return m
+}
+
+// AddLogger adds a logger to this MultiChannelledLog
+func (m *MultiChannelledLog) AddLogger(logger EventLogger) error {
+ m.mutex.Lock()
+ name := logger.GetName()
+ if _, has := m.loggers[name]; has {
+ m.mutex.Unlock()
+ return ErrDuplicateName{name}
+ }
+ m.loggers[name] = logger
+ if logger.GetLevel() < m.level {
+ m.level = logger.GetLevel()
+ }
+ if logger.GetStacktraceLevel() < m.stacktraceLevel {
+ m.stacktraceLevel = logger.GetStacktraceLevel()
+ }
+ m.mutex.Unlock()
+ go m.Start()
+ return nil
+}
+
+// DelLogger removes a sub logger from this MultiChannelledLog
+// NB: If you delete the last sublogger this logger will simply drop
+// log events
+func (m *MultiChannelledLog) DelLogger(name string) bool {
+ m.mutex.Lock()
+ logger, has := m.loggers[name]
+ if !has {
+ m.mutex.Unlock()
+ return false
+ }
+ delete(m.loggers, name)
+ m.internalResetLevel()
+ m.mutex.Unlock()
+ logger.Flush()
+ logger.Close()
+ return true
+}
+
+// GetEventLogger returns a sub logger from this MultiChannelledLog
+func (m *MultiChannelledLog) GetEventLogger(name string) EventLogger {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ return m.loggers[name]
+}
+
+// GetEventLoggerNames returns a list of names
+func (m *MultiChannelledLog) GetEventLoggerNames() []string {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ var keys []string
+ for k := range m.loggers {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func (m *MultiChannelledLog) closeLoggers() {
+ m.mutex.Lock()
+ for _, logger := range m.loggers {
+ logger.Flush()
+ logger.Close()
+ }
+ m.mutex.Unlock()
+ m.closed <- true
+ return
+}
+
+// Start processing the MultiChannelledLog
+func (m *MultiChannelledLog) Start() {
+ m.mutex.Lock()
+ if m.started {
+ m.mutex.Unlock()
+ return
+ }
+ m.started = true
+ m.mutex.Unlock()
+ for {
+ select {
+ case event, ok := <-m.queue:
+ if !ok {
+ m.closeLoggers()
+ return
+ }
+ m.mutex.Lock()
+ for _, logger := range m.loggers {
+ err := logger.LogEvent(event)
+ if err != nil {
+ fmt.Println(err)
+ }
+ }
+ m.mutex.Unlock()
+ case _, ok := <-m.flush:
+ if !ok {
+ m.closeLoggers()
+ return
+ }
+ m.mutex.Lock()
+ for _, logger := range m.loggers {
+ logger.Flush()
+ }
+ m.mutex.Unlock()
+ case <-m.close:
+ m.closeLoggers()
+ return
+ }
+ }
+}
+
+// LogEvent logs an event to this MultiChannelledLog
+func (m *MultiChannelledLog) LogEvent(event *Event) error {
+ select {
+ case m.queue <- event:
+ return nil
+ case <-time.After(60 * time.Second):
+ // We're blocked!
+ return ErrTimeout{
+ Name: m.name,
+ Provider: "MultiChannelledLog",
+ }
+ }
+}
+
+// Close this MultiChannelledLog
+func (m *MultiChannelledLog) Close() {
+ m.close <- true
+ <-m.closed
+}
+
+// Flush this MultiChannelledLog
+func (m *MultiChannelledLog) Flush() {
+ m.flush <- true
+}
+
+// GetLevel gets the level of this MultiChannelledLog
+func (m *MultiChannelledLog) GetLevel() Level {
+ return m.level
+}
+
+// GetStacktraceLevel gets the level of this MultiChannelledLog
+func (m *MultiChannelledLog) GetStacktraceLevel() Level {
+ return m.stacktraceLevel
+}
+
+func (m *MultiChannelledLog) internalResetLevel() Level {
+	m.level = NONE
+	m.stacktraceLevel = NONE
+ for _, logger := range m.loggers {
+ level := logger.GetLevel()
+ if level < m.level {
+ m.level = level
+ }
+ level = logger.GetStacktraceLevel()
+ if level < m.stacktraceLevel {
+ m.stacktraceLevel = level
+ }
+ }
+ return m.level
+}
+
+// ResetLevel will reset the level of this MultiChannelledLog
+func (m *MultiChannelledLog) ResetLevel() Level {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ return m.internalResetLevel()
+}
+
+// GetName gets the name of this MultiChannelledLog
+func (m *MultiChannelledLog) GetName() string {
+ return m.name
+}
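A minimal in-package sketch of wiring a ChannelledLog into a MultiChannelledLog, mirroring what Logger.SetLogger does later in this patch (the function name and config are illustrative):

package log

import "time"

// exampleMultiChannelled fans events out to every registered sublogger.
func exampleMultiChannelled() error {
	m := NewMultiChannelledLog("example", 1000)
	sub, err := NewChannelledLog("console", "console", `{"level":"info"}`, 1000)
	if err != nil {
		return err
	}
	if err := m.AddLogger(sub); err != nil {
		return err
	}
	defer m.Close()
	// LogEvent returns ErrTimeout if the queue stays blocked for 60 seconds.
	return m.LogEvent(&Event{level: INFO, msg: "fan-out example", time: time.Now()})
}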
diff --git a/modules/log/file.go b/modules/log/file.go
index 38a28fc651..5c219c3910 100644
--- a/modules/log/file.go
+++ b/modules/log/file.go
@@ -5,10 +5,11 @@
package log
import (
+ "bufio"
+ "compress/gzip"
"encoding/json"
"errors"
"fmt"
- "log"
"os"
"path/filepath"
"strings"
@@ -16,10 +17,10 @@ import (
"time"
)
-// FileLogWriter implements LoggerInterface.
+// FileLogger implements LoggerProvider.
// It writes messages by lines limit, file size limit, or time frequency.
-type FileLogWriter struct {
- *log.Logger
+type FileLogger struct {
+ BaseLogger
mw *MuxWriter
// The opened file
Filename string `json:"filename"`
@@ -35,47 +36,57 @@ type FileLogWriter struct {
Rotate bool `json:"rotate"`
- startLock sync.Mutex // Only one log can write to the file
+ Compress bool `json:"compress"`
+ CompressionLevel int `json:"compressionLevel"`
- Level int `json:"level"`
+ startLock sync.Mutex // Only one log can write to the file
}
// MuxWriter an *os.File writer with locker.
type MuxWriter struct {
- sync.Mutex
- fd *os.File
+ mu sync.Mutex
+ fd *os.File
+ owner *FileLogger
}
// Write writes to os.File.
-func (l *MuxWriter) Write(b []byte) (int, error) {
- l.Lock()
- defer l.Unlock()
- return l.fd.Write(b)
+func (mw *MuxWriter) Write(b []byte) (int, error) {
+ mw.mu.Lock()
+ defer mw.mu.Unlock()
+ mw.owner.docheck(len(b))
+ return mw.fd.Write(b)
+}
+
+// Close the internal writer
+func (mw *MuxWriter) Close() error {
+ return mw.fd.Close()
}
// SetFd sets os.File in writer.
-func (l *MuxWriter) SetFd(fd *os.File) {
- if l.fd != nil {
- l.fd.Close()
+func (mw *MuxWriter) SetFd(fd *os.File) {
+ if mw.fd != nil {
+ mw.fd.Close()
}
- l.fd = fd
-}
-
-// NewFileWriter create a FileLogWriter returning as LoggerInterface.
-func NewFileWriter() LoggerInterface {
- w := &FileLogWriter{
- Filename: "",
- Maxsize: 1 << 28, //256 MB
- Daily: true,
- Maxdays: 7,
- Rotate: true,
- Level: TRACE,
+ mw.fd = fd
+}
+
+// NewFileLogger creates a FileLogger, returning it as a LoggerProvider.
+func NewFileLogger() LoggerProvider {
+ log := &FileLogger{
+ Filename: "",
+ Maxsize: 1 << 28, //256 MB
+ Daily: true,
+ Maxdays: 7,
+ Rotate: true,
+ Compress: true,
+ CompressionLevel: gzip.DefaultCompression,
}
+ log.Level = TRACE
// use MuxWriter instead direct use os.File for lock write when rotate
- w.mw = new(MuxWriter)
- // set MuxWriter as Logger's io.Writer
- w.Logger = log.New(w.mw, "", log.Ldate|log.Ltime)
- return w
+ log.mw = new(MuxWriter)
+ log.mw.owner = log
+
+ return log
}
// Init file logger with json config.
@@ -87,109 +98,131 @@ func NewFileWriter() LoggerInterface {
// "maxdays":15,
// "rotate":true
// }
-func (w *FileLogWriter) Init(config string) error {
- if err := json.Unmarshal([]byte(config), w); err != nil {
+func (log *FileLogger) Init(config string) error {
+ if err := json.Unmarshal([]byte(config), log); err != nil {
return err
}
- if len(w.Filename) == 0 {
+ if len(log.Filename) == 0 {
return errors.New("config must have filename")
}
- return w.StartLogger()
+ // set MuxWriter as Logger's io.Writer
+ log.createLogger(log.mw)
+ return log.StartLogger()
}
// StartLogger start file logger. create log file and set to locker-inside file writer.
-func (w *FileLogWriter) StartLogger() error {
- fd, err := w.createLogFile()
+func (log *FileLogger) StartLogger() error {
+ fd, err := log.createLogFile()
if err != nil {
return err
}
- w.mw.SetFd(fd)
- return w.initFd()
+ log.mw.SetFd(fd)
+ return log.initFd()
}
-func (w *FileLogWriter) docheck(size int) {
- w.startLock.Lock()
- defer w.startLock.Unlock()
- if w.Rotate && ((w.Maxsize > 0 && w.maxsizeCursize >= w.Maxsize) ||
- (w.Daily && time.Now().Day() != w.dailyOpenDate)) {
- if err := w.DoRotate(); err != nil {
- fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
+func (log *FileLogger) docheck(size int) {
+ log.startLock.Lock()
+ defer log.startLock.Unlock()
+ if log.Rotate && ((log.Maxsize > 0 && log.maxsizeCursize >= log.Maxsize) ||
+ (log.Daily && time.Now().Day() != log.dailyOpenDate)) {
+ if err := log.DoRotate(); err != nil {
+ fmt.Fprintf(os.Stderr, "FileLogger(%q): %s\n", log.Filename, err)
return
}
}
- w.maxsizeCursize += size
+ log.maxsizeCursize += size
}
-// WriteMsg writes logger message into file.
-func (w *FileLogWriter) WriteMsg(msg string, skip, level int) error {
- if level < w.Level {
- return nil
- }
- n := 24 + len(msg) // 24 stand for the length "2013/06/23 21:00:22 [T] "
- w.docheck(n)
- w.Logger.Println(msg)
- return nil
-}
-
-func (w *FileLogWriter) createLogFile() (*os.File, error) {
+func (log *FileLogger) createLogFile() (*os.File, error) {
// Open the log file
- return os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
+ return os.OpenFile(log.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
}
-func (w *FileLogWriter) initFd() error {
- fd := w.mw.fd
+func (log *FileLogger) initFd() error {
+ fd := log.mw.fd
finfo, err := fd.Stat()
if err != nil {
return fmt.Errorf("get stat: %v", err)
}
- w.maxsizeCursize = int(finfo.Size())
- w.dailyOpenDate = time.Now().Day()
+ log.maxsizeCursize = int(finfo.Size())
+ log.dailyOpenDate = time.Now().Day()
return nil
}
// DoRotate means it need to write file in new file.
// new file name like xx.log.2013-01-01.2
-func (w *FileLogWriter) DoRotate() error {
- _, err := os.Lstat(w.Filename)
+func (log *FileLogger) DoRotate() error {
+ _, err := os.Lstat(log.Filename)
if err == nil { // file exists
// Find the next available number
num := 1
fname := ""
for ; err == nil && num <= 999; num++ {
- fname = w.Filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num)
+ fname = log.Filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num)
_, err = os.Lstat(fname)
+ if log.Compress && err != nil {
+ _, err = os.Lstat(fname + ".gz")
+ }
}
// return error if the last file checked still existed
if err == nil {
- return fmt.Errorf("rotate: cannot find free log number to rename %s", w.Filename)
+ return fmt.Errorf("rotate: cannot find free log number to rename %s", log.Filename)
}
- // block Logger's io.Writer
- w.mw.Lock()
- defer w.mw.Unlock()
-
- fd := w.mw.fd
+ fd := log.mw.fd
fd.Close()
// close fd before rename
// Rename the file to its newfound home
- if err = os.Rename(w.Filename, fname); err != nil {
+ if err = os.Rename(log.Filename, fname); err != nil {
return fmt.Errorf("Rotate: %v", err)
}
+ if log.Compress {
+ go compressOldLogFile(fname, log.CompressionLevel)
+ }
+
// re-start logger
- if err = w.StartLogger(); err != nil {
+ if err = log.StartLogger(); err != nil {
return fmt.Errorf("Rotate StartLogger: %v", err)
}
- go w.deleteOldLog()
+ go log.deleteOldLog()
}
return nil
}
-func (w *FileLogWriter) deleteOldLog() {
- dir := filepath.Dir(w.Filename)
+func compressOldLogFile(fname string, compressionLevel int) error {
+ reader, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ defer reader.Close()
+ buffer := bufio.NewReader(reader)
+ fw, err := os.OpenFile(fname+".gz", os.O_WRONLY|os.O_CREATE, 0660)
+ if err != nil {
+ return err
+ }
+ defer fw.Close()
+ zw, err := gzip.NewWriterLevel(fw, compressionLevel)
+ if err != nil {
+ return err
+ }
+ defer zw.Close()
+ _, err = buffer.WriteTo(zw)
+ if err != nil {
+ zw.Close()
+ fw.Close()
+ os.Remove(fname + ".gz")
+ return err
+ }
+ reader.Close()
+ return os.Remove(fname)
+}
+
+func (log *FileLogger) deleteOldLog() {
+ dir := filepath.Dir(log.Filename)
filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
defer func() {
if r := recover(); r != nil {
@@ -197,8 +230,8 @@ func (w *FileLogWriter) deleteOldLog() {
}
}()
- if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*w.Maxdays) {
- if strings.HasPrefix(filepath.Base(path), filepath.Base(w.Filename)) {
+ if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*log.Maxdays) {
+ if strings.HasPrefix(filepath.Base(path), filepath.Base(log.Filename)) {
if err := os.Remove(path); err != nil {
returnErr = fmt.Errorf("Failed to remove %s: %v", path, err)
@@ -209,18 +242,18 @@ func (w *FileLogWriter) deleteOldLog() {
})
}
-// Destroy destroy file logger, close file writer.
-func (w *FileLogWriter) Destroy() {
- w.mw.fd.Close()
-}
-
// Flush flush file logger.
// there are no buffering messages in file logger in memory.
// flush file means sync file from disk.
-func (w *FileLogWriter) Flush() {
- w.mw.fd.Sync()
+func (log *FileLogger) Flush() {
+ log.mw.fd.Sync()
+}
+
+// GetName returns the default name for this implementation
+func (log *FileLogger) GetName() string {
+ return "file"
}
func init() {
- Register("file", NewFileWriter)
+ Register("file", NewFileLogger)
}
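A sketch of the JSON config the file provider accepts after this change, including the new compression keys (the helper name, path and sizes are placeholders):

package log

// exampleFileProvider shows the rotation and compression knobs added above.
func exampleFileProvider() error {
	provider := NewFileLogger()
	err := provider.Init(`{
		"filename": "/var/log/gitea/gitea.log",
		"level": "info",
		"rotate": true,
		"maxsize": 268435456,
		"maxdays": 7,
		"compress": true,
		"compressionLevel": -1
	}`)
	if err != nil {
		return err
	}
	defer provider.Close()
	provider.Flush()
	return nil
}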
diff --git a/modules/log/file_test.go b/modules/log/file_test.go
new file mode 100644
index 0000000000..9527069079
--- /dev/null
+++ b/modules/log/file_test.go
@@ -0,0 +1,247 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFileLoggerFails(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "TestFileLogger")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+ //filename := filepath.Join(tmpDir, "test.log")
+
+ fileLogger := NewFileLogger()
+ //realFileLogger, ok := fileLogger.(*FileLogger)
+ //assert.Equal(t, true, ok)
+
+ // Fail if there is bad json
+ err = fileLogger.Init("{")
+ assert.Error(t, err)
+
+ // Fail if there is no filename
+ err = fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\"}", prefix, level.String(), flags, ""))
+ assert.Error(t, err)
+
+ // Fail if the file isn't a filename
+ err = fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\"}", prefix, level.String(), flags, filepath.ToSlash(tmpDir)))
+ assert.Error(t, err)
+
+}
+
+func TestFileLogger(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "TestFileLogger")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+ filename := filepath.Join(tmpDir, "test.log")
+
+ fileLogger := NewFileLogger()
+ realFileLogger, ok := fileLogger.(*FileLogger)
+ assert.Equal(t, true, ok)
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+
+ fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\",\"maxsize\":%d,\"compress\":false}", prefix, level.String(), flags, filepath.ToSlash(filename), len(expected)*2))
+
+ assert.Equal(t, flags, realFileLogger.Flags)
+ assert.Equal(t, level, realFileLogger.Level)
+ assert.Equal(t, level, fileLogger.GetLevel())
+
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err := ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ event.level = DEBUG
+ expected = expected + ""
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ event.level = TRACE
+ expected = expected + ""
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ event.level = WARN
+ expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ // Should rotate
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1))
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ assert.Equal(t, expected, string(logData))
+
+ for num := 2; num <= 999; num++ {
+ file, err := os.OpenFile(filename+fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num), os.O_RDONLY|os.O_CREATE, 0666)
+ assert.NoError(t, err)
+ file.Close()
+ }
+ err = realFileLogger.DoRotate()
+ assert.Error(t, err)
+
+ expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ // Should fail to rotate
+ expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ fileLogger.Close()
+}
+
+func TestCompressFileLogger(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "TestFileLogger")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+ filename := filepath.Join(tmpDir, "test.log")
+
+ fileLogger := NewFileLogger()
+ realFileLogger, ok := fileLogger.(*FileLogger)
+ assert.Equal(t, true, ok)
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+
+ fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\",\"maxsize\":%d,\"compress\":true}", prefix, level.String(), flags, filepath.ToSlash(filename), len(expected)*2))
+
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err := ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ event.level = WARN
+ expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+ logData, err = ioutil.ReadFile(filename)
+ assert.NoError(t, err)
+ assert.Equal(t, expected, string(logData))
+
+ // Should rotate
+ fileLogger.LogEvent(&event)
+ fileLogger.Flush()
+
+ for num := 2; num <= 999; num++ {
+ file, err := os.OpenFile(filename+fmt.Sprintf(".%s.%03d.gz", time.Now().Format("2006-01-02"), num), os.O_RDONLY|os.O_CREATE, 0666)
+ assert.NoError(t, err)
+ file.Close()
+ }
+ err = realFileLogger.DoRotate()
+ assert.Error(t, err)
+}
+
+func TestCompressOldFile(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "TestFileLogger")
+ assert.NoError(t, err)
+ defer os.RemoveAll(tmpDir)
+ fname := filepath.Join(tmpDir, "test")
+ nonGzip := filepath.Join(tmpDir, "test-nonGzip")
+
+ f, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY, 0660)
+ assert.NoError(t, err)
+ ng, err := os.OpenFile(nonGzip, os.O_CREATE|os.O_WRONLY, 0660)
+ assert.NoError(t, err)
+
+ for i := 0; i < 999; i++ {
+ f.WriteString("This is a test file\n")
+ ng.WriteString("This is a test file\n")
+ }
+ f.Close()
+ ng.Close()
+
+ err = compressOldLogFile(fname, -1)
+ assert.NoError(t, err)
+
+ _, err = os.Lstat(fname + ".gz")
+ assert.NoError(t, err)
+
+ f, err = os.Open(fname + ".gz")
+ assert.NoError(t, err)
+ zr, err := gzip.NewReader(f)
+ assert.NoError(t, err)
+ data, err := ioutil.ReadAll(zr)
+ assert.NoError(t, err)
+ original, err := ioutil.ReadFile(nonGzip)
+ assert.NoError(t, err)
+ assert.Equal(t, original, data)
+}
diff --git a/modules/log/level.go b/modules/log/level.go
new file mode 100644
index 0000000000..6131fcb510
--- /dev/null
+++ b/modules/log/level.go
@@ -0,0 +1,111 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// Level is the level of the logger
+type Level int
+
+const (
+ // TRACE represents the lowest log level
+ TRACE Level = iota
+ // DEBUG is for debug logging
+ DEBUG
+ // INFO is for information
+ INFO
+ // WARN is for warning information
+ WARN
+ // ERROR is for error reporting
+ ERROR
+ // CRITICAL is for critical errors
+ CRITICAL
+ // FATAL is for fatal errors
+ FATAL
+ // NONE is for no logging
+ NONE
+)
+
+var toString = map[Level]string{
+ TRACE: "trace",
+ DEBUG: "debug",
+ INFO: "info",
+ WARN: "warn",
+ ERROR: "error",
+ CRITICAL: "critical",
+ FATAL: "fatal",
+ NONE: "none",
+}
+
+var toLevel = map[string]Level{
+ "trace": TRACE,
+ "debug": DEBUG,
+ "info": INFO,
+ "warn": WARN,
+ "error": ERROR,
+ "critical": CRITICAL,
+ "fatal": FATAL,
+ "none": NONE,
+}
+
+// Levels returns all the possible logging levels
+func Levels() []string {
+ keys := make([]string, 0)
+ for key := range toLevel {
+ keys = append(keys, key)
+ }
+ return keys
+}
+
+func (l Level) String() string {
+ s, ok := toString[l]
+ if ok {
+ return s
+ }
+ return "info"
+}
+
+// MarshalJSON takes a Level and turns it into text
+func (l Level) MarshalJSON() ([]byte, error) {
+ buffer := bytes.NewBufferString(`"`)
+ buffer.WriteString(toString[l])
+ buffer.WriteString(`"`)
+ return buffer.Bytes(), nil
+}
+
+// FromString takes a level string and returns a Level
+func FromString(level string) Level {
+ temp, ok := toLevel[strings.ToLower(level)]
+ if !ok {
+ return INFO
+ }
+ return temp
+}
+
+// UnmarshalJSON takes text and turns it into a Level
+func (l *Level) UnmarshalJSON(b []byte) error {
+ var tmp interface{}
+ err := json.Unmarshal(b, &tmp)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Err: %v", err)
+ return err
+ }
+
+ switch v := tmp.(type) {
+ case string:
+ *l = FromString(string(v))
+ case int:
+ *l = FromString(Level(v).String())
+ default:
+ *l = INFO
+ }
+ return nil
+}
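A short sketch of the Level conversions defined above; unknown names and plain numbers fall back to INFO (the function name is illustrative):

package log

import "encoding/json"

func exampleLevels() {
	warn := FromString("warn")     // WARN
	_ = warn.String()              // "warn"
	_ = FromString("not-a-level")  // INFO (fallback)

	var cfg struct {
		Level Level `json:"level"`
	}
	_ = json.Unmarshal([]byte(`{"level":"error"}`), &cfg) // cfg.Level == ERROR
}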
diff --git a/modules/log/level_test.go b/modules/log/level_test.go
new file mode 100644
index 0000000000..40f6310bfd
--- /dev/null
+++ b/modules/log/level_test.go
@@ -0,0 +1,55 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type testLevel struct {
+ Level Level `json:"level"`
+}
+
+func TestLevelMarshalUnmarshalJSON(t *testing.T) {
+ levelBytes, err := json.Marshal(testLevel{
+ Level: INFO,
+ })
+ assert.NoError(t, err)
+ assert.Equal(t, string(makeTestLevelBytes(INFO.String())), string(levelBytes))
+
+ var testLevel testLevel
+ err = json.Unmarshal(levelBytes, &testLevel)
+ assert.NoError(t, err)
+ assert.Equal(t, INFO, testLevel.Level)
+
+ err = json.Unmarshal(makeTestLevelBytes(`FOFOO`), &testLevel)
+ assert.NoError(t, err)
+ assert.Equal(t, INFO, testLevel.Level)
+
+ err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 2)), &testLevel)
+ assert.NoError(t, err)
+ assert.Equal(t, INFO, testLevel.Level)
+
+ err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 10012)), &testLevel)
+ assert.NoError(t, err)
+ assert.Equal(t, INFO, testLevel.Level)
+
+ err = json.Unmarshal([]byte(`{"level":{}}`), &testLevel)
+ assert.NoError(t, err)
+ assert.Equal(t, INFO, testLevel.Level)
+
+ assert.Equal(t, INFO.String(), Level(1001).String())
+
+ err = json.Unmarshal([]byte(`{"level":{}`), &testLevel.Level)
+ assert.Error(t, err)
+}
+
+func makeTestLevelBytes(level string) []byte {
+ return []byte(fmt.Sprintf(`{"level":"%s"}`, level))
+}
diff --git a/modules/log/log.go b/modules/log/log.go
index 98a8014b51..c0fd117967 100644
--- a/modules/log/log.go
+++ b/modules/log/log.go
@@ -8,48 +8,68 @@ import (
"fmt"
"os"
"path"
- "path/filepath"
"runtime"
"strings"
-
- "golang.org/x/sync/syncmap"
)
var (
- loggers []*Logger
+ // DEFAULT is the name of the default logger
+ DEFAULT = "default"
+ // NamedLoggers map of named loggers
+ NamedLoggers = make(map[string]*Logger)
// GitLogger logger for git
GitLogger *Logger
+ prefix string
)
-// NewLogger create a logger
-func NewLogger(bufLen int64, mode, config string) {
- logger := newLogger(bufLen)
-
- isExist := false
- for i, l := range loggers {
- if l.adapter == mode {
- isExist = true
- loggers[i] = logger
- }
+// NewLogger creates a sublogger for the default logger and returns the default logger
+func NewLogger(bufLen int64, name, provider, config string) *Logger {
+ err := NewNamedLogger(DEFAULT, bufLen, name, provider, config)
+ if err != nil {
+ CriticalWithSkip(1, "Unable to create default logger: %v", err)
+ panic(err)
}
- if !isExist {
- loggers = append(loggers, logger)
+ return NamedLoggers[DEFAULT]
+}
+
+// NewNamedLogger creates a new named logger for a given configuration
+func NewNamedLogger(name string, bufLen int64, subname, provider, config string) error {
+ logger, ok := NamedLoggers[name]
+ if !ok {
+ logger = newLogger(name, bufLen)
+
+ NamedLoggers[name] = logger
}
- if err := logger.SetLogger(mode, config); err != nil {
- Fatal(2, "Failed to set logger (%s): %v", mode, err)
+
+ return logger.SetLogger(subname, provider, config)
+}
+
+// DelNamedLogger closes and deletes the named logger
+func DelNamedLogger(name string) {
+ l, ok := NamedLoggers[name]
+ if ok {
+ delete(NamedLoggers, name)
+ l.Close()
}
}
-// DelLogger removes loggers that are for the given mode
-func DelLogger(mode string) error {
- for _, l := range loggers {
- if _, ok := l.outputs.Load(mode); ok {
- return l.DelLogger(mode)
- }
+// DelLogger removes the named sublogger from the default logger
+func DelLogger(name string) error {
+ logger := NamedLoggers[DEFAULT]
+ found, err := logger.DelLogger(name)
+ if !found {
+ Trace("Log %s not found, no need to delete", name)
}
+ return err
+}
- Trace("Log adapter %s not found, no need to delete", mode)
- return nil
+// GetLogger returns either a named logger or the default logger
+func GetLogger(name string) *Logger {
+ logger, ok := NamedLoggers[name]
+ if ok {
+ return logger
+ }
+ return NamedLoggers[DEFAULT]
}
// NewGitLogger create a logger for git
@@ -58,333 +78,168 @@ func NewGitLogger(logPath string) {
path := path.Dir(logPath)
if err := os.MkdirAll(path, os.ModePerm); err != nil {
- Fatal(4, "Failed to create dir %s: %v", path, err)
+ Fatal("Failed to create dir %s: %v", path, err)
}
- GitLogger = newLogger(0)
- GitLogger.SetLogger("file", fmt.Sprintf(`{"level":0,"filename":"%s","rotate":false}`, logPath))
+ GitLogger = newLogger("git", 0)
+ GitLogger.SetLogger("file", "file", fmt.Sprintf(`{"level":"TRACE","filename":"%s","rotate":false}`, logPath))
+}
+
+// GetLevel returns the minimum logger level
+func GetLevel() Level {
+ return NamedLoggers[DEFAULT].GetLevel()
+}
+
+// GetStacktraceLevel returns the minimum logger stacktrace level
+func GetStacktraceLevel() Level {
+ return NamedLoggers[DEFAULT].GetStacktraceLevel()
}
// Trace records trace log
func Trace(format string, v ...interface{}) {
- for _, logger := range loggers {
- logger.Trace(format, v...)
- }
+ Log(1, TRACE, format, v...)
+}
+
+// IsTrace returns true if at least one logger is TRACE
+func IsTrace() bool {
+ return GetLevel() <= TRACE
}
// Debug records debug log
func Debug(format string, v ...interface{}) {
- for _, logger := range loggers {
- logger.Debug(format, v...)
- }
+ Log(1, DEBUG, format, v...)
}
-// Info records info log
-func Info(format string, v ...interface{}) {
- for _, logger := range loggers {
- logger.Info(format, v...)
- }
+// IsDebug returns true if at least one logger is DEBUG
+func IsDebug() bool {
+ return GetLevel() <= DEBUG
}
-// Warn records warning log
-func Warn(format string, v ...interface{}) {
- for _, logger := range loggers {
- logger.Warn(format, v...)
- }
+// Info records info log
+func Info(format string, v ...interface{}) {
+ Log(1, INFO, format, v...)
}
-// Error records error log
-func Error(skip int, format string, v ...interface{}) {
- for _, logger := range loggers {
- logger.Error(skip, format, v...)
- }
+// IsInfo returns true if at least one logger is INFO
+func IsInfo() bool {
+ return GetLevel() <= INFO
}
-// Critical records critical log
-func Critical(skip int, format string, v ...interface{}) {
- for _, logger := range loggers {
- logger.Critical(skip, format, v...)
- }
+// Warn records warning log
+func Warn(format string, v ...interface{}) {
+ Log(1, WARN, format, v...)
}
-// Fatal records error log and exit process
-func Fatal(skip int, format string, v ...interface{}) {
- Error(skip, format, v...)
- for _, l := range loggers {
- l.Close()
- }
- os.Exit(1)
+// IsWarn returns true if at least one logger is WARN
+func IsWarn() bool {
+ return GetLevel() <= WARN
}
-// Close closes all the loggers
-func Close() {
- for _, l := range loggers {
- l.Close()
- }
+// Error records error log
+func Error(format string, v ...interface{}) {
+ Log(1, ERROR, format, v...)
}
-// .___ __ _____
-// | | _____/ |_ ____________/ ____\____ ____ ____
-// | |/ \ __\/ __ \_ __ \ __\\__ \ _/ ___\/ __ \
-// | | | \ | \ ___/| | \/| | / __ \\ \__\ ___/
-// |___|___| /__| \___ >__| |__| (____ /\___ >___ >
-// \/ \/ \/ \/ \/
-
-// LogLevel level type for log
-//type LogLevel int
-
-// log levels
-const (
- TRACE = iota
- DEBUG
- INFO
- WARN
- ERROR
- CRITICAL
- FATAL
-)
-
-// LoggerInterface represents behaviors of a logger provider.
-type LoggerInterface interface {
- Init(config string) error
- WriteMsg(msg string, skip, level int) error
- Destroy()
- Flush()
+// ErrorWithSkip records error log from "skip" calls back from this function
+func ErrorWithSkip(skip int, format string, v ...interface{}) {
+ Log(skip+1, ERROR, format, v...)
}
-type loggerType func() LoggerInterface
-
-// LoggerAsWriter is a io.Writer shim around the gitea log
-type LoggerAsWriter struct {
- level int
+// IsError returns true if at least one logger is ERROR
+func IsError() bool {
+ return GetLevel() <= ERROR
}
-// NewLoggerAsWriter creates a Writer representation of the logger with setable log level
-func NewLoggerAsWriter(level string) *LoggerAsWriter {
- l := &LoggerAsWriter{}
- switch strings.ToUpper(level) {
- case "TRACE":
- l.level = TRACE
- case "DEBUG":
- l.level = DEBUG
- case "INFO":
- l.level = INFO
- case "WARN":
- l.level = WARN
- case "ERROR":
- l.level = ERROR
- case "CRITICAL":
- l.level = CRITICAL
- case "FATAL":
- l.level = FATAL
- default:
- l.level = INFO
- }
- return l
+// Critical records critical log
+func Critical(format string, v ...interface{}) {
+ Log(1, CRITICAL, format, v...)
}
-// Write implements the io.Writer interface to allow spoofing of macaron
-func (l *LoggerAsWriter) Write(p []byte) (int, error) {
- l.Log(string(p))
- return len(p), nil
+// CriticalWithSkip records critical log from "skip" calls back from this function
+func CriticalWithSkip(skip int, format string, v ...interface{}) {
+ Log(skip+1, CRITICAL, format, v...)
}
-// Log takes a given string and logs it at the set log-level
-func (l *LoggerAsWriter) Log(msg string) {
- for _, logger := range loggers {
- logger.writerMsg(0, l.level, msg)
- }
+// IsCritical returns true if at least one logger is CRITICAL
+func IsCritical() bool {
+ return GetLevel() <= CRITICAL
}
-var adapters = make(map[string]loggerType)
-
-// Register registers given logger provider to adapters.
-func Register(name string, log loggerType) {
- if log == nil {
- panic("log: register provider is nil")
- }
- if _, dup := adapters[name]; dup {
- panic("log: register called twice for provider \"" + name + "\"")
- }
- adapters[name] = log
+// Fatal records fatal log and exit process
+func Fatal(format string, v ...interface{}) {
+ Log(1, FATAL, format, v...)
+ Close()
+ os.Exit(1)
}
-type logMsg struct {
- skip, level int
- msg string
+// FatalWithSkip records fatal log from "skip" calls back from this function
+func FatalWithSkip(skip int, format string, v ...interface{}) {
+ Log(skip+1, FATAL, format, v...)
+ Close()
+ os.Exit(1)
}
-// Logger is default logger in beego application.
-// it can contain several providers and log message into all providers.
-type Logger struct {
- adapter string
- level int
- msg chan *logMsg
- outputs syncmap.Map
- quit chan bool
+// IsFatal returns true if at least one logger is FATAL
+func IsFatal() bool {
+ return GetLevel() <= FATAL
}
-// newLogger initializes and returns a new logger.
-func newLogger(buffer int64) *Logger {
- l := &Logger{
- msg: make(chan *logMsg, buffer),
- quit: make(chan bool),
+// Close closes all the loggers
+func Close() {
+ l, ok := NamedLoggers[DEFAULT]
+ if !ok {
+ return
}
- go l.StartLogger()
- return l
+ delete(NamedLoggers, DEFAULT)
+ l.Close()
}
-// SetLogger sets new logger instance with given logger adapter and config.
-func (l *Logger) SetLogger(adapter string, config string) error {
- if log, ok := adapters[adapter]; ok {
- lg := log()
- if err := lg.Init(config); err != nil {
- return err
- }
- l.outputs.Store(adapter, lg)
- l.adapter = adapter
- } else {
- panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
+// Log logs a message at the given level with the caller determined by skip.
+// A skip of 0 refers to the caller of this function.
+func Log(skip int, level Level, format string, v ...interface{}) {
+ l, ok := NamedLoggers[DEFAULT]
+ if ok {
+ l.Log(skip+1, level, format, v...)
}
- return nil
}
-// DelLogger removes a logger adapter instance.
-func (l *Logger) DelLogger(adapter string) error {
- if lg, ok := l.outputs.Load(adapter); ok {
- lg.(LoggerInterface).Destroy()
- l.outputs.Delete(adapter)
- } else {
- panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
- }
- return nil
+// LoggerAsWriter is a io.Writer shim around the gitea log
+type LoggerAsWriter struct {
+ ourLoggers []*Logger
+ level Level
}
-func (l *Logger) writerMsg(skip, level int, msg string) error {
- if l.level > level {
- return nil
- }
- lm := &logMsg{
- skip: skip,
- level: level,
- }
-
- // Only error information needs locate position for debugging.
- if lm.level >= ERROR {
- pc, file, line, ok := runtime.Caller(skip)
- if ok {
- // Get caller function name.
- fn := runtime.FuncForPC(pc)
- var fnName string
- if fn == nil {
- fnName = "?()"
- } else {
- fnName = strings.TrimLeft(filepath.Ext(fn.Name()), ".") + "()"
- }
-
- fileName := file
- if len(fileName) > 20 {
- fileName = "..." + fileName[len(fileName)-20:]
- }
- lm.msg = fmt.Sprintf("[%s:%d %s] %s", fileName, line, fnName, msg)
- } else {
- lm.msg = msg
- }
- } else {
- lm.msg = msg
+// NewLoggerAsWriter creates a Writer representation of the logger with a settable log level
+func NewLoggerAsWriter(level string, ourLoggers ...*Logger) *LoggerAsWriter {
+ if len(ourLoggers) == 0 {
+ ourLoggers = []*Logger{NamedLoggers[DEFAULT]}
}
- l.msg <- lm
- return nil
-}
-
-// StartLogger starts logger chan reading.
-func (l *Logger) StartLogger() {
- for {
- select {
- case bm := <-l.msg:
- l.outputs.Range(func(k, v interface{}) bool {
- if err := v.(LoggerInterface).WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
- fmt.Println("ERROR, unable to WriteMsg:", err)
- }
- return true
- })
- case <-l.quit:
- return
- }
+ l := &LoggerAsWriter{
+ ourLoggers: ourLoggers,
+ level: FromString(level),
}
+ return l
}
-// Flush flushes all chan data.
-func (l *Logger) Flush() {
- l.outputs.Range(func(k, v interface{}) bool {
- v.(LoggerInterface).Flush()
- return true
- })
-}
-
-// Close closes logger, flush all chan data and destroy all adapter instances.
-func (l *Logger) Close() {
- l.quit <- true
- for {
- if len(l.msg) > 0 {
- bm := <-l.msg
- l.outputs.Range(func(k, v interface{}) bool {
- if err := v.(LoggerInterface).WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
- fmt.Println("ERROR, unable to WriteMsg:", err)
- }
- return true
- })
- } else {
- break
- }
+// Write implements the io.Writer interface to allow spoofing of macaron
+func (l *LoggerAsWriter) Write(p []byte) (int, error) {
+ for _, logger := range l.ourLoggers {
+ // Skip = 3 because this presumes that we have been called by log.Println()
+ // If the caller has used log.Output or the like this will be wrong
+ logger.Log(3, l.level, string(p))
}
- l.outputs.Range(func(k, v interface{}) bool {
- v.(LoggerInterface).Flush()
- v.(LoggerInterface).Destroy()
- return true
- })
-}
-
-// Trace records trace log
-func (l *Logger) Trace(format string, v ...interface{}) {
- msg := fmt.Sprintf("[T] "+format, v...)
- l.writerMsg(0, TRACE, msg)
-}
-
-// Debug records debug log
-func (l *Logger) Debug(format string, v ...interface{}) {
- msg := fmt.Sprintf("[D] "+format, v...)
- l.writerMsg(0, DEBUG, msg)
-}
-
-// Info records information log
-func (l *Logger) Info(format string, v ...interface{}) {
- msg := fmt.Sprintf("[I] "+format, v...)
- l.writerMsg(0, INFO, msg)
-}
-
-// Warn records warning log
-func (l *Logger) Warn(format string, v ...interface{}) {
- msg := fmt.Sprintf("[W] "+format, v...)
- l.writerMsg(0, WARN, msg)
-}
-
-// Error records error log
-func (l *Logger) Error(skip int, format string, v ...interface{}) {
- msg := fmt.Sprintf("[E] "+format, v...)
- l.writerMsg(skip, ERROR, msg)
+ return len(p), nil
}
-// Critical records critical log
-func (l *Logger) Critical(skip int, format string, v ...interface{}) {
- msg := fmt.Sprintf("[C] "+format, v...)
- l.writerMsg(skip, CRITICAL, msg)
+// Log takes a given string and logs it at the set log-level
+func (l *LoggerAsWriter) Log(msg string) {
+ for _, logger := range l.ourLoggers {
+ // Set the skip to reference the call just above this
+ logger.Log(1, l.level, msg)
+ }
}
-// Fatal records error log and exit the process
-func (l *Logger) Fatal(skip int, format string, v ...interface{}) {
- msg := fmt.Sprintf("[F] "+format, v...)
- l.writerMsg(skip, FATAL, msg)
- l.Close()
- os.Exit(1)
+func init() {
+ _, filename, _, _ := runtime.Caller(0)
+ prefix = strings.TrimSuffix(filename, "modules/log/log.go")
}
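A sketch of typical package-level usage of the reworked API (the sublogger names, config and helper name are illustrative):

package log

// exampleDefaultAndNamed wires a console sublogger into the default logger and
// a separate named logger, then uses the package-level helpers.
func exampleDefaultAndNamed() {
	NewLogger(1000, "console", "console", `{"level":"info"}`)

	Info("routine message: %s", "hello")
	if IsDebug() {
		Debug("only emitted when some sublogger is at DEBUG or lower")
	}
	Error("something failed: %v", ErrDuplicateName{Name: "example"})

	// A separate named logger, e.g. for access or XORM logs.
	_ = NewNamedLogger("access", 1000, "console", "console", `{"level":"info"}`)
	GetLogger("access").Info("GET /")
}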
diff --git a/modules/log/log_test.go b/modules/log/log_test.go
new file mode 100644
index 0000000000..9e3d7527b2
--- /dev/null
+++ b/modules/log/log_test.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func baseConsoleTest(t *testing.T, logger *Logger) (chan []byte, chan bool) {
+ written := make(chan []byte)
+ closed := make(chan bool)
+
+ c := CallbackWriteCloser{
+ callback: func(p []byte, close bool) {
+ written <- p
+ closed <- close
+ },
+ }
+ m := logger.MultiChannelledLog
+
+ channelledLog := m.GetEventLogger("console")
+ assert.NotEmpty(t, channelledLog)
+ realChanLog, ok := channelledLog.(*ChannelledLog)
+ assert.Equal(t, true, ok)
+ realCL, ok := realChanLog.loggerProvider.(*ConsoleLogger)
+ assert.Equal(t, true, ok)
+ assert.Equal(t, INFO, realCL.Level)
+ realCL.out = c
+
+ format := "test: %s"
+ args := []interface{}{"A"}
+
+ logger.Log(0, INFO, format, args...)
+ line := <-written
+ assert.Contains(t, string(line), fmt.Sprintf(format, args...))
+ assert.Equal(t, false, <-closed)
+
+ format = "test2: %s"
+ logger.Warn(format, args...)
+ line = <-written
+
+ assert.Contains(t, string(line), fmt.Sprintf(format, args...))
+ assert.Equal(t, false, <-closed)
+
+ format = "testerror: %s"
+ logger.Error(format, args...)
+ line = <-written
+ assert.Contains(t, string(line), fmt.Sprintf(format, args...))
+ assert.Equal(t, false, <-closed)
+ return written, closed
+}
+
+func TestNewLoggerUnexported(t *testing.T) {
+ level := INFO
+ logger := newLogger("UNEXPORTED", 0)
+ err := logger.SetLogger("console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+ assert.NoError(t, err)
+ out := logger.MultiChannelledLog.GetEventLogger("console")
+ assert.NotEmpty(t, out)
+ chanlog, ok := out.(*ChannelledLog)
+ assert.Equal(t, true, ok)
+ assert.Equal(t, "console", chanlog.provider)
+ assert.Equal(t, INFO, logger.GetLevel())
+ baseConsoleTest(t, logger)
+}
+
+func TestNewLogger(t *testing.T) {
+ level := INFO
+ logger := NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+
+ assert.Equal(t, INFO, GetLevel())
+ assert.Equal(t, false, IsTrace())
+ assert.Equal(t, false, IsDebug())
+ assert.Equal(t, true, IsInfo())
+ assert.Equal(t, true, IsWarn())
+ assert.Equal(t, true, IsError())
+
+ written, closed := baseConsoleTest(t, logger)
+
+ format := "test: %s"
+ args := []interface{}{"A"}
+
+ Log(0, INFO, format, args...)
+ line := <-written
+ assert.Contains(t, string(line), fmt.Sprintf(format, args...))
+ assert.Equal(t, false, <-closed)
+
+ Info(format, args...)
+ line = <-written
+ assert.Contains(t, string(line), fmt.Sprintf(format, args...))
+ assert.Equal(t, false, <-closed)
+
+ go DelLogger("console")
+ line = <-written
+ assert.Equal(t, "", string(line))
+ assert.Equal(t, true, <-closed)
+}
+
+func TestNewLoggerRecreate(t *testing.T) {
+ level := INFO
+ NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+
+ assert.Equal(t, INFO, GetLevel())
+ assert.Equal(t, false, IsTrace())
+ assert.Equal(t, false, IsDebug())
+ assert.Equal(t, true, IsInfo())
+ assert.Equal(t, true, IsWarn())
+ assert.Equal(t, true, IsError())
+
+ format := "test: %s"
+ args := []interface{}{"A"}
+
+ Log(0, INFO, format, args...)
+
+ NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+
+ assert.Equal(t, INFO, GetLevel())
+ assert.Equal(t, false, IsTrace())
+ assert.Equal(t, false, IsDebug())
+ assert.Equal(t, true, IsInfo())
+ assert.Equal(t, true, IsWarn())
+ assert.Equal(t, true, IsError())
+
+ Log(0, INFO, format, args...)
+
+ assert.Panics(t, func() {
+ NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"`, level.String()))
+ })
+
+ go DelLogger("console")
+
+ // We should be able to redelete without a problem
+ go DelLogger("console")
+
+}
+
+func TestNewNamedLogger(t *testing.T) {
+ level := INFO
+ err := NewNamedLogger("test", 0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
+ assert.NoError(t, err)
+ logger := NamedLoggers["test"]
+ assert.Equal(t, level, logger.GetLevel())
+
+ written, closed := baseConsoleTest(t, logger)
+ go DelNamedLogger("test")
+ line := <-written
+ assert.Equal(t, "", string(line))
+ assert.Equal(t, true, <-closed)
+}
diff --git a/modules/log/logger.go b/modules/log/logger.go
new file mode 100644
index 0000000000..925ab02b71
--- /dev/null
+++ b/modules/log/logger.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// Logger is the default logger in the Gitea application.
+// It can contain several providers and logs messages to all of them.
+type Logger struct {
+ *MultiChannelledLog
+ bufferLength int64
+}
+
+// newLogger initializes and returns a new logger.
+func newLogger(name string, buffer int64) *Logger {
+ l := &Logger{
+ MultiChannelledLog: NewMultiChannelledLog(name, buffer),
+ bufferLength: buffer,
+ }
+ return l
+}
+
+// SetLogger sets a new sublogger with the given name, logger provider and config.
+func (l *Logger) SetLogger(name, provider, config string) error {
+ eventLogger, err := NewChannelledLog(name, provider, config, l.bufferLength)
+ if err != nil {
+ return fmt.Errorf("Failed to create sublogger (%s): %v", name, err)
+ }
+
+ l.MultiChannelledLog.DelLogger(name)
+
+ err = l.MultiChannelledLog.AddLogger(eventLogger)
+ if err != nil {
+ if IsErrDuplicateName(err) {
+ return fmt.Errorf("Duplicate named sublogger %s %v", name, l.MultiChannelledLog.GetEventLoggerNames())
+ }
+ return fmt.Errorf("Failed to add sublogger (%s): %v", name, err)
+ }
+
+ return nil
+}
+
+// DelLogger deletes a sublogger from this logger.
+func (l *Logger) DelLogger(name string) (bool, error) {
+ return l.MultiChannelledLog.DelLogger(name), nil
+}
+
+// Log msg at the provided level with the provided caller defined by skip (0 being the function that calls this function)
+func (l *Logger) Log(skip int, level Level, format string, v ...interface{}) error {
+ if l.GetLevel() > level {
+ return nil
+ }
+ caller := "?()"
+ pc, filename, line, ok := runtime.Caller(skip + 1)
+ if ok {
+ // Get caller function name.
+ fn := runtime.FuncForPC(pc)
+ if fn != nil {
+ caller = fn.Name() + "()"
+ }
+ }
+ msg := format
+ if len(v) > 0 {
+ args := make([]interface{}, len(v))
+ for i := 0; i < len(args); i++ {
+ args[i] = NewColoredValuePointer(&v[i])
+ }
+ msg = fmt.Sprintf(format, args...)
+ }
+ stack := ""
+ if l.GetStacktraceLevel() <= level {
+ stack = Stack(skip + 1)
+ }
+ return l.SendLog(level, caller, strings.TrimPrefix(filename, prefix), line, msg, stack)
+}
+
+// SendLog sends a log event at the provided level with the information given
+func (l *Logger) SendLog(level Level, caller, filename string, line int, msg string, stack string) error {
+ if l.GetLevel() > level {
+ return nil
+ }
+ event := &Event{
+ level: level,
+ caller: caller,
+ filename: filename,
+ line: line,
+ msg: msg,
+ time: time.Now(),
+ stacktrace: stack,
+ }
+ l.LogEvent(event)
+ return nil
+}
+
+// Trace records trace log
+func (l *Logger) Trace(format string, v ...interface{}) {
+ l.Log(1, TRACE, format, v...)
+}
+
+// Debug records debug log
+func (l *Logger) Debug(format string, v ...interface{}) {
+	l.Log(1, DEBUG, format, v...)
+}
+
+// Info records information log
+func (l *Logger) Info(format string, v ...interface{}) {
+ l.Log(1, INFO, format, v...)
+}
+
+// Warn records warning log
+func (l *Logger) Warn(format string, v ...interface{}) {
+ l.Log(1, WARN, format, v...)
+}
+
+// Error records error log
+func (l *Logger) Error(format string, v ...interface{}) {
+ l.Log(1, ERROR, format, v...)
+}
+
+// ErrorWithSkip records error log from "skip" calls back from this function
+func (l *Logger) ErrorWithSkip(skip int, format string, v ...interface{}) {
+ l.Log(skip+1, ERROR, format, v...)
+}
+
+// Critical records critical log
+func (l *Logger) Critical(format string, v ...interface{}) {
+ l.Log(1, CRITICAL, format, v...)
+}
+
+// CriticalWithSkip records critical log from "skip" calls back from this function
+func (l *Logger) CriticalWithSkip(skip int, format string, v ...interface{}) {
+ l.Log(skip+1, CRITICAL, format, v...)
+}
+
+// Fatal records fatal log and exit the process
+func (l *Logger) Fatal(format string, v ...interface{}) {
+ l.Log(1, FATAL, format, v...)
+ l.Close()
+ os.Exit(1)
+}
+
+// FatalWithSkip records fatal log from "skip" calls back from this function and exits the process
+func (l *Logger) FatalWithSkip(skip int, format string, v ...interface{}) {
+ l.Log(skip+1, FATAL, format, v...)
+ l.Close()
+ os.Exit(1)
+}
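
A short sketch of the skip semantics assumed by these wrappers: a hypothetical helper in another package that logs on behalf of its caller passes skip=1 so the reported file, line and caller are those of the function invoking the helper, not the helper itself.

    // warnForCaller is illustrative; skip=1 attributes the message to our caller
    func warnForCaller(format string, v ...interface{}) {
        log.GetLogger(log.DEFAULT).Log(1, log.WARN, format, v...)
    }
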
diff --git a/modules/log/provider.go b/modules/log/provider.go
new file mode 100644
index 0000000000..b31bf392e6
--- /dev/null
+++ b/modules/log/provider.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+// LoggerProvider represents behaviors of a logger provider.
+type LoggerProvider interface {
+ Init(config string) error
+ EventLogger
+}
+
+type loggerProvider func() LoggerProvider
+
+var providers = make(map[string]loggerProvider)
+
+// Register registers given logger provider to providers.
+func Register(name string, log loggerProvider) {
+ if log == nil {
+ panic("log: register provider is nil")
+ }
+ if _, dup := providers[name]; dup {
+ panic("log: register called twice for provider \"" + name + "\"")
+ }
+ providers[name] = log
+}
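
The providers map is consumed when a channelled logger is constructed; that code is not part of this hunk, so the following is only a hypothetical in-package sketch of the lookup it performs:

    // newProvider resolves the factory registered under the requested name,
    // instantiates it and hands it the JSON configuration
    func newProvider(providerName, config string) (LoggerProvider, error) {
        factory, ok := providers[providerName]
        if !ok {
            return nil, fmt.Errorf("unknown logger provider %q", providerName)
        }
        p := factory()
        if err := p.Init(config); err != nil {
            return nil, err
        }
        return p, nil
    }
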
diff --git a/modules/log/router.go b/modules/log/router.go
new file mode 100644
index 0000000000..842202c781
--- /dev/null
+++ b/modules/log/router.go
@@ -0,0 +1,103 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "net/http"
+ "time"
+
+ macaron "gopkg.in/macaron.v1"
+)
+
+var statusToColor = map[int][]byte{
+ 100: ColorBytes(Bold),
+ 200: ColorBytes(FgGreen),
+ 300: ColorBytes(FgYellow),
+ 304: ColorBytes(FgCyan),
+ 400: ColorBytes(Bold, FgRed),
+ 401: ColorBytes(Bold, FgMagenta),
+ 403: ColorBytes(Bold, FgMagenta),
+ 500: ColorBytes(Bold, BgRed),
+}
+
+func coloredStatus(status int, s ...string) *ColoredValue {
+ color, ok := statusToColor[status]
+ if !ok {
+ color, ok = statusToColor[(status/100)*100]
+ }
+ if !ok {
+ color = fgBoldBytes
+ }
+ if len(s) > 0 {
+ return NewColoredValueBytes(s[0], &color)
+ }
+ return NewColoredValueBytes(status, &color)
+}
+
+var methodToColor = map[string][]byte{
+ "GET": ColorBytes(FgBlue),
+ "POST": ColorBytes(FgGreen),
+ "DELETE": ColorBytes(FgRed),
+ "PATCH": ColorBytes(FgCyan),
+ "PUT": ColorBytes(FgYellow, Faint),
+ "HEAD": ColorBytes(FgBlue, Faint),
+}
+
+func coloredMethod(method string) *ColoredValue {
+ color, ok := methodToColor[method]
+ if !ok {
+ return NewColoredValueBytes(method, &fgBoldBytes)
+ }
+ return NewColoredValueBytes(method, &color)
+}
+
+var durations = []time.Duration{
+ 10 * time.Millisecond,
+ 100 * time.Millisecond,
+ 1 * time.Second,
+ 5 * time.Second,
+ 10 * time.Second,
+}
+
+var durationColors = [][]byte{
+ ColorBytes(FgGreen),
+ ColorBytes(Bold),
+ ColorBytes(FgYellow),
+ ColorBytes(FgRed, Bold),
+ ColorBytes(BgRed),
+}
+
+var wayTooLong = ColorBytes(BgMagenta)
+
+func coloredTime(duration time.Duration) *ColoredValue {
+ for i, k := range durations {
+ if duration < k {
+ return NewColoredValueBytes(duration, &durationColors[i])
+ }
+ }
+ return NewColoredValueBytes(duration, &wayTooLong)
+}
+
+// SetupRouterLogger will set up macaron so that router requests are logged to the main gitea log
+func SetupRouterLogger(m *macaron.Macaron, level Level) {
+ if GetLevel() <= level {
+ m.Use(RouterHandler(level))
+ }
+}
+
+// RouterHandler is a macaron handler that will log the routing to the default gitea log
+func RouterHandler(level Level) func(ctx *macaron.Context) {
+ return func(ctx *macaron.Context) {
+ start := time.Now()
+
+ GetLogger("router").Log(0, level, "Started %s %s for %s", coloredMethod(ctx.Req.Method), ctx.Req.RequestURI, ctx.RemoteAddr())
+
+ rw := ctx.Resp.(macaron.ResponseWriter)
+ ctx.Next()
+
+ status := rw.Status()
+ GetLogger("router").Log(0, level, "Completed %s %s %v %s in %v", coloredMethod(ctx.Req.Method), ctx.Req.RequestURI, coloredStatus(status), coloredStatus(status, http.StatusText(rw.Status())), coloredTime(time.Since(start)))
+ }
+}
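
Wiring this middleware into an application is a one-liner; a sketch assuming a macaron instance created elsewhere:

    m := macaron.New()
    // the handler is only installed when the default logger would emit at INFO
    log.SetupRouterLogger(m, log.INFO)
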
diff --git a/modules/log/smtp.go b/modules/log/smtp.go
index 760b8e8bd7..2e78d713ca 100644
--- a/modules/log/smtp.go
+++ b/modules/log/smtp.go
@@ -1,4 +1,5 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
@@ -6,29 +7,45 @@ package log
import (
"encoding/json"
- "fmt"
"net/smtp"
"strings"
- "time"
)
const (
subjectPhrase = "Diagnostic message from server"
)
-// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
-type SMTPWriter struct {
+type smtpWriter struct {
+ owner *SMTPLogger
+}
+
+// Write sends the message as an email
+func (s *smtpWriter) Write(p []byte) (int, error) {
+ return s.owner.sendMail(p)
+}
+
+// Close does nothing
+func (s *smtpWriter) Close() error {
+ return nil
+}
+
+// SMTPLogger implements LoggerProvider and is used to send emails via a given SMTP server.
+type SMTPLogger struct {
+ BaseLogger
Username string `json:"Username"`
Password string `json:"password"`
- Host string `json:"Host"`
+ Host string `json:"host"`
Subject string `json:"subject"`
RecipientAddresses []string `json:"sendTos"`
- Level int `json:"level"`
+ sendMailFn func(string, smtp.Auth, string, []string, []byte) error
}
-// NewSMTPWriter creates smtp writer.
-func NewSMTPWriter() LoggerInterface {
- return &SMTPWriter{Level: TRACE}
+// NewSMTPLogger creates an SMTP logger provider.
+func NewSMTPLogger() LoggerProvider {
+ s := &SMTPLogger{}
+ s.Level = TRACE
+ s.sendMailFn = smtp.SendMail
+ return s
}
// Init smtp writer with json config.
@@ -41,49 +58,54 @@ func NewSMTPWriter() LoggerInterface {
// "sendTos":["email1","email2"],
// "level":LevelError
// }
-func (sw *SMTPWriter) Init(jsonconfig string) error {
- return json.Unmarshal([]byte(jsonconfig), sw)
+func (log *SMTPLogger) Init(jsonconfig string) error {
+ err := json.Unmarshal([]byte(jsonconfig), log)
+ if err != nil {
+ return err
+ }
+ log.createLogger(&smtpWriter{
+ owner: log,
+ })
+ log.sendMailFn = smtp.SendMail
+ return nil
}
// WriteMsg writes message in smtp writer.
// it will send an email with subject and only this message.
-func (sw *SMTPWriter) WriteMsg(msg string, skip, level int) error {
- if level < sw.Level {
- return nil
- }
-
- hp := strings.Split(sw.Host, ":")
+func (log *SMTPLogger) sendMail(p []byte) (int, error) {
+ hp := strings.Split(log.Host, ":")
// Set up authentication information.
auth := smtp.PlainAuth(
"",
- sw.Username,
- sw.Password,
+ log.Username,
+ log.Password,
hp[0],
)
// Connect to the server, authenticate, set the sender and recipient,
// and send the email all in one step.
contentType := "Content-Type: text/plain" + "; charset=UTF-8"
- mailmsg := []byte("To: " + strings.Join(sw.RecipientAddresses, ";") + "\r\nFrom: " + sw.Username + "<" + sw.Username +
- ">\r\nSubject: " + sw.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", time.Now().Format("2006-01-02 15:04:05")) + msg)
-
- return smtp.SendMail(
- sw.Host,
+ mailmsg := []byte("To: " + strings.Join(log.RecipientAddresses, ";") + "\r\nFrom: " + log.Username + "<" + log.Username +
+ ">\r\nSubject: " + log.Subject + "\r\n" + contentType + "\r\n\r\n")
+ mailmsg = append(mailmsg, p...)
+ return len(p), log.sendMailFn(
+ log.Host,
auth,
- sw.Username,
- sw.RecipientAddresses,
+ log.Username,
+ log.RecipientAddresses,
mailmsg,
)
}
// Flush when log should be flushed
-func (sw *SMTPWriter) Flush() {
+func (log *SMTPLogger) Flush() {
}
-// Destroy when writer is destroy
-func (sw *SMTPWriter) Destroy() {
+// GetName returns the default name for this implementation
+func (log *SMTPLogger) GetName() string {
+ return "smtp"
}
func init() {
- Register("smtp", NewSMTPWriter)
+ Register("smtp", NewSMTPLogger)
}
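
An illustrative way to attach the rewritten provider to the default logger under its "smtp" name; host, credentials and recipients below are placeholders, not defaults:

    log.NewLogger(1000, "smtp", "smtp",
        `{"level":"error","host":"127.0.0.1:25","username":"gitea@example.com",`+
            `"password":"secret","sendTos":["ops@example.com"],"subject":"Gitea diagnostic"}`)
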
diff --git a/modules/log/smtp_test.go b/modules/log/smtp_test.go
new file mode 100644
index 0000000000..216d555215
--- /dev/null
+++ b/modules/log/smtp_test.go
@@ -0,0 +1,86 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "fmt"
+ "net/smtp"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSMTPLogger(t *testing.T) {
+ prefix := "TestPrefix "
+ level := INFO
+ flags := LstdFlags | LUTC | Lfuncname
+ username := "testuser"
+ password := "testpassword"
+ host := "testhost"
+ subject := "testsubject"
+ sendTos := []string{"testto1", "testto2"}
+
+ logger := NewSMTPLogger()
+ smtpLogger, ok := logger.(*SMTPLogger)
+ assert.Equal(t, true, ok)
+
+ err := logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"username\":\"%s\",\"password\":\"%s\",\"host\":\"%s\",\"subject\":\"%s\",\"sendTos\":[\"%s\",\"%s\"]}", prefix, level.String(), flags, username, password, host, subject, sendTos[0], sendTos[1]))
+ assert.NoError(t, err)
+
+ assert.Equal(t, flags, smtpLogger.Flags)
+ assert.Equal(t, level, smtpLogger.Level)
+ assert.Equal(t, level, logger.GetLevel())
+
+ location, _ := time.LoadLocation("EST")
+
+ date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
+
+ dateString := date.UTC().Format("2006/01/02 15:04:05")
+
+ event := Event{
+ level: INFO,
+ msg: "TEST MSG",
+ caller: "CALLER",
+ filename: "FULL/FILENAME",
+ line: 1,
+ time: date,
+ }
+
+ expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+
+ var envToHost string
+ var envFrom string
+ var envTo []string
+ var envMsg []byte
+ smtpLogger.sendMailFn = func(addr string, a smtp.Auth, from string, to []string, msg []byte) error {
+ envToHost = addr
+ envFrom = from
+ envTo = to
+ envMsg = msg
+ return nil
+ }
+
+ err = logger.LogEvent(&event)
+ assert.NoError(t, err)
+ assert.Equal(t, host, envToHost)
+ assert.Equal(t, username, envFrom)
+ assert.Equal(t, sendTos, envTo)
+ assert.Contains(t, string(envMsg), expected)
+
+ logger.Flush()
+
+ event.level = WARN
+ expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+ err = logger.LogEvent(&event)
+ assert.NoError(t, err)
+ assert.Equal(t, host, envToHost)
+ assert.Equal(t, username, envFrom)
+ assert.Equal(t, sendTos, envTo)
+ assert.Contains(t, string(envMsg), expected)
+
+ logger.Close()
+}
diff --git a/modules/log/stack.go b/modules/log/stack.go
new file mode 100644
index 0000000000..ffe9aa67cc
--- /dev/null
+++ b/modules/log/stack.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package log
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "runtime"
+)
+
+var (
+ unknown = []byte("???")
+)
+
+// Stack will skip back the provided number of frames and return a stack trace with source code.
+// Although we could just use debug.Stack(), this routine will return the source code and
+// skip back the provided number of frames - i.e. allowing us to ignore preceding function calls.
+// A skip of 0 returns the stack trace for the calling function, not including this call.
+// If the problem is a lack of memory of course all this is not going to work...
+func Stack(skip int) string {
+ buf := new(bytes.Buffer)
+
+ // Store the last file we opened as it's probable that the preceding stack frame
+ // will be in the same file
+ var lines [][]byte
+ var lastFilename string
+ for i := skip + 1; ; i++ { // Skip over frames
+ programCounter, filename, lineNumber, ok := runtime.Caller(i)
+ // If we can't retrieve the information break - basically we're into go internals at this point.
+ if !ok {
+ break
+ }
+
+ // Print equivalent of debug.Stack()
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", filename, lineNumber, programCounter)
+ // Now try to print the offending line
+ if filename != lastFilename {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ // can't read this sourcefile
+ // likely we don't have the sourcecode available
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFilename = filename
+ }
+ fmt.Fprintf(buf, "\t%s: %s\n", functionName(programCounter), source(lines, lineNumber))
+ }
+ return buf.String()
+}
+
+// functionName converts the provided programCounter into a function name
+func functionName(programCounter uintptr) []byte {
+ function := runtime.FuncForPC(programCounter)
+ if function == nil {
+ return unknown
+ }
+ name := []byte(function.Name())
+
+ // Because we provide the filename we can drop the preceding package name.
+ if lastslash := bytes.LastIndex(name, []byte("/")); lastslash >= 0 {
+ name = name[lastslash+1:]
+ }
+ // And the current package name.
+ if period := bytes.Index(name, []byte(".")); period >= 0 {
+ name = name[period+1:]
+ }
+ // And we should just replace the interpunct with a dot
+ name = bytes.Replace(name, []byte("·"), []byte("."), -1)
+ return name
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ if n < 0 || n >= len(lines) {
+ return unknown
+ }
+ return bytes.TrimSpace(lines[n])
+}
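
A brief sketch of the intended call pattern; doWork is hypothetical, and Stack(0) yields the trace from the caller's frame upward:

    func handle() {
        if err := doWork(); err != nil {
            // append our own stack (this frame and up) to the error report
            log.Error("doWork failed: %v\n%s", err, log.Stack(0))
        }
    }
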
diff --git a/modules/log/xorm.go b/modules/log/xorm.go
index 1a218a90cd..c08b7fd7d5 100644
--- a/modules/log/xorm.go
+++ b/modules/log/xorm.go
@@ -12,7 +12,6 @@ import (
// XORMLogBridge a logger bridge from Logger to xorm
type XORMLogBridge struct {
- loggers []*Logger
showSQL bool
level core.LogLevel
}
@@ -22,110 +21,80 @@ var (
XORMLogger *XORMLogBridge
)
-// DiscardXORMLogger inits a blank logger for xorm
-func DiscardXORMLogger() {
+// InitXORMLogger inits a log bridge for xorm
+func InitXORMLogger(showSQL bool) {
XORMLogger = &XORMLogBridge{
- showSQL: false,
+ showSQL: showSQL,
}
}
-// NewXORMLogger generate logger for xorm FIXME: configable
-func NewXORMLogger(bufferlen int64, mode, config string) {
- logger := newLogger(bufferlen)
- logger.SetLogger(mode, config)
- if XORMLogger == nil {
- XORMLogger = &XORMLogBridge{
- showSQL: true,
- }
- }
- XORMLogger.loggers = append(XORMLogger.loggers, logger)
+// GetGiteaLevel returns the minimum Gitea logger level
+func (l *XORMLogBridge) GetGiteaLevel() Level {
+ return GetLogger("xorm").GetLevel()
}
-func (l *XORMLogBridge) writerMsg(skip, level int, msg string) error {
- for _, logger := range l.loggers {
- if err := logger.writerMsg(skip, level, msg); err != nil {
- return err
- }
- }
- return nil
+// Log a message at the provided level, with the caller determined by skip
+func (l *XORMLogBridge) Log(skip int, level Level, format string, v ...interface{}) error {
+ return GetLogger("xorm").Log(skip+1, level, format, v...)
}
// Debug show debug log
func (l *XORMLogBridge) Debug(v ...interface{}) {
- if l.level <= core.LOG_DEBUG {
- msg := fmt.Sprint(v...)
- l.writerMsg(0, DEBUG, "[D]"+msg)
- }
+ l.Log(2, DEBUG, fmt.Sprint(v...))
}
// Debugf show debug log
func (l *XORMLogBridge) Debugf(format string, v ...interface{}) {
- if l.level <= core.LOG_DEBUG {
- for _, logger := range l.loggers {
- logger.Debug(format, v...)
- }
- }
+ l.Log(2, DEBUG, format, v...)
}
// Error show error log
func (l *XORMLogBridge) Error(v ...interface{}) {
- if l.level <= core.LOG_ERR {
- msg := fmt.Sprint(v...)
- l.writerMsg(0, ERROR, "[E]"+msg)
- }
+ l.Log(2, ERROR, fmt.Sprint(v...))
}
// Errorf show error log
func (l *XORMLogBridge) Errorf(format string, v ...interface{}) {
- if l.level <= core.LOG_ERR {
- for _, logger := range l.loggers {
- logger.Error(0, format, v...)
- }
- }
+ l.Log(2, ERROR, format, v...)
}
// Info show information level log
func (l *XORMLogBridge) Info(v ...interface{}) {
- if l.level <= core.LOG_INFO {
- msg := fmt.Sprint(v...)
- l.writerMsg(0, INFO, "[I]"+msg)
- }
+ l.Log(2, INFO, fmt.Sprint(v...))
}
// Infof show information level log
func (l *XORMLogBridge) Infof(format string, v ...interface{}) {
- if l.level <= core.LOG_INFO {
- for _, logger := range l.loggers {
- logger.Info(format, v...)
- }
- }
+ l.Log(2, INFO, format, v...)
}
// Warn show warning log
func (l *XORMLogBridge) Warn(v ...interface{}) {
- if l.level <= core.LOG_WARNING {
- msg := fmt.Sprint(v...)
- l.writerMsg(0, WARN, "[W] "+msg)
- }
+ l.Log(2, WARN, fmt.Sprint(v...))
}
// Warnf show warnning log
func (l *XORMLogBridge) Warnf(format string, v ...interface{}) {
- if l.level <= core.LOG_WARNING {
- for _, logger := range l.loggers {
- logger.Warn(format, v...)
- }
- }
+ l.Log(2, WARN, format, v...)
}
// Level get logger level
func (l *XORMLogBridge) Level() core.LogLevel {
- return l.level
+ switch l.GetGiteaLevel() {
+ case TRACE, DEBUG:
+ return core.LOG_DEBUG
+ case INFO:
+ return core.LOG_INFO
+ case WARN:
+ return core.LOG_WARNING
+ case ERROR, CRITICAL:
+ return core.LOG_ERR
+ }
+ return core.LOG_OFF
}
-// SetLevel set logger level
-func (l *XORMLogBridge) SetLevel(level core.LogLevel) {
- l.level = level
+// SetLevel sets the logger level (a no-op: the level is derived from the named Gitea logger)
+func (l *XORMLogBridge) SetLevel(lvl core.LogLevel) {
}
// ShowSQL set if record SQL
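
With the bridge now delegating to the named "xorm" logger, hooking it up reduces to something like the sketch below; engine is an assumed *xorm.Engine, and SetLogger/ShowSQL are the usual go-xorm entry points:

    log.InitXORMLogger(setting.LogSQL)
    engine.SetLogger(log.XORMLogger)
    engine.ShowSQL(setting.LogSQL)
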
diff --git a/modules/mailer/mailer.go b/modules/mailer/mailer.go
index 6f4c9488d5..6120068d49 100644
--- a/modules/mailer/mailer.go
+++ b/modules/mailer/mailer.go
@@ -258,7 +258,7 @@ func processMailQueue() {
case msg := <-mailQueue:
log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info)
if err := gomail.Send(Sender, msg.Message); err != nil {
- log.Error(3, "Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
+ log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
} else {
log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info)
}
diff --git a/modules/markup/external/external.go b/modules/markup/external/external.go
index 5963a06d30..02cf61be16 100644
--- a/modules/markup/external/external.go
+++ b/modules/markup/external/external.go
@@ -67,7 +67,7 @@ func (p *Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]stri
// write to temp file
f, err := ioutil.TempFile("", "gitea_input")
if err != nil {
- log.Error(4, "%s create temp file when rendering %s failed: %v", p.Name(), p.Command, err)
+ log.Error("%s create temp file when rendering %s failed: %v", p.Name(), p.Command, err)
return []byte("")
}
defer os.Remove(f.Name())
@@ -75,13 +75,13 @@ func (p *Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]stri
_, err = io.Copy(f, rd)
if err != nil {
f.Close()
- log.Error(4, "%s write data to temp file when rendering %s failed: %v", p.Name(), p.Command, err)
+ log.Error("%s write data to temp file when rendering %s failed: %v", p.Name(), p.Command, err)
return []byte("")
}
err = f.Close()
if err != nil {
- log.Error(4, "%s close temp file when rendering %s failed: %v", p.Name(), p.Command, err)
+ log.Error("%s close temp file when rendering %s failed: %v", p.Name(), p.Command, err)
return []byte("")
}
args = append(args, f.Name())
@@ -98,7 +98,7 @@ func (p *Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]stri
}
cmd.Stdout = buf
if err := cmd.Run(); err != nil {
- log.Error(4, "%s render run command %s %v failed: %v", p.Name(), commands[0], args, err)
+ log.Error("%s render run command %s %v failed: %v", p.Name(), commands[0], args, err)
return []byte("")
}
return buf.Bytes()
diff --git a/modules/markup/markup.go b/modules/markup/markup.go
index c506007723..0ea4099600 100644
--- a/modules/markup/markup.go
+++ b/modules/markup/markup.go
@@ -74,7 +74,7 @@ func render(parser Parser, rawBytes []byte, urlPrefix string, metas map[string]s
// TODO: one day the error should be returned.
result, err := PostProcess(result, urlPrefix, metas, isWiki)
if err != nil {
- log.Error(3, "PostProcess: %v", err)
+ log.Error("PostProcess: %v", err)
}
return SanitizeBytes(result)
}
diff --git a/modules/markup/orgmode/orgmode.go b/modules/markup/orgmode/orgmode.go
index 6b6d622d7b..f63155201e 100644
--- a/modules/markup/orgmode/orgmode.go
+++ b/modules/markup/orgmode/orgmode.go
@@ -35,7 +35,7 @@ func (Parser) Extensions() []string {
func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) (result []byte) {
defer func() {
if err := recover(); err != nil {
- log.Error(4, "Panic in orgmode.Render: %v Just returning the rawBytes", err)
+ log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err)
result = rawBytes
}
}()
diff --git a/modules/notification/indexer/indexer.go b/modules/notification/indexer/indexer.go
index 45752e411a..66614b2c20 100644
--- a/modules/notification/indexer/indexer.go
+++ b/modules/notification/indexer/indexer.go
@@ -29,7 +29,7 @@ func (r *indexerNotifier) NotifyCreateIssueComment(doer *models.User, repo *mode
if comment.Type == models.CommentTypeComment {
if issue.Comments == nil {
if err := issue.LoadDiscussComments(); err != nil {
- log.Error(4, "LoadComments failed: %v", err)
+ log.Error("LoadComments failed: %v", err)
return
}
} else {
@@ -63,7 +63,7 @@ func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comme
if !found {
if err := c.Issue.LoadDiscussComments(); err != nil {
- log.Error(4, "LoadComments failed: %v", err)
+ log.Error("LoadComments failed: %v", err)
return
}
}
@@ -87,7 +87,7 @@ func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models
if !found {
if err := comment.Issue.LoadDiscussComments(); err != nil {
- log.Error(4, "LoadComments failed: %v", err)
+ log.Error("LoadComments failed: %v", err)
return
}
}
diff --git a/modules/notification/mail/mail.go b/modules/notification/mail/mail.go
index 8bb4173449..11682bf724 100644
--- a/modules/notification/mail/mail.go
+++ b/modules/notification/mail/mail.go
@@ -37,25 +37,25 @@ func (m *mailNotifier) NotifyCreateIssueComment(doer *models.User, repo *models.
}
if err := comment.MailParticipants(act, issue); err != nil {
- log.Error(4, "MailParticipants: %v", err)
+ log.Error("MailParticipants: %v", err)
}
}
func (m *mailNotifier) NotifyNewIssue(issue *models.Issue) {
if err := issue.MailParticipants(); err != nil {
- log.Error(4, "MailParticipants: %v", err)
+ log.Error("MailParticipants: %v", err)
}
}
func (m *mailNotifier) NotifyIssueChangeStatus(doer *models.User, issue *models.Issue, isClosed bool) {
if err := issue.MailParticipants(); err != nil {
- log.Error(4, "MailParticipants: %v", err)
+ log.Error("MailParticipants: %v", err)
}
}
func (m *mailNotifier) NotifyNewPullRequest(pr *models.PullRequest) {
if err := pr.Issue.MailParticipants(); err != nil {
- log.Error(4, "MailParticipants: %v", err)
+ log.Error("MailParticipants: %v", err)
}
}
@@ -69,6 +69,6 @@ func (m *mailNotifier) NotifyPullRequestReview(pr *models.PullRequest, r *models
act = models.ActionCommentIssue
}
if err := comment.MailParticipants(act, pr.Issue); err != nil {
- log.Error(4, "MailParticipants: %v", err)
+ log.Error("MailParticipants: %v", err)
}
}
diff --git a/modules/notification/ui/ui.go b/modules/notification/ui/ui.go
index 9870043580..a31591c2a7 100644
--- a/modules/notification/ui/ui.go
+++ b/modules/notification/ui/ui.go
@@ -39,7 +39,7 @@ func (ns *notificationService) Run() {
select {
case opts := <-ns.issueQueue:
if err := models.CreateOrUpdateIssueNotifications(opts.issue, opts.notificationAuthorID); err != nil {
- log.Error(4, "Was unable to create issue notification: %v", err)
+ log.Error("Was unable to create issue notification: %v", err)
}
}
}
diff --git a/modules/pprof/pprof.go b/modules/pprof/pprof.go
index 7f4fc653f3..e02c2d0f2a 100644
--- a/modules/pprof/pprof.go
+++ b/modules/pprof/pprof.go
@@ -17,12 +17,12 @@ import (
func DumpMemProfileForUsername(pprofDataPath, username string) {
f, err := ioutil.TempFile(pprofDataPath, fmt.Sprintf("memprofile_%s_", username))
if err != nil {
- log.GitLogger.Fatal(4, "Could not create memory profile: %v", err)
+ log.GitLogger.Fatal("Could not create memory profile: %v", err)
}
defer f.Close()
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
- log.GitLogger.Fatal(4, "Could not write memory profile: %v", err)
+ log.GitLogger.Fatal("Could not write memory profile: %v", err)
}
}
@@ -31,7 +31,7 @@ func DumpMemProfileForUsername(pprofDataPath, username string) {
func DumpCPUProfileForUsername(pprofDataPath, username string) func() {
f, err := ioutil.TempFile(pprofDataPath, fmt.Sprintf("cpuprofile_%s_", username))
if err != nil {
- log.GitLogger.Fatal(4, "Could not create cpu profile: %v", err)
+ log.GitLogger.Fatal("Could not create cpu profile: %v", err)
}
pprof.StartCPUProfile(f)
diff --git a/modules/setting/cache.go b/modules/setting/cache.go
index cbe73cf4e3..babb62baea 100644
--- a/modules/setting/cache.go
+++ b/modules/setting/cache.go
@@ -35,7 +35,7 @@ func newCacheService() {
case "redis", "memcache":
CacheService.Conn = strings.Trim(sec.Key("HOST").String(), "\" ")
default:
- log.Fatal(4, "Unknown cache adapter: %s", CacheService.Adapter)
+ log.Fatal("Unknown cache adapter: %s", CacheService.Adapter)
}
CacheService.TTL = sec.Key("ITEM_TTL").MustDuration(16 * time.Hour)
diff --git a/modules/setting/cron.go b/modules/setting/cron.go
index 48298b453c..c544c6c228 100644
--- a/modules/setting/cron.go
+++ b/modules/setting/cron.go
@@ -119,6 +119,6 @@ var (
func newCron() {
if err := Cfg.Section("cron").MapTo(&Cron); err != nil {
- log.Fatal(4, "Failed to map Cron settings: %v", err)
+ log.Fatal("Failed to map Cron settings: %v", err)
}
}
diff --git a/modules/setting/git.go b/modules/setting/git.go
index 5d9701a435..0ad16fc81e 100644
--- a/modules/setting/git.go
+++ b/modules/setting/git.go
@@ -56,13 +56,13 @@ var (
func newGit() {
if err := Cfg.Section("git").MapTo(&Git); err != nil {
- log.Fatal(4, "Failed to map Git settings: %v", err)
+ log.Fatal("Failed to map Git settings: %v", err)
}
git.DefaultCommandExecutionTimeout = time.Duration(Git.Timeout.Default) * time.Second
binVersion, err := git.BinVersion()
if err != nil {
- log.Fatal(4, "Error retrieving git version: %v", err)
+ log.Fatal("Error retrieving git version: %v", err)
}
if version.Compare(binVersion, "2.9", ">=") {
diff --git a/modules/setting/log.go b/modules/setting/log.go
index 7b0832c0c8..c6b5b1a617 100644
--- a/modules/setting/log.go
+++ b/modules/setting/log.go
@@ -5,182 +5,287 @@
package setting
import (
- "fmt"
+ "encoding/json"
+ golog "log"
"os"
"path"
"path/filepath"
+ "runtime"
"strings"
"code.gitea.io/gitea/modules/log"
- "github.com/go-xorm/core"
+
+ ini "gopkg.in/ini.v1"
)
-var logLevels = map[string]string{
- "Trace": "0",
- "Debug": "1",
- "Info": "2",
- "Warn": "3",
- "Error": "4",
- "Critical": "5",
+type defaultLogOptions struct {
+ levelName string // LogLevel
+ flags string
+ filename string //path.Join(LogRootPath, "gitea.log")
+ bufferLength int64
+ disableConsole bool
}
-func getLogLevel(section string, key string, defaultValue string) string {
- validLevels := []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"}
- return Cfg.Section(section).Key(key).In(defaultValue, validLevels)
+func newDefaultLogOptions() defaultLogOptions {
+ return defaultLogOptions{
+ levelName: LogLevel,
+ flags: "stdflags",
+ filename: filepath.Join(LogRootPath, "gitea.log"),
+ bufferLength: 10000,
+ disableConsole: false,
+ }
}
-func newLogService() {
- log.Info("Gitea v%s%s", AppVer, AppBuiltWith)
+// SubLogDescription describes a sublogger
+type SubLogDescription struct {
+ Name string
+ Provider string
+ Config string
+}
- LogModes = strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
- LogConfigs = make([]string, len(LogModes))
+// LogDescription describes a named logger
+type LogDescription struct {
+ Name string
+ SubLogDescriptions []SubLogDescription
+}
- useConsole := false
- for i := 0; i < len(LogModes); i++ {
- LogModes[i] = strings.TrimSpace(LogModes[i])
- if LogModes[i] == "console" {
- useConsole = true
+func getLogLevel(section *ini.Section, key string, defaultValue string) string {
+ value := section.Key(key).MustString("info")
+ return log.FromString(value).String()
+}
+
+func getStacktraceLogLevel(section *ini.Section, key string, defaultValue string) string {
+ value := section.Key(key).MustString("none")
+ return log.FromString(value).String()
+}
+
+func generateLogConfig(sec *ini.Section, name string, defaults defaultLogOptions) (mode, jsonConfig, levelName string) {
+ levelName = getLogLevel(sec, "LEVEL", LogLevel)
+ level := log.FromString(levelName)
+ stacktraceLevelName := getStacktraceLogLevel(sec, "STACKTRACE_LEVEL", StacktraceLogLevel)
+ stacktraceLevel := log.FromString(stacktraceLevelName)
+ mode = name
+ keys := sec.Keys()
+ logPath := defaults.filename
+ flags := log.FlagsFromString(defaults.flags)
+ expression := ""
+ prefix := ""
+ for _, key := range keys {
+ switch key.Name() {
+ case "MODE":
+ mode = key.MustString(name)
+ case "FILE_NAME":
+ logPath = key.MustString(defaults.filename)
+ forcePathSeparator(logPath)
+ if !filepath.IsAbs(logPath) {
+ logPath = path.Join(LogRootPath, logPath)
+ }
+ case "FLAGS":
+ flags = log.FlagsFromString(key.MustString(defaults.flags))
+ case "EXPRESSION":
+ expression = key.MustString("")
+ case "PREFIX":
+ prefix = key.MustString("")
}
}
- if !useConsole {
- log.DelLogger("console")
+ logConfig := map[string]interface{}{
+ "level": level.String(),
+ "expression": expression,
+ "prefix": prefix,
+ "flags": flags,
+ "stacktraceLevel": stacktraceLevel.String(),
}
- for i, mode := range LogModes {
- sec, err := Cfg.GetSection("log." + mode)
- if err != nil {
- sec, _ = Cfg.NewSection("log." + mode)
+ // Generate log configuration.
+ switch mode {
+ case "console":
+ useStderr := sec.Key("STDERR").MustBool(false)
+ logConfig["stderr"] = useStderr
+ if useStderr {
+ logConfig["colorize"] = sec.Key("COLORIZE").MustBool(log.CanColorStderr)
+ } else {
+ logConfig["colorize"] = sec.Key("COLORIZE").MustBool(log.CanColorStdout)
}
- // Log level.
- levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
- level, ok := logLevels[levelName]
- if !ok {
- log.Fatal(4, "Unknown log level: %s", levelName)
+ case "file":
+ if err := os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
+ panic(err.Error())
}
- // Generate log configuration.
- switch mode {
- case "console":
- LogConfigs[i] = fmt.Sprintf(`{"level":%s}`, level)
- case "file":
- logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "gitea.log"))
- if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
- panic(err.Error())
- }
+ logConfig["colorize"] = sec.Key("COLORIZE").MustBool(runtime.GOOS != "windows")
+ logConfig["filename"] = logPath
+ logConfig["rotate"] = sec.Key("LOG_ROTATE").MustBool(true)
+ logConfig["maxsize"] = 1 << uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28))
+ logConfig["daily"] = sec.Key("DAILY_ROTATE").MustBool(true)
+ logConfig["maxdays"] = sec.Key("MAX_DAYS").MustInt(7)
+ logConfig["compress"] = sec.Key("COMPRESS").MustBool(true)
+ logConfig["compressionLevel"] = sec.Key("COMPRESSION_LEVEL").MustInt(-1)
+ case "conn":
+ logConfig["reconnectOnMsg"] = sec.Key("RECONNECT_ON_MSG").MustBool()
+ logConfig["reconnect"] = sec.Key("RECONNECT").MustBool()
+ logConfig["net"] = sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"})
+ logConfig["addr"] = sec.Key("ADDR").MustString(":7020")
+ case "smtp":
+ logConfig["username"] = sec.Key("USER").MustString("example@example.com")
+ logConfig["password"] = sec.Key("PASSWD").MustString("******")
+ logConfig["host"] = sec.Key("HOST").MustString("127.0.0.1:25")
+ logConfig["sendTos"] = sec.Key("RECEIVERS").MustString("[]")
+ logConfig["subject"] = sec.Key("SUBJECT").MustString("Diagnostic message from Gitea")
+ }
- LogConfigs[i] = fmt.Sprintf(
- `{"level":%s,"filename":"%s","rotate":%v,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
- logPath,
- sec.Key("LOG_ROTATE").MustBool(true),
- 1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
- sec.Key("DAILY_ROTATE").MustBool(true),
- sec.Key("MAX_DAYS").MustInt(7))
- case "conn":
- LogConfigs[i] = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
- sec.Key("RECONNECT_ON_MSG").MustBool(),
- sec.Key("RECONNECT").MustBool(),
- sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
- sec.Key("ADDR").MustString(":7020"))
- case "smtp":
- LogConfigs[i] = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":["%s"],"subject":"%s"}`, level,
- sec.Key("USER").MustString("example@example.com"),
- sec.Key("PASSWD").MustString("******"),
- sec.Key("HOST").MustString("127.0.0.1:25"),
- strings.Replace(sec.Key("RECEIVERS").MustString("example@example.com"), ",", "\",\"", -1),
- sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
- case "database":
- LogConfigs[i] = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
- sec.Key("DRIVER").String(),
- sec.Key("CONN").String())
- }
+ logConfig["colorize"] = sec.Key("COLORIZE").MustBool(false)
- log.NewLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, LogConfigs[i])
- log.Info("Log Mode: %s(%s)", strings.Title(mode), levelName)
+ byteConfig, err := json.Marshal(logConfig)
+ if err != nil {
+ log.Error("Failed to marshal log configuration: %v %v", logConfig, err)
+ return
}
+ jsonConfig = string(byteConfig)
+ return
}
-// NewXORMLogService initializes xorm logger service
-func NewXORMLogService(disableConsole bool) {
- logModes := strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
- var logConfigs string
- for _, mode := range logModes {
- mode = strings.TrimSpace(mode)
+func generateNamedLogger(key string, options defaultLogOptions) *LogDescription {
+ description := LogDescription{
+ Name: key,
+ }
+
+ sections := strings.Split(Cfg.Section("log").Key(strings.ToUpper(key)).MustString(""), ",")
- if disableConsole && mode == "console" {
+ //description.Configs = make([]string, len(description.Sections))
+
+ for i := 0; i < len(sections); i++ {
+ sections[i] = strings.TrimSpace(sections[i])
+ }
+
+ for _, name := range sections {
+ if len(name) == 0 || (name == "console" && options.disableConsole) {
continue
}
-
- sec, err := Cfg.GetSection("log." + mode)
+ sec, err := Cfg.GetSection("log." + name + "." + key)
if err != nil {
- sec, _ = Cfg.NewSection("log." + mode)
+ sec, _ = Cfg.NewSection("log." + name + "." + key)
}
- // Log level.
- levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
- level, ok := logLevels[levelName]
- if !ok {
- log.Fatal(4, "Unknown log level: %s", levelName)
- }
+ provider, config, levelName := generateLogConfig(sec, name, options)
- // Generate log configuration.
- switch mode {
- case "console":
- logConfigs = fmt.Sprintf(`{"level":%s}`, level)
- case "file":
- logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "xorm.log"))
- if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
- panic(err.Error())
- }
- logPath = path.Join(filepath.Dir(logPath), "xorm.log")
-
- logConfigs = fmt.Sprintf(
- `{"level":%s,"filename":"%s","rotate":%v,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
- logPath,
- sec.Key("LOG_ROTATE").MustBool(true),
- 1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
- sec.Key("DAILY_ROTATE").MustBool(true),
- sec.Key("MAX_DAYS").MustInt(7))
- case "conn":
- logConfigs = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
- sec.Key("RECONNECT_ON_MSG").MustBool(),
- sec.Key("RECONNECT").MustBool(),
- sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
- sec.Key("ADDR").MustString(":7020"))
- case "smtp":
- logConfigs = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":"%s","subject":"%s"}`, level,
- sec.Key("USER").MustString("example@example.com"),
- sec.Key("PASSWD").MustString("******"),
- sec.Key("HOST").MustString("127.0.0.1:25"),
- sec.Key("RECEIVERS").MustString("[]"),
- sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
- case "database":
- logConfigs = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
- sec.Key("DRIVER").String(),
- sec.Key("CONN").String())
+ log.NewNamedLogger(key, options.bufferLength, name, provider, config)
+
+ description.SubLogDescriptions = append(description.SubLogDescriptions, SubLogDescription{
+ Name: name,
+ Provider: provider,
+ Config: config,
+ })
+ log.Info("%s Log: %s(%s:%s)", strings.Title(key), strings.Title(name), provider, levelName)
+ }
+
+ LogDescriptions[key] = &description
+
+ return &description
+}
+
+func newMacaronLogService() {
+ options := newDefaultLogOptions()
+ options.filename = filepath.Join(LogRootPath, "macaron.log")
+ options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+
+ Cfg.Section("log").Key("MACARON").MustString("file")
+ if RedirectMacaronLog {
+ generateNamedLogger("macaron", options)
+ }
+}
+
+func newAccessLogService() {
+ EnableAccessLog = Cfg.Section("log").Key("ENABLE_ACCESS_LOG").MustBool(false)
+ AccessLogTemplate = Cfg.Section("log").Key("ACCESS_LOG_TEMPLATE").MustString(
+ `{{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}\" \"{{.Ctx.Req.UserAgent}}"`)
+ Cfg.Section("log").Key("ACCESS").MustString("file")
+ if EnableAccessLog {
+ options := newDefaultLogOptions()
+ options.filename = filepath.Join(LogRootPath, "access.log")
+ options.flags = "" // For the router we don't want any prefixed flags
+ options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+ generateNamedLogger("access", options)
+ }
+}
+
+func newRouterLogService() {
+ Cfg.Section("log").Key("ROUTER").MustString("console")
+
+ if !DisableRouterLog && RedirectMacaronLog {
+ options := newDefaultLogOptions()
+ options.filename = filepath.Join(LogRootPath, "router.log")
+ options.flags = "date,time" // For the router we don't want any prefixed flags
+ options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+ generateNamedLogger("router", options)
+ }
+}
+
+func newLogService() {
+ log.Info("Gitea v%s%s", AppVer, AppBuiltWith)
+
+ options := newDefaultLogOptions()
+ options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+
+ description := LogDescription{
+ Name: log.DEFAULT,
+ }
+
+ sections := strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
+
+ useConsole := false
+ for i := 0; i < len(sections); i++ {
+ sections[i] = strings.TrimSpace(sections[i])
+ if sections[i] == "console" {
+ useConsole = true
}
+ }
- log.NewXORMLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, logConfigs)
- if !disableConsole {
- log.Info("XORM Log Mode: %s(%s)", strings.Title(mode), levelName)
+ if !useConsole {
+ log.DelLogger("console")
+ }
+
+ for _, name := range sections {
+ if len(name) == 0 {
+ continue
}
- var lvl core.LogLevel
- switch levelName {
- case "Trace", "Debug":
- lvl = core.LOG_DEBUG
- case "Info":
- lvl = core.LOG_INFO
- case "Warn":
- lvl = core.LOG_WARNING
- case "Error", "Critical":
- lvl = core.LOG_ERR
+ sec, err := Cfg.GetSection("log." + name)
+ if err != nil {
+ sec, _ = Cfg.NewSection("log." + name)
}
- log.XORMLogger.SetLevel(lvl)
+
+ provider, config, levelName := generateLogConfig(sec, name, options)
+ log.NewLogger(options.bufferLength, name, provider, config)
+ description.SubLogDescriptions = append(description.SubLogDescriptions, SubLogDescription{
+ Name: name,
+ Provider: provider,
+ Config: config,
+ })
+ log.Info("Gitea Log Mode: %s(%s:%s)", strings.Title(name), strings.Title(provider), levelName)
}
- if len(logConfigs) == 0 {
- log.DiscardXORMLogger()
+ LogDescriptions[log.DEFAULT] = &description
+
+ // Finally redirect the default golog to this logger
+ golog.SetFlags(0)
+ golog.SetPrefix("")
+ golog.SetOutput(log.NewLoggerAsWriter("INFO", log.GetLogger(log.DEFAULT)))
+}
+
+// NewXORMLogService initializes xorm logger service
+func NewXORMLogService(disableConsole bool) {
+ EnableXORMLog = Cfg.Section("log").Key("ENABLE_XORM_LOG").MustBool(true)
+ if EnableXORMLog {
+ options := newDefaultLogOptions()
+ options.filename = filepath.Join(LogRootPath, "xorm.log")
+ options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+ options.disableConsole = disableConsole
+
+ Cfg.Section("log").Key("XORM").MustString(",")
+ generateNamedLogger("xorm", options)
+ log.InitXORMLogger(LogSQL)
+ } else {
+ log.InitXORMLogger(false)
}
}
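
The keys read by generateLogConfig map directly onto app.ini; an illustrative, non-exhaustive [log] section (values here are examples, not new defaults):

    [log]
    MODE       = console, file
    LEVEL      = Info
    ROOT_PATH  = /var/log/gitea
    BUFFER_LEN = 10000

    [log.file]
    FILE_NAME        = gitea.log
    LOG_ROTATE       = true
    MAX_DAYS         = 7
    STACKTRACE_LEVEL = Error
    COLORIZE         = false
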
diff --git a/modules/setting/mailer.go b/modules/setting/mailer.go
index 27c3d3a9f2..e627aef017 100644
--- a/modules/setting/mailer.go
+++ b/modules/setting/mailer.go
@@ -84,7 +84,7 @@ func newMailService() {
parsed, err := mail.ParseAddress(MailService.From)
if err != nil {
- log.Fatal(4, "Invalid mailer.FROM (%s): %v", MailService.From, err)
+ log.Fatal("Invalid mailer.FROM (%s): %v", MailService.From, err)
}
MailService.FromName = parsed.Name
MailService.FromEmail = parsed.Address
@@ -96,7 +96,7 @@ func newMailService() {
if MailService.MailerType == "sendmail" {
MailService.SendmailArgs, err = shellquote.Split(sec.Key("SENDMAIL_ARGS").String())
if err != nil {
- log.Error(4, "Failed to parse Sendmail args: %v", CustomConf, err)
+ log.Error("Failed to parse Sendmail args: %s with error %v", CustomConf, err)
}
}
diff --git a/modules/setting/repository.go b/modules/setting/repository.go
index 5880071286..f47661efdf 100644
--- a/modules/setting/repository.go
+++ b/modules/setting/repository.go
@@ -132,7 +132,7 @@ var (
func newRepository() {
homeDir, err := com.HomeDir()
if err != nil {
- log.Fatal(4, "Failed to get home directory: %v", err)
+ log.Fatal("Failed to get home directory: %v", err)
}
homeDir = strings.Replace(homeDir, "\\", "/", -1)
@@ -151,15 +151,15 @@ func newRepository() {
ScriptType = sec.Key("SCRIPT_TYPE").MustString("bash")
if err = Cfg.Section("repository").MapTo(&Repository); err != nil {
- log.Fatal(4, "Failed to map Repository settings: %v", err)
+ log.Fatal("Failed to map Repository settings: %v", err)
} else if err = Cfg.Section("repository.editor").MapTo(&Repository.Editor); err != nil {
- log.Fatal(4, "Failed to map Repository.Editor settings: %v", err)
+ log.Fatal("Failed to map Repository.Editor settings: %v", err)
} else if err = Cfg.Section("repository.upload").MapTo(&Repository.Upload); err != nil {
- log.Fatal(4, "Failed to map Repository.Upload settings: %v", err)
+ log.Fatal("Failed to map Repository.Upload settings: %v", err)
} else if err = Cfg.Section("repository.local").MapTo(&Repository.Local); err != nil {
- log.Fatal(4, "Failed to map Repository.Local settings: %v", err)
+ log.Fatal("Failed to map Repository.Local settings: %v", err)
} else if err = Cfg.Section("repository.pull-request").MapTo(&Repository.PullRequest); err != nil {
- log.Fatal(4, "Failed to map Repository.PullRequest settings: %v", err)
+ log.Fatal("Failed to map Repository.PullRequest settings: %v", err)
}
if !filepath.IsAbs(Repository.Upload.TempPath) {
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index 95fca20b11..b6c5740024 100644
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -7,6 +7,7 @@ package setting
import (
"encoding/base64"
+ "fmt"
"io"
"io/ioutil"
"net"
@@ -15,7 +16,6 @@ import (
"os/exec"
"path"
"path/filepath"
- "runtime"
"strconv"
"strings"
"time"
@@ -90,7 +90,6 @@ var (
RedirectOtherPort bool
PortToRedirect string
OfflineMode bool
- DisableRouterLog bool
CertFile string
KeyFile string
StaticRootPath string
@@ -259,10 +258,16 @@ var (
// Log settings
LogLevel string
+ StacktraceLogLevel string
LogRootPath string
- LogModes []string
- LogConfigs []string
+ LogDescriptions = make(map[string]*LogDescription)
RedirectMacaronLog bool
+ DisableRouterLog bool
+ RouterLogLevel log.Level
+ RouterLogMode string
+ EnableAccessLog bool
+ AccessLogTemplate string
+ EnableXORMLog bool
// Attachment settings
AttachmentPath string
@@ -398,19 +403,19 @@ func getWorkPath(appPath string) string {
}
func init() {
- IsWindows = runtime.GOOS == "windows"
- log.NewLogger(0, "console", `{"level": 0}`)
+ // We can rely on log.CanColorStdout being set properly because modules/log/console_windows.go comes before modules/setting/setting.go lexicographically
+ log.NewLogger(0, "console", "console", fmt.Sprintf(`{"level": "trace", "colorize": %t, "stacktraceLevel": "none"}`, log.CanColorStdout))
var err error
if AppPath, err = getAppPath(); err != nil {
- log.Fatal(4, "Failed to get app path: %v", err)
+ log.Fatal("Failed to get app path: %v", err)
}
AppWorkPath = getWorkPath(AppPath)
}
func forcePathSeparator(path string) {
if strings.Contains(path, "\\") {
- log.Fatal(4, "Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
+ log.Fatal("Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
}
}
@@ -430,16 +435,16 @@ func IsRunUserMatchCurrentUser(runUser string) (string, bool) {
func createPIDFile(pidPath string) {
currentPid := os.Getpid()
if err := os.MkdirAll(filepath.Dir(pidPath), os.ModePerm); err != nil {
- log.Fatal(4, "Failed to create PID folder: %v", err)
+ log.Fatal("Failed to create PID folder: %v", err)
}
file, err := os.Create(pidPath)
if err != nil {
- log.Fatal(4, "Failed to create PID file: %v", err)
+ log.Fatal("Failed to create PID file: %v", err)
}
defer file.Close()
if _, err := file.WriteString(strconv.FormatInt(int64(currentPid), 10)); err != nil {
- log.Fatal(4, "Failed to write PID information: %v", err)
+ log.Fatal("Failed to write PID information: %v", err)
}
}
@@ -451,12 +456,12 @@ func CheckLFSVersion() {
binVersion, err := git.BinVersion()
if err != nil {
- log.Fatal(4, "Error retrieving git version: %v", err)
+ log.Fatal("Error retrieving git version: %v", err)
}
if !version.Compare(binVersion, "2.1.2", ">=") {
LFS.StartServer = false
- log.Error(4, "LFS server support needs at least Git v2.1.2")
+ log.Error("LFS server support needs at least Git v2.1.2")
} else {
git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "filter.lfs.required=",
"-c", "filter.lfs.smudge=", "-c", "filter.lfs.clean=")
@@ -488,7 +493,7 @@ func NewContext() {
if com.IsFile(CustomConf) {
if err := Cfg.Append(CustomConf); err != nil {
- log.Fatal(4, "Failed to load custom conf '%s': %v", CustomConf, err)
+ log.Fatal("Failed to load custom conf '%s': %v", CustomConf, err)
}
} else {
log.Warn("Custom config '%s' not found, ignore this if you're running first time", CustomConf)
@@ -497,14 +502,16 @@ func NewContext() {
homeDir, err := com.HomeDir()
if err != nil {
- log.Fatal(4, "Failed to get home directory: %v", err)
+ log.Fatal("Failed to get home directory: %v", err)
}
homeDir = strings.Replace(homeDir, "\\", "/", -1)
- LogLevel = getLogLevel("log", "LEVEL", "Info")
+ LogLevel = getLogLevel(Cfg.Section("log"), "LEVEL", "Info")
+ StacktraceLogLevel = getStacktraceLogLevel(Cfg.Section("log"), "STACKTRACE_LEVEL", "None")
LogRootPath = Cfg.Section("log").Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log"))
forcePathSeparator(LogRootPath)
RedirectMacaronLog = Cfg.Section("log").Key("REDIRECT_MACARON_LOG").MustBool(false)
+ RouterLogLevel = log.FromString(Cfg.Section("log").Key("ROUTER_LOG_LEVEL").MustString("Info"))
sec := Cfg.Section("server")
AppName = Cfg.Section("").Key("APP_NAME").MustString("Gitea: Git with a cup of tea")
@@ -521,7 +528,7 @@ func NewContext() {
UnixSocketPermissionRaw := sec.Key("UNIX_SOCKET_PERMISSION").MustString("666")
UnixSocketPermissionParsed, err := strconv.ParseUint(UnixSocketPermissionRaw, 8, 32)
if err != nil || UnixSocketPermissionParsed > 0777 {
- log.Fatal(4, "Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
+ log.Fatal("Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
}
UnixSocketPermission = uint32(UnixSocketPermissionParsed)
}
@@ -547,7 +554,7 @@ func NewContext() {
// Check if has app suburl.
url, err := url.Parse(AppURL)
if err != nil {
- log.Fatal(4, "Invalid ROOT_URL '%s': %s", AppURL, err)
+ log.Fatal("Invalid ROOT_URL '%s': %s", AppURL, err)
}
// Suburl should start with '/' and end without '/', such as '/{subpath}'.
// This value is empty if site does not have sub-url.
@@ -616,7 +623,7 @@ func NewContext() {
}
SSH.KeyTestPath = os.TempDir()
if err = Cfg.Section("server").MapTo(&SSH); err != nil {
- log.Fatal(4, "Failed to map SSH settings: %v", err)
+ log.Fatal("Failed to map SSH settings: %v", err)
}
SSH.KeygenPath = sec.Key("SSH_KEYGEN_PATH").MustString("ssh-keygen")
@@ -630,9 +637,9 @@ func NewContext() {
if !SSH.Disabled && !SSH.StartBuiltinServer {
if err := os.MkdirAll(SSH.RootPath, 0700); err != nil {
- log.Fatal(4, "Failed to create '%s': %v", SSH.RootPath, err)
+ log.Fatal("Failed to create '%s': %v", SSH.RootPath, err)
} else if err = os.MkdirAll(SSH.KeyTestPath, 0644); err != nil {
- log.Fatal(4, "Failed to create '%s': %v", SSH.KeyTestPath, err)
+ log.Fatal("Failed to create '%s': %v", SSH.KeyTestPath, err)
}
}
@@ -650,7 +657,7 @@ func NewContext() {
sec = Cfg.Section("server")
if err = sec.MapTo(&LFS); err != nil {
- log.Fatal(4, "Failed to map LFS settings: %v", err)
+ log.Fatal("Failed to map LFS settings: %v", err)
}
LFS.ContentPath = sec.Key("LFS_CONTENT_PATH").MustString(filepath.Join(AppDataPath, "lfs"))
if !filepath.IsAbs(LFS.ContentPath) {
@@ -661,7 +668,7 @@ func NewContext() {
if LFS.StartServer {
if err := os.MkdirAll(LFS.ContentPath, 0700); err != nil {
- log.Fatal(4, "Failed to create '%s': %v", LFS.ContentPath, err)
+ log.Fatal("Failed to create '%s': %v", LFS.ContentPath, err)
}
LFS.JWTSecretBytes = make([]byte, 32)
@@ -670,7 +677,7 @@ func NewContext() {
if err != nil || n != 32 {
LFS.JWTSecretBase64, err = generate.NewJwtSecret()
if err != nil {
- log.Fatal(4, "Error generating JWT Secret for custom config: %v", err)
+ log.Fatal("Error generating JWT Secret for custom config: %v", err)
return
}
@@ -679,24 +686,24 @@ func NewContext() {
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfg.Append(CustomConf); err != nil {
- log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
+ log.Error("Failed to load custom conf '%s': %v", CustomConf, err)
}
}
cfg.Section("server").Key("LFS_JWT_SECRET").SetValue(LFS.JWTSecretBase64)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
- log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
+ log.Fatal("Failed to create '%s': %v", CustomConf, err)
}
if err := cfg.SaveTo(CustomConf); err != nil {
- log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
+ log.Fatal("Error saving generated JWT Secret to custom config: %v", err)
return
}
}
}
if err = Cfg.Section("oauth2").MapTo(&OAuth2); err != nil {
- log.Fatal(4, "Failed to OAuth2 settings: %v", err)
+ log.Fatal("Failed to OAuth2 settings: %v", err)
return
}
@@ -707,24 +714,24 @@ func NewContext() {
if err != nil || n != 32 {
OAuth2.JWTSecretBase64, err = generate.NewJwtSecret()
if err != nil {
- log.Fatal(4, "error generating JWT secret: %v", err)
+ log.Fatal("error generating JWT secret: %v", err)
return
}
cfg := ini.Empty()
if com.IsFile(CustomConf) {
if err := cfg.Append(CustomConf); err != nil {
- log.Error(4, "failed to load custom conf %s: %v", CustomConf, err)
+ log.Error("failed to load custom conf %s: %v", CustomConf, err)
return
}
}
cfg.Section("oauth2").Key("JWT_SECRET").SetValue(OAuth2.JWTSecretBase64)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
- log.Fatal(4, "failed to create '%s': %v", CustomConf, err)
+ log.Fatal("failed to create '%s': %v", CustomConf, err)
return
}
if err := cfg.SaveTo(CustomConf); err != nil {
- log.Fatal(4, "error saving generating JWT secret to custom config: %v", err)
+ log.Fatal("error saving generating JWT secret to custom config: %v", err)
return
}
}
@@ -780,7 +787,7 @@ func NewContext() {
TimeFormat = TimeFormatKey
TestTimeFormat, _ := time.Parse(TimeFormat, TimeFormat)
if TestTimeFormat.Format(time.RFC3339) != "2006-01-02T15:04:05Z" {
- log.Fatal(4, "Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
+ log.Fatal("Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
}
log.Trace("Custom TimeFormat: %s", TimeFormat)
}
@@ -790,7 +797,7 @@ func NewContext() {
if InstallLock {
currentUser, match := IsRunUserMatchCurrentUser(RunUser)
if !match {
- log.Fatal(4, "Expect user '%s' but current user is: %s", RunUser, currentUser)
+ log.Fatal("Expect user '%s' but current user is: %s", RunUser, currentUser)
}
}
@@ -828,7 +835,7 @@ func NewContext() {
if EnableFederatedAvatar || !DisableGravatar {
GravatarSourceURL, err = url.Parse(GravatarSource)
if err != nil {
- log.Fatal(4, "Failed to parse Gravatar URL(%s): %v",
+ log.Fatal("Failed to parse Gravatar URL(%s): %v",
GravatarSource, err)
}
}
@@ -845,15 +852,15 @@ func NewContext() {
}
if err = Cfg.Section("ui").MapTo(&UI); err != nil {
- log.Fatal(4, "Failed to map UI settings: %v", err)
+ log.Fatal("Failed to map UI settings: %v", err)
} else if err = Cfg.Section("markdown").MapTo(&Markdown); err != nil {
- log.Fatal(4, "Failed to map Markdown settings: %v", err)
+ log.Fatal("Failed to map Markdown settings: %v", err)
} else if err = Cfg.Section("admin").MapTo(&Admin); err != nil {
- log.Fatal(4, "Fail to map Admin settings: %v", err)
+ log.Fatal("Fail to map Admin settings: %v", err)
} else if err = Cfg.Section("api").MapTo(&API); err != nil {
- log.Fatal(4, "Failed to map API settings: %v", err)
+ log.Fatal("Failed to map API settings: %v", err)
} else if err = Cfg.Section("metrics").MapTo(&Metrics); err != nil {
- log.Fatal(4, "Failed to map Metrics settings: %v", err)
+ log.Fatal("Failed to map Metrics settings: %v", err)
}
newCron()
@@ -909,35 +916,35 @@ func loadInternalToken(sec *ini.Section) string {
}
tempURI, err := url.Parse(uri)
if err != nil {
- log.Fatal(4, "Failed to parse INTERNAL_TOKEN_URI (%s): %v", uri, err)
+ log.Fatal("Failed to parse INTERNAL_TOKEN_URI (%s): %v", uri, err)
}
switch tempURI.Scheme {
case "file":
fp, err := os.OpenFile(tempURI.RequestURI(), os.O_RDWR, 0600)
if err != nil {
- log.Fatal(4, "Failed to open InternalTokenURI (%s): %v", uri, err)
+ log.Fatal("Failed to open InternalTokenURI (%s): %v", uri, err)
}
defer fp.Close()
buf, err := ioutil.ReadAll(fp)
if err != nil {
- log.Fatal(4, "Failed to read InternalTokenURI (%s): %v", uri, err)
+ log.Fatal("Failed to read InternalTokenURI (%s): %v", uri, err)
}
// No token in the file, generate one and store it.
if len(buf) == 0 {
token, err := generate.NewInternalToken()
if err != nil {
- log.Fatal(4, "Error generate internal token: %v", err)
+ log.Fatal("Error generate internal token: %v", err)
}
if _, err := io.WriteString(fp, token); err != nil {
- log.Fatal(4, "Error writing to InternalTokenURI (%s): %v", uri, err)
+ log.Fatal("Error writing to InternalTokenURI (%s): %v", uri, err)
}
return token
}
return string(buf)
default:
- log.Fatal(4, "Unsupported URI-Scheme %q (INTERNAL_TOKEN_URI = %q)", tempURI.Scheme, uri)
+ log.Fatal("Unsupported URI-Scheme %q (INTERNAL_TOKEN_URI = %q)", tempURI.Scheme, uri)
}
return ""
}
@@ -948,7 +955,7 @@ func loadOrGenerateInternalToken(sec *ini.Section) string {
if len(token) == 0 {
token, err = generate.NewInternalToken()
if err != nil {
- log.Fatal(4, "Error generate internal token: %v", err)
+ log.Fatal("Error generate internal token: %v", err)
}
// Save secret
@@ -956,17 +963,17 @@ func loadOrGenerateInternalToken(sec *ini.Section) string {
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfgSave.Append(CustomConf); err != nil {
- log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
+ log.Error("Failed to load custom conf '%s': %v", CustomConf, err)
}
}
cfgSave.Section("security").Key("INTERNAL_TOKEN").SetValue(token)
if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
- log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
+ log.Fatal("Failed to create '%s': %v", CustomConf, err)
}
if err := cfgSave.SaveTo(CustomConf); err != nil {
- log.Fatal(4, "Error saving generated INTERNAL_TOKEN to custom config: %v", err)
+ log.Fatal("Error saving generated INTERNAL_TOKEN to custom config: %v", err)
}
}
return token
@@ -976,6 +983,9 @@ func loadOrGenerateInternalToken(sec *ini.Section) string {
func NewServices() {
newService()
newLogService()
+ newMacaronLogService()
+ newAccessLogService()
+ newRouterLogService()
NewXORMLogService(false)
newCacheService()
newSessionService()
diff --git a/modules/ssh/ssh.go b/modules/ssh/ssh.go
index d9606488b2..f42c576cdb 100644
--- a/modules/ssh/ssh.go
+++ b/modules/ssh/ssh.go
@@ -43,7 +43,7 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
ch, reqs, err := newChan.Accept()
if err != nil {
- log.Error(3, "Error accepting channel: %v", err)
+ log.Error("Error accepting channel: %v", err)
continue
}
@@ -61,7 +61,7 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
args[0] = strings.TrimLeft(args[0], "\x04")
_, _, err := com.ExecCmdBytes("env", args[0]+"="+args[1])
if err != nil {
- log.Error(3, "env: %v", err)
+ log.Error("env: %v", err)
return
}
case "exec":
@@ -79,23 +79,23 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
stdout, err := cmd.StdoutPipe()
if err != nil {
- log.Error(3, "SSH: StdoutPipe: %v", err)
+ log.Error("SSH: StdoutPipe: %v", err)
return
}
stderr, err := cmd.StderrPipe()
if err != nil {
- log.Error(3, "SSH: StderrPipe: %v", err)
+ log.Error("SSH: StderrPipe: %v", err)
return
}
input, err := cmd.StdinPipe()
if err != nil {
- log.Error(3, "SSH: StdinPipe: %v", err)
+ log.Error("SSH: StdinPipe: %v", err)
return
}
// FIXME: check timeout
if err = cmd.Start(); err != nil {
- log.Error(3, "SSH: Start: %v", err)
+ log.Error("SSH: Start: %v", err)
return
}
@@ -105,7 +105,7 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
io.Copy(ch.Stderr(), stderr)
if err = cmd.Wait(); err != nil {
- log.Error(3, "SSH: Wait: %v", err)
+ log.Error("SSH: Wait: %v", err)
return
}
@@ -121,13 +121,13 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
func listen(config *ssh.ServerConfig, host string, port int) {
listener, err := net.Listen("tcp", host+":"+com.ToStr(port))
if err != nil {
- log.Fatal(4, "Failed to start SSH server: %v", err)
+ log.Fatal("Failed to start SSH server: %v", err)
}
for {
// Once a ServerConfig has been configured, connections can be accepted.
conn, err := listener.Accept()
if err != nil {
- log.Error(3, "SSH: Error accepting incoming connection: %v", err)
+ log.Error("SSH: Error accepting incoming connection: %v", err)
continue
}
@@ -142,7 +142,7 @@ func listen(config *ssh.ServerConfig, host string, port int) {
if err == io.EOF {
log.Warn("SSH: Handshaking with %s was terminated: %v", conn.RemoteAddr(), err)
} else {
- log.Error(3, "SSH: Error on handshaking with %s: %v", conn.RemoteAddr(), err)
+ log.Error("SSH: Error on handshaking with %s: %v", conn.RemoteAddr(), err)
}
return
}
@@ -166,7 +166,7 @@ func Listen(host string, port int, ciphers []string, keyExchanges []string, macs
PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
pkey, err := models.SearchPublicKeyByContent(strings.TrimSpace(string(ssh.MarshalAuthorizedKey(key))))
if err != nil {
- log.Error(3, "SearchPublicKeyByContent: %v", err)
+ log.Error("SearchPublicKeyByContent: %v", err)
return nil, err
}
return &ssh.Permissions{Extensions: map[string]string{"key-id": com.ToStr(pkey.ID)}}, nil
@@ -178,23 +178,23 @@ func Listen(host string, port int, ciphers []string, keyExchanges []string, macs
filePath := filepath.Dir(keyPath)
if err := os.MkdirAll(filePath, os.ModePerm); err != nil {
- log.Error(4, "Failed to create dir %s: %v", filePath, err)
+ log.Error("Failed to create dir %s: %v", filePath, err)
}
err := GenKeyPair(keyPath)
if err != nil {
- log.Fatal(4, "Failed to generate private key: %v", err)
+ log.Fatal("Failed to generate private key: %v", err)
}
log.Trace("SSH: New private key is generateed: %s", keyPath)
}
privateBytes, err := ioutil.ReadFile(keyPath)
if err != nil {
- log.Fatal(4, "SSH: Failed to load private key")
+ log.Fatal("SSH: Failed to load private key")
}
private, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
- log.Fatal(4, "SSH: Failed to parse private key")
+ log.Fatal("SSH: Failed to parse private key")
}
config.AddHostKey(private)
diff --git a/modules/templates/helper.go b/modules/templates/helper.go
index ce077d1a92..d3eb8c48b8 100644
--- a/modules/templates/helper.go
+++ b/modules/templates/helper.go
@@ -351,7 +351,7 @@ func RenderCommitMessageLink(msg, urlPrefix, urlDefault string, metas map[string
// shouldn't be any special HTML.
fullMessage, err := markup.RenderCommitMessage([]byte(cleanMsg), urlPrefix, urlDefault, metas)
if err != nil {
- log.Error(3, "RenderCommitMessage: %v", err)
+ log.Error("RenderCommitMessage: %v", err)
return ""
}
msgLines := strings.Split(strings.TrimSpace(string(fullMessage)), "\n")
@@ -366,7 +366,7 @@ func RenderCommitBody(msg, urlPrefix string, metas map[string]string) template.H
cleanMsg := template.HTMLEscapeString(msg)
fullMessage, err := markup.RenderCommitMessage([]byte(cleanMsg), urlPrefix, "", metas)
if err != nil {
- log.Error(3, "RenderCommitMessage: %v", err)
+ log.Error("RenderCommitMessage: %v", err)
return ""
}
body := strings.Split(strings.TrimSpace(string(fullMessage)), "\n")
@@ -425,7 +425,7 @@ func ActionIcon(opType models.ActionType) string {
func ActionContent2Commits(act Actioner) *models.PushCommits {
push := models.NewPushCommits()
if err := json.Unmarshal([]byte(act.GetContent()), push); err != nil {
- log.Error(4, "json.Unmarshal:\n%s\nERROR: %v", act.GetContent(), err)
+ log.Error("json.Unmarshal:\n%s\nERROR: %v", act.GetContent(), err)
}
return push
}
diff --git a/modules/util/url.go b/modules/util/url.go
index 381e8b935b..537e4c9b52 100644
--- a/modules/util/url.go
+++ b/modules/util/url.go
@@ -30,13 +30,13 @@ func URLJoin(base string, elems ...string) string {
}
baseURL, err := url.Parse(base)
if err != nil {
- log.Error(4, "URLJoin: Invalid base URL %s", base)
+ log.Error("URLJoin: Invalid base URL %s", base)
return ""
}
joinedPath := path.Join(elems...)
argURL, err := url.Parse(joinedPath)
if err != nil {
- log.Error(4, "URLJoin: Invalid arg %s", joinedPath)
+ log.Error("URLJoin: Invalid arg %s", joinedPath)
return ""
}
joinedURL := baseURL.ResolveReference(argURL).String()