summaryrefslogtreecommitdiffstats
path: root/vendor/github.com/mgechev/revive/lint
diff options
context:
space:
mode:
authorLunny Xiao <xiaolunwen@gmail.com>2020-04-04 03:29:12 +0800
committerGitHub <noreply@github.com>2020-04-03 22:29:12 +0300
commit4f63f283c47dcf9e705ce5b8e8857f2b42cff8ad (patch)
treedd5dc2cae6ebae21826ffcce937533559be45a07 /vendor/github.com/mgechev/revive/lint
parent4af7c47b38d382d105726f9553a1a68d46882cbf (diff)
downloadgitea-4f63f283c47dcf9e705ce5b8e8857f2b42cff8ad.tar.gz
gitea-4f63f283c47dcf9e705ce5b8e8857f2b42cff8ad.zip
Rename scripts to build and add revive command as a new build tool command (#10942)
Co-authored-by: techknowlogick <techknowlogick@gitea.io>
Diffstat (limited to 'vendor/github.com/mgechev/revive/lint')
-rw-r--r--vendor/github.com/mgechev/revive/lint/config.go32
-rw-r--r--vendor/github.com/mgechev/revive/lint/failure.go39
-rw-r--r--vendor/github.com/mgechev/revive/lint/file.go278
-rw-r--r--vendor/github.com/mgechev/revive/lint/formatter.go14
-rw-r--r--vendor/github.com/mgechev/revive/lint/linter.go99
-rw-r--r--vendor/github.com/mgechev/revive/lint/package.go178
-rw-r--r--vendor/github.com/mgechev/revive/lint/rule.go31
-rw-r--r--vendor/github.com/mgechev/revive/lint/utils.go128
8 files changed, 799 insertions, 0 deletions
diff --git a/vendor/github.com/mgechev/revive/lint/config.go b/vendor/github.com/mgechev/revive/lint/config.go
new file mode 100644
index 0000000000..fe65ace522
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/config.go
@@ -0,0 +1,32 @@
+package lint
+
// Arguments is the type used for the arguments of a rule.
type Arguments = []interface{}

// RuleConfig is the per-rule configuration: the rule's arguments and the
// severity its failures are reported with.
type RuleConfig struct {
	Arguments Arguments
	Severity  Severity
}

// RulesConfig defines the config for all rules, keyed by rule name.
type RulesConfig = map[string]RuleConfig

// DirectiveConfig is the per-directive linter configuration.
type DirectiveConfig struct {
	Severity Severity
}

// DirectivesConfig defines the config for all directives, keyed by directive name.
type DirectivesConfig = map[string]DirectiveConfig

// Config defines the config of the linter.
type Config struct {
	// IgnoreGeneratedHeader, when true, lints files even if they carry the
	// standard "Code generated ... DO NOT EDIT." marker.
	IgnoreGeneratedHeader bool `toml:"ignoreGeneratedHeader"`
	// Confidence is the minimum confidence a failure must have to be reported.
	Confidence float64
	Severity   Severity
	Rules      RulesConfig `toml:"rule"`
	// ErrorCode and WarningCode are presumably the exit codes reported for
	// the corresponding failure severities — confirm against the callers.
	ErrorCode   int              `toml:"errorCode"`
	WarningCode int              `toml:"warningCode"`
	Directives  DirectivesConfig `toml:"directive"`
}
diff --git a/vendor/github.com/mgechev/revive/lint/failure.go b/vendor/github.com/mgechev/revive/lint/failure.go
new file mode 100644
index 0000000000..479b0cb48b
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/failure.go
@@ -0,0 +1,39 @@
+package lint
+
+import (
+ "go/ast"
+ "go/token"
+)
+
const (
	// SeverityWarning declares failures of type warning.
	SeverityWarning = "warning"
	// SeverityError declares failures of type error.
	SeverityError = "error"
)

// Severity is the type for the failure severities.
type Severity string

// FailurePosition holds the start and end source positions of a failure.
type FailurePosition struct {
	Start token.Position
	End   token.Position
}

// Failure defines a struct for a linting failure.
type Failure struct {
	Failure    string // human-readable description of the problem
	RuleName   string // name of the rule that produced this failure
	Category   string
	Position   FailurePosition
	Node       ast.Node `json:"-"` // offending AST node; excluded from JSON output
	Confidence float64  // failures below the configured confidence threshold are dropped
	// For future use
	ReplacementLine string
}

// GetFilename returns the name of the file where the failure starts.
func (f *Failure) GetFilename() string {
	return f.Position.Start.Filename
}
diff --git a/vendor/github.com/mgechev/revive/lint/file.go b/vendor/github.com/mgechev/revive/lint/file.go
new file mode 100644
index 0000000000..8bef9c220c
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/file.go
@@ -0,0 +1,278 @@
+package lint
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "math"
+ "regexp"
+ "strings"
+)
+
// File is the lint abstraction of a single Go source file: its name, raw
// content, parsed AST and owning package.
type File struct {
	Name    string
	Pkg     *Package
	content []byte
	AST     *ast.File
}

// IsTest reports whether the file is a test file (its name ends in "_test.go").
func (f *File) IsTest() bool { return strings.HasSuffix(f.Name, "_test.go") }

// Content returns the file's raw content.
func (f *File) Content() []byte {
	return f.content
}
+
// NewFile parses content and creates a new File belonging to pkg.
// It returns an error if the source cannot be parsed.
func NewFile(name string, content []byte, pkg *Package) (*File, error) {
	f, err := parser.ParseFile(pkg.fset, name, content, parser.ParseComments)
	if err != nil {
		return nil, err
	}
	return &File{
		Name:    name,
		content: content,
		Pkg:     pkg,
		AST:     f,
	}, nil
}

// ToPosition returns the file/line/column position for the given token offset.
func (f *File) ToPosition(pos token.Pos) token.Position {
	return f.Pkg.fset.Position(pos)
}

// Render renders an AST node back to Go source text.
// It panics if the node cannot be printed.
func (f *File) Render(x interface{}) string {
	var buf bytes.Buffer
	if err := printer.Fprint(&buf, f.Pkg.fset, x); err != nil {
		panic(err)
	}
	return buf.String()
}

// CommentMap builds a comment map for the file.
func (f *File) CommentMap() ast.CommentMap {
	return ast.NewCommentMap(f.Pkg.fset, f.AST, f.AST.Comments)
}
+
// basicTypeKinds maps each untyped constant kind to the name of its
// default type (per the Go spec's constant default rules).
var basicTypeKinds = map[types.BasicKind]string{
	types.UntypedBool:    "bool",
	types.UntypedInt:     "int",
	types.UntypedRune:    "rune",
	types.UntypedFloat:   "float64",
	types.UntypedComplex: "complex128",
	types.UntypedString:  "string",
}

// IsUntypedConst reports whether expr is an untyped constant,
// and indicates what its default type is.
// scope may be nil.
func (f *File) IsUntypedConst(expr ast.Expr) (defType string, ok bool) {
	// Re-evaluate expr outside of its context to see if it's untyped.
	// (An expr evaluated within, for example, an assignment context will get the type of the LHS.)
	exprStr := f.Render(expr)
	tv, err := types.Eval(f.Pkg.fset, f.Pkg.TypesPkg, expr.Pos(), exprStr)
	if err != nil {
		return "", false
	}
	if b, ok := tv.Type.(*types.Basic); ok {
		if dt, ok := basicTypeKinds[b.Kind()]; ok {
			return dt, true
		}
	}

	return "", false
}
+
+func (f *File) isMain() bool {
+ if f.AST.Name.Name == "main" {
+ return true
+ }
+ return false
+}
+
// directiveSpecifyDisableReason is the directive name that, when present in
// the configuration, requires every revive:disable comment to carry a reason.
const directiveSpecifyDisableReason = "specify-disable-reason"

// lint runs the given rules over the file and sends the resulting failures
// (those at or above the configured confidence) to the failures channel.
func (f *File) lint(rules []Rule, config Config, failures chan Failure) {
	rulesConfig := config.Rules
	_, mustSpecifyDisableReason := config.Directives[directiveSpecifyDisableReason]
	disabledIntervals := f.disabledIntervals(rules, mustSpecifyDisableReason, failures)
	for _, currentRule := range rules {
		ruleConfig := rulesConfig[currentRule.Name()]
		currentFailures := currentRule.Apply(f, ruleConfig.Arguments)
		for idx, failure := range currentFailures {
			// Fill in defaults the rule did not set: the rule name and,
			// when an AST node is attached, the source position.
			if failure.RuleName == "" {
				failure.RuleName = currentRule.Name()
			}
			if failure.Node != nil {
				failure.Position = ToFailurePosition(failure.Node.Pos(), failure.Node.End(), f)
			}
			currentFailures[idx] = failure
		}
		// Drop failures that fall inside revive:disable ranges, then report the rest.
		currentFailures = f.filterFailures(currentFailures, disabledIntervals)
		for _, failure := range currentFailures {
			if failure.Confidence >= config.Confidence {
				failures <- failure
			}
		}
	}
}
+
// enableDisableConfig records a single enable/disable switch for a rule at a
// given line of the file.
type enableDisableConfig struct {
	enabled  bool
	position int
}

// directiveRE matches revive control comments such as
//
//	//revive:disable-next-line:rule1,rule2 some reason
//
// Capture groups: 1 = enable|disable, 2 = line|next-line, 3 = rule list, 4 = reason.
const directiveRE = `^//[\s]*revive:(enable|disable)(?:-(line|next-line))?(?::([^\s]+))?[\s]*(?: (.+))?$`
const directivePos = 1
const modifierPos = 2
const rulesPos = 3
const reasonPos = 4

var re = regexp.MustCompile(directiveRE)
+
// disabledIntervals parses the file's comments for revive:enable/disable
// directives and returns, per rule name, the line intervals in which the rule
// is disabled. When mustSpecifyDisableReason is true, disable directives that
// carry no reason are themselves reported as failures.
func (f *File) disabledIntervals(rules []Rule, mustSpecifyDisableReason bool, failures chan Failure) disabledIntervalsMap {
	// Per rule: the ordered list of enable/disable switches seen so far.
	enabledDisabledRulesMap := make(map[string][]enableDisableConfig)

	// getEnabledDisabledIntervals pairs up the recorded switches: even-indexed
	// entries open a disabled interval, odd-indexed entries close the previous
	// one; an unclosed interval extends to the end of the file (MaxInt32).
	getEnabledDisabledIntervals := func() disabledIntervalsMap {
		result := make(disabledIntervalsMap)

		for ruleName, disabledArr := range enabledDisabledRulesMap {
			ruleResult := []DisabledInterval{}
			for i := 0; i < len(disabledArr); i++ {
				interval := DisabledInterval{
					RuleName: ruleName,
					From: token.Position{
						Filename: f.Name,
						Line:     disabledArr[i].position,
					},
					To: token.Position{
						Filename: f.Name,
						Line:     math.MaxInt32,
					},
				}
				if i%2 == 0 {
					ruleResult = append(ruleResult, interval)
				} else {
					ruleResult[len(ruleResult)-1].To.Line = disabledArr[i].position
				}
			}
			result[ruleName] = ruleResult
		}

		return result
	}

	// handleConfig appends a switch for rule `name` at `line`, dropping
	// redundant switches (same state repeated, or an enable with no prior
	// disable).
	// NOTE(review): the first condition uses len(existing) > 1 where
	// len(existing) > 0 would seem intended; kept as upstream has it —
	// confirm before changing.
	handleConfig := func(isEnabled bool, line int, name string) {
		existing, ok := enabledDisabledRulesMap[name]
		if !ok {
			existing = []enableDisableConfig{}
			enabledDisabledRulesMap[name] = existing
		}
		if (len(existing) > 1 && existing[len(existing)-1].enabled == isEnabled) ||
			(len(existing) == 0 && isEnabled) {
			return
		}
		existing = append(existing, enableDisableConfig{
			enabled:  isEnabled,
			position: line,
		})
		enabledDisabledRulesMap[name] = existing
	}

	// handleRules records the switches implied by one directive for each named
	// rule: "line" toggles for the current line only, "next-line" for the
	// following line, otherwise the switch is open-ended.
	// NOTE(review): the returned slice is never appended to (always nil) and
	// the caller discards it; the return value could be removed.
	handleRules := func(filename, modifier string, isEnabled bool, line int, ruleNames []string) []DisabledInterval {
		var result []DisabledInterval
		for _, name := range ruleNames {
			if modifier == "line" {
				handleConfig(isEnabled, line, name)
				handleConfig(!isEnabled, line, name)
			} else if modifier == "next-line" {
				handleConfig(isEnabled, line+1, name)
				handleConfig(!isEnabled, line+1, name)
			} else {
				handleConfig(isEnabled, line, name)
			}
		}
		return result
	}

	// handleComment matches each comment of the group against the directive
	// regexp and records the corresponding switches.
	// NOTE(review): a non-matching comment returns from the closure, skipping
	// any later comments in the same group — confirm this is intended.
	handleComment := func(filename string, c *ast.CommentGroup, line int) {
		comments := c.List
		for _, c := range comments {
			match := re.FindStringSubmatch(c.Text)
			if len(match) == 0 {
				return
			}

			// Comma-separated rule list; empty means "all rules" (handled below).
			ruleNames := []string{}
			tempNames := strings.Split(match[rulesPos], ",")
			for _, name := range tempNames {
				name = strings.Trim(name, "\n")
				if len(name) > 0 {
					ruleNames = append(ruleNames, name)
				}
			}

			mustCheckDisablingReason := mustSpecifyDisableReason && match[directivePos] == "disable"
			if mustCheckDisablingReason && strings.Trim(match[reasonPos], " ") == "" {
				failures <- Failure{
					Confidence: 1,
					RuleName:   directiveSpecifyDisableReason,
					Failure:    "reason of lint disabling not found",
					Position:   ToFailurePosition(c.Pos(), c.End(), f),
					Node:       c,
				}
				continue // skip this linter disabling directive
			}

			// TODO: optimize
			if len(ruleNames) == 0 {
				for _, rule := range rules {
					ruleNames = append(ruleNames, rule.Name())
				}
			}

			handleRules(filename, match[modifierPos], match[directivePos] == "enable", line, ruleNames)
		}
	}

	comments := f.AST.Comments
	for _, c := range comments {
		handleComment(f.Name, c, f.ToPosition(c.End()).Line)
	}

	return getEnabledDisabledIntervals()
}
+
+func (f *File) filterFailures(failures []Failure, disabledIntervals disabledIntervalsMap) []Failure {
+ result := []Failure{}
+ for _, failure := range failures {
+ fStart := failure.Position.Start.Line
+ fEnd := failure.Position.End.Line
+ intervals, ok := disabledIntervals[failure.RuleName]
+ if !ok {
+ result = append(result, failure)
+ } else {
+ include := true
+ for _, interval := range intervals {
+ intStart := interval.From.Line
+ intEnd := interval.To.Line
+ if (fStart >= intStart && fStart <= intEnd) ||
+ (fEnd >= intStart && fEnd <= intEnd) {
+ include = false
+ break
+ }
+ }
+ if include {
+ result = append(result, failure)
+ }
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/mgechev/revive/lint/formatter.go b/vendor/github.com/mgechev/revive/lint/formatter.go
new file mode 100644
index 0000000000..7c19af278a
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/formatter.go
@@ -0,0 +1,14 @@
+package lint
+
+// FormatterMetadata configuration of a formatter
+type FormatterMetadata struct {
+ Name string
+ Description string
+ Sample string
+}
+
+// Formatter defines an interface for failure formatters
+type Formatter interface {
+ Format(<-chan Failure, Config) (string, error)
+ Name() string
+}
diff --git a/vendor/github.com/mgechev/revive/lint/linter.go b/vendor/github.com/mgechev/revive/lint/linter.go
new file mode 100644
index 0000000000..cdca84fb56
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/linter.go
@@ -0,0 +1,99 @@
+package lint
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/token"
+ "os"
+ "sync"
+)
+
// ReadFile defines an abstraction for reading files.
type ReadFile func(path string) (result []byte, err error)

// disabledIntervalsMap maps a rule name to the line intervals in which the
// rule is disabled.
type disabledIntervalsMap = map[string][]DisabledInterval

// Linter is used for linting a set of files.
type Linter struct {
	reader ReadFile
}

// New creates a new Linter that reads files through reader.
func New(reader ReadFile) Linter {
	return Linter{reader: reader}
}

// Prefix and suffix of the standard "generated code" marker line,
// per https://golang.org/s/generatedcode.
var (
	genHdr = []byte("// Code generated ")
	genFtr = []byte(" DO NOT EDIT.")
)
+
+// Lint lints a set of files with the specified rule.
+func (l *Linter) Lint(packages [][]string, ruleSet []Rule, config Config) (<-chan Failure, error) {
+ failures := make(chan Failure)
+
+ var wg sync.WaitGroup
+ for _, pkg := range packages {
+ wg.Add(1)
+ go func(pkg []string) {
+ if err := l.lintPackage(pkg, ruleSet, config, failures); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ defer wg.Done()
+ }(pkg)
+ }
+
+ go func() {
+ wg.Wait()
+ close(failures)
+ }()
+
+ return failures, nil
+}
+
// lintPackage reads and parses the given files as a single package and lints
// them, sending failures to the failures channel. It returns an error if any
// file cannot be read or parsed.
func (l *Linter) lintPackage(filenames []string, ruleSet []Rule, config Config, failures chan Failure) error {
	pkg := &Package{
		fset:  token.NewFileSet(),
		files: map[string]*File{},
		mu:    sync.Mutex{},
	}
	for _, filename := range filenames {
		content, err := l.reader(filename)
		if err != nil {
			return err
		}
		// Skip generated files unless IgnoreGeneratedHeader is set.
		// NOTE(review): the flag name reads inverted relative to this
		// behavior; kept as upstream revive has it — confirm before changing.
		if isGenerated(content) && !config.IgnoreGeneratedHeader {
			continue
		}

		file, err := NewFile(filename, content, pkg)
		if err != nil {
			return err
		}
		pkg.files[filename] = file
	}

	// Nothing to lint (e.g. every file was generated and skipped).
	if len(pkg.files) == 0 {
		return nil
	}

	pkg.lint(ruleSet, config, failures)

	return nil
}
+
+// isGenerated reports whether the source file is generated code
+// according the rules from https://golang.org/s/generatedcode.
+// This is inherited from the original go lint.
+func isGenerated(src []byte) bool {
+ sc := bufio.NewScanner(bytes.NewReader(src))
+ for sc.Scan() {
+ b := sc.Bytes()
+ if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/mgechev/revive/lint/package.go b/vendor/github.com/mgechev/revive/lint/package.go
new file mode 100644
index 0000000000..7b6046fd7e
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/package.go
@@ -0,0 +1,178 @@
+package lint
+
import (
	"fmt"
	"go/ast"
	"go/token"
	"go/types"
	"sync"

	"golang.org/x/tools/go/gcexportdata"
)
+
// Package represents a package in the project.
type Package struct {
	fset  *token.FileSet
	files map[string]*File

	// Type-checking results; populated lazily by TypeCheck.
	TypesPkg  *types.Package
	TypesInfo *types.Info

	// Sortable is the set of types in the package that implement sort.Interface.
	Sortable map[string]bool
	// main caches whether this is a "main" package, using the
	// trueValue/falseValue sentinels below; 0 means not yet computed.
	main int
	mu   sync.Mutex
}

// newImporter builds the importer used for type checking. It is declared as a
// variable — presumably so it can be substituted in tests; confirm.
var newImporter = func(fset *token.FileSet) types.ImporterFrom {
	return gcexportdata.NewImporter(fset, make(map[string]*types.Package))
}

// Sentinel values for Package.main. Note the zero value of the field (0) is
// the actual "not computed" state; notSet itself is never assigned here.
var (
	trueValue  = 1
	falseValue = 2
	notSet     = 3
)
+
+// IsMain returns if that's the main package.
+func (p *Package) IsMain() bool {
+ if p.main == trueValue {
+ return true
+ } else if p.main == falseValue {
+ return false
+ }
+ for _, f := range p.files {
+ if f.isMain() {
+ p.main = trueValue
+ return true
+ }
+ }
+ p.main = falseValue
+ return false
+}
+
// TypeCheck performs type checking for the given package, memoizing the
// result: subsequent calls return nil immediately. The mutex is held for the
// entire check, so concurrent callers serialize on it.
func (p *Package) TypeCheck() error {
	p.mu.Lock()
	// If type checking has already been performed
	// skip it.
	if p.TypesInfo != nil || p.TypesPkg != nil {
		p.mu.Unlock()
		return nil
	}
	config := &types.Config{
		// By setting a no-op error reporter, the type checker does as much work as possible.
		Error:    func(error) {},
		Importer: newImporter(p.fset),
	}
	info := &types.Info{
		Types:  make(map[ast.Expr]types.TypeAndValue),
		Defs:   make(map[*ast.Ident]types.Object),
		Uses:   make(map[*ast.Ident]types.Object),
		Scopes: make(map[ast.Node]*types.Scope),
	}
	// anyFile is only used to obtain the package name for the checker.
	var anyFile *File
	var astFiles []*ast.File
	for _, f := range p.files {
		anyFile = f
		astFiles = append(astFiles, f.AST)
	}

	typesPkg, err := check(config, anyFile.AST.Name.Name, p.fset, astFiles, info)

	// Remember the typechecking info, even if config.Check failed,
	// since we will get partial information.
	p.TypesPkg = typesPkg
	p.TypesInfo = info
	p.mu.Unlock()
	return err
}
+
// check encapsulates the call to go/types.Config.Check and recovers if the
// called method panics (see issue #59), converting the panic into an error
// result so callers can distinguish failure from success.
func check(config *types.Config, n string, fset *token.FileSet, astFiles []*ast.File, info *types.Info) (p *types.Package, err error) {
	defer func() {
		if r := recover(); r != nil {
			// Previously `err, _ = r.(error)` silently dropped non-error
			// panic values, returning (nil, nil) — indistinguishable from
			// success. Wrap such values instead.
			var ok bool
			if err, ok = r.(error); !ok {
				err = fmt.Errorf("type checker panic: %v", r)
			}
			p = nil
		}
	}()

	return config.Check(n, fset, astFiles, info)
}
+
// TypeOf returns the type of an expression, or nil if the package has not
// been type-checked (or the checker recorded nothing for the expression).
func (p *Package) TypeOf(expr ast.Expr) types.Type {
	if p.TypesInfo == nil {
		return nil
	}
	return p.TypesInfo.TypeOf(expr)
}

// walker scans a file for method declarations whose names appear in nmap and
// accumulates, per receiver type, a bitmask (in has) of which were seen.
type walker struct {
	nmap map[string]int
	has  map[string]int
}

// Visit records the method bit for any method declaration whose name is in nmap.
func (w *walker) Visit(n ast.Node) ast.Visitor {
	fn, ok := n.(*ast.FuncDecl)
	if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 {
		return w
	}
	// TODO(dsymonds): We could check the signature to be more precise.
	recv := receiverType(fn)
	if i, ok := w.nmap[fn.Name.Name]; ok {
		w.has[recv] |= i
	}
	return w
}
+
// scanSortable computes p.Sortable: the set of receiver types that declare
// all three of Len, Less and Swap — candidate sort.Interface implementations.
// Note: only the method names are checked, not their signatures.
func (p *Package) scanSortable() {
	p.Sortable = make(map[string]bool)

	// bitfield for which methods exist on each type.
	const (
		Len = 1 << iota
		Less
		Swap
	)
	nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
	has := make(map[string]int)
	for _, f := range p.files {
		ast.Walk(&walker{nmap, has}, f.AST)
	}
	for typ, ms := range has {
		if ms == Len|Less|Swap {
			p.Sortable[typ] = true
		}
	}
}
+
// receiverType returns the named type of the method receiver, sans "*",
// or "invalid-type" if fn.Recv is ill formed.
func receiverType(fn *ast.FuncDecl) string {
	recv := fn.Recv.List[0].Type
	if ident, ok := recv.(*ast.Ident); ok {
		return ident.Name
	}
	if star, ok := recv.(*ast.StarExpr); ok {
		if ident, ok := star.X.(*ast.Ident); ok {
			return ident.Name
		}
	}
	// The parser accepts much more than just the legal forms.
	return "invalid-type"
}
+
+func (p *Package) lint(rules []Rule, config Config, failures chan Failure) {
+ p.scanSortable()
+ var wg sync.WaitGroup
+ for _, file := range p.files {
+ wg.Add(1)
+ go (func(file *File) {
+ file.lint(rules, config, failures)
+ defer wg.Done()
+ })(file)
+ }
+ wg.Wait()
+}
diff --git a/vendor/github.com/mgechev/revive/lint/rule.go b/vendor/github.com/mgechev/revive/lint/rule.go
new file mode 100644
index 0000000000..815abfdd88
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/rule.go
@@ -0,0 +1,31 @@
+package lint
+
+import (
+ "go/token"
+)
+
// DisabledInterval contains a single disabled interval and the associated rule name.
type DisabledInterval struct {
	From     token.Position
	To       token.Position
	RuleName string
}

// Rule defines an abstract rule interface.
type Rule interface {
	Name() string
	Apply(*File, Arguments) []Failure
}

// AbstractRule defines an abstract rule.
type AbstractRule struct {
	Failures []Failure
}

// ToFailurePosition converts a start/end token.Pos pair into a
// FailurePosition resolved against the file's FileSet.
func ToFailurePosition(start token.Pos, end token.Pos, file *File) FailurePosition {
	return FailurePosition{
		Start: file.ToPosition(start),
		End:   file.ToPosition(end),
	}
}
diff --git a/vendor/github.com/mgechev/revive/lint/utils.go b/vendor/github.com/mgechev/revive/lint/utils.go
new file mode 100644
index 0000000000..28657c6df0
--- /dev/null
+++ b/vendor/github.com/mgechev/revive/lint/utils.go
@@ -0,0 +1,128 @@
+package lint
+
+import (
+ "strings"
+ "unicode"
+)
+
// Name returns a different name if it should be different: initialisms are
// normalized to consistent case and lowercase words following an underscore
// are camel-cased. whitelist lists initialisms to leave untouched even when
// known; blacklist lists extra initialisms to enforce beyond the common set.
func Name(name string, whitelist, blacklist []string) (should string) {
	// Fast path for simple cases: "_" and all lowercase.
	if name == "_" {
		return name
	}
	allLower := true
	for _, r := range name {
		if !unicode.IsLower(r) {
			allLower = false
			break
		}
	}
	if allLower {
		return name
	}

	// Build the initialism lookup sets once, up front: previously they were
	// rebuilt from the slices on every word of the identifier.
	ignoreInitWarnings := make(map[string]bool, len(whitelist))
	for _, s := range whitelist {
		ignoreInitWarnings[s] = true
	}
	extraInits := make(map[string]bool, len(blacklist))
	for _, s := range blacklist {
		extraInits[s] = true
	}

	// Split camelCase at any lower->upper transition, and split on underscores.
	// Check each word for common initialisms.
	runes := []rune(name)
	w, i := 0, 0 // index of start of word, scan
	for i+1 <= len(runes) {
		eow := false // whether we hit the end of a word
		if i+1 == len(runes) {
			eow = true
		} else if runes[i+1] == '_' {
			// underscore; shift the remainder forward over any run of underscores
			eow = true
			n := 1
			for i+n+1 < len(runes) && runes[i+n+1] == '_' {
				n++
			}

			// Leave at most one underscore if the underscore is between two digits
			if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
				n--
			}

			copy(runes[i+1:], runes[i+n+1:])
			runes = runes[:len(runes)-n]
		} else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
			// lower->non-lower
			eow = true
		}
		i++
		if !eow {
			continue
		}

		// [w,i) is a word.
		word := string(runes[w:i])
		if u := strings.ToUpper(word); (commonInitialisms[u] || extraInits[u]) && !ignoreInitWarnings[u] {
			// Keep consistent case, which is lowercase only at the start.
			if w == 0 && unicode.IsLower(runes[w]) {
				u = strings.ToLower(u)
			}
			// All the common initialisms are ASCII,
			// so we can replace the runes exactly.
			copy(runes[w:], []rune(u))
		} else if w > 0 && strings.ToLower(word) == word {
			// already all lowercase, and not the first word, so uppercase the first character.
			runes[w] = unicode.ToUpper(runes[w])
		}
		w = i
	}
	return string(runes)
}

// commonInitialisms is a set of common initialisms.
// Only add entries that are highly unlikely to be non-initialisms.
// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
var commonInitialisms = map[string]bool{
	"ACL":   true,
	"API":   true,
	"ASCII": true,
	"CPU":   true,
	"CSS":   true,
	"DNS":   true,
	"EOF":   true,
	"GUID":  true,
	"HTML":  true,
	"HTTP":  true,
	"HTTPS": true,
	"ID":    true,
	"IP":    true,
	"JSON":  true,
	"LHS":   true,
	"QPS":   true,
	"RAM":   true,
	"RHS":   true,
	"RPC":   true,
	"SLA":   true,
	"SMTP":  true,
	"SQL":   true,
	"SSH":   true,
	"TCP":   true,
	"TLS":   true,
	"TTL":   true,
	"UDP":   true,
	"UI":    true,
	"UID":   true,
	"UUID":  true,
	"URI":   true,
	"URL":   true,
	"UTF8":  true,
	"VM":    true,
	"XML":   true,
	"XMPP":  true,
	"XSRF":  true,
	"XSS":   true,
}