}
if ctx.IsSet("skip-custom-dir") && ctx.Bool("skip-custom-dir") {
- log.Info("Skiping custom directory")
+ log.Info("Skipping custom directory")
} else {
customDir, err := os.Stat(setting.CustomPath)
if err == nil && customDir.IsDir() {
}
}
- // the environment setted on serv command
+ // the environment is set by serv command
isWiki := os.Getenv(models.EnvRepoIsWiki) == "true"
username := os.Getenv(models.EnvRepoUsername)
reponame := os.Getenv(models.EnvRepoName)
}
}
- // the environment setted on serv command
+ // the environment is set by serv command
repoUser := os.Getenv(models.EnvRepoUsername)
isWiki := os.Getenv(models.EnvRepoIsWiki) == "true"
repoName := os.Getenv(models.EnvRepoName)
return fmt.Errorf("Unsupported storage: %s", ctx.String("type"))
}
- log.Warn("All files have been copied to the new placement but old files are still on the orignial placement.")
+ log.Warn("All files have been copied to the new placement but old files are still on the original placement.")
return nil
}
func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler) error {
// If HTTP Challenge enabled, needs to be serving on port 80. For TLSALPN needs 443.
- // Due to docker port mapping this can't be checked programatically
+ // Due to docker port mapping this can't be checked programmatically
// TODO: these are placeholders until we add options for each in settings with appropriate warning
enableHTTPChallenge := true
enableTLSALPNChallenge := true
;; Public is for users visible for everyone
;DEFAULT_USER_VISIBILITY = public
;;
-;; Set whitch visibibilty modes a user can have
+;; Set which visibility modes a user can have
;ALLOWED_USER_VISIBILITY_MODES = public,limited,private
;;
;; Either "public", "limited" or "private", default is "public"
INSTALL_LOCK=true
fi
- # Substitude the environment variables in the template
+ # Substitute the environment variables in the template
APP_NAME=${APP_NAME:-"Gitea: Git with a cup of tea"} \
RUN_MODE=${RUN_MODE:-"prod"} \
DOMAIN=${DOMAIN:-"localhost"} \
INSTALL_LOCK=true
fi
- # Substitude the environment variables in the template
+ # Substitute the environment variables in the template
APP_NAME=${APP_NAME:-"Gitea: Git with a cup of tea"} \
RUN_MODE=${RUN_MODE:-"prod"} \
RUN_USER=${USER:-"git"} \
- Options other than `never` and `always` can be combined as a comma separated list.
- `DEFAULT_TRUST_MODEL`: **collaborator**: \[collaborator, committer, collaboratorcommitter\]: The default trust model used for verifying commits.
- `collaborator`: Trust signatures signed by keys of collaborators.
- - `committer`: Trust signatures that match committers (This matches GitHub and will force Gitea signed commits to have Gitea as the commmitter).
- - `collaboratorcommitter`: Trust signatures signed by keys of collaborators which match the commiter.
+ - `committer`: Trust signatures that match committers (This matches GitHub and will force Gitea signed commits to have Gitea as the committer).
+ - `collaboratorcommitter`: Trust signatures signed by keys of collaborators which match the committer.
- `WIKI`: **never**: \[never, pubkey, twofa, always, parentsigned\]: Sign commits to wiki.
- `CRUD_ACTIONS`: **pubkey, twofa, parentsigned**: \[never, pubkey, twofa, parentsigned, always\]: Sign CRUD actions.
- Options as above, with the addition of:
- `PATH`: **data/gitea.db**: For SQLite3 only, the database file path.
- `LOG_SQL`: **true**: Log the executed SQL.
- `DB_RETRIES`: **10**: How many ORM init / DB connect attempts allowed.
-- `DB_RETRY_BACKOFF`: **3s**: time.Duration to wait before trying another ORM init / DB connect attempt, if failure occured.
+- `DB_RETRY_BACKOFF`: **3s**: time.Duration to wait before trying another ORM init / DB connect attempt, if failure occurred.
- `MAX_OPEN_CONNS` **0**: Database maximum open connections - default is 0, meaning there is no limit.
-- `MAX_IDLE_CONNS` **2**: Max idle database connections on connnection pool, default is 2 - this will be capped to `MAX_OPEN_CONNS`.
+- `MAX_IDLE_CONNS` **2**: Max idle database connections on connection pool, default is 2 - this will be capped to `MAX_OPEN_CONNS`.
- `CONN_MAX_LIFETIME` **0 or 3s**: Sets the maximum amount of time a DB connection may be reused - default is 0, meaning there is no limit (except on MySQL where it is 3s - see #6804 & #7071).
Please see #8540 & #8273 for further discussion of the appropriate values for `MAX_OPEN_CONNS`, `MAX_IDLE_CONNS` & `CONN_MAX_LIFETIME` and their
- `LENGTH`: **20**: Maximal queue size before channel queues block
- `BATCH_LENGTH`: **20**: Batch data before passing to the handler
- `CONN_STR`: **redis://127.0.0.1:6379/0**: Connection string for the redis queue type. Options can be set using query params. Similarly LevelDB options can also be set using: **leveldb://relative/path?option=value** or **leveldb:///absolute/path?option=value**, and will override `DATADIR`
-- `QUEUE_NAME`: **_queue**: The suffix for default redis and disk queue name. Individual queues will default to **`name`**`QUEUE_NAME` but can be overriden in the specific `queue.name` section.
+- `QUEUE_NAME`: **_queue**: The suffix for default redis and disk queue name. Individual queues will default to **`name`**`QUEUE_NAME` but can be overridden in the specific `queue.name` section.
- `SET_NAME`: **_unique**: The suffix that will be added to the default redis and disk queue `set` name for unique queues. Individual queues will default to
**`name`**`QUEUE_NAME`_`SET_NAME`_ but can be overridden in the specific `queue.name` section.
- `WRAP_IF_NECESSARY`: **true**: Will wrap queues with a timeoutable queue if the selected queue is not ready to be created - (Only relevant for the level queue.)
- `AUTO_WATCH_NEW_REPOS`: **true**: Enable this to let all organisation users watch new repos when they are created
- `AUTO_WATCH_ON_CHANGES`: **false**: Enable this to make users watch a repository after their first commit to it
- `DEFAULT_USER_VISIBILITY`: **public**: Set default visibility mode for users, either "public", "limited" or "private".
-- `ALLOWED_USER_VISIBILITY_MODES`: **public,limited,private**: Set whitch visibibilty modes a user can have
+- `ALLOWED_USER_VISIBILITY_MODES`: **public,limited,private**: Set which visibility modes a user can have
- `DEFAULT_ORG_VISIBILITY`: **public**: Set default visibility mode for organisations, either "public", "limited" or "private".
- `DEFAULT_ORG_MEMBER_VISIBLE`: **false** True will make the membership of the users visible when added to the organisation.
- `ALLOW_ONLY_INTERNAL_REGISTRATION`: **false** Set to true to force registration only via gitea.
- ENABLED: **false** Enable markup support; set to **true** to enable this renderer.
- NEED\_POSTPROCESS: **true** set to **true** to replace links / sha1 and etc.
- FILE\_EXTENSIONS: **\<empty\>** List of file extensions that should be rendered by an external
- command. Multiple extentions needs a comma as splitter.
+ command. Multiple extensions need a comma as splitter.
- RENDER\_COMMAND: External command to render all matching extensions.
- IS\_INPUT\_FILE: **false** Input is not a standard input but a file param followed `RENDER_COMMAND`.
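These keys live in a per-renderer `[markup.name]` section of `app.ini`. A minimal sketch of such a section, assuming an AsciiDoc renderer purely for illustration (the section name, extension list and command below are not part of this change):
```ini
[markup.asciidoc]
ENABLED = true
FILE_EXTENSIONS = .adoc,.asciidoc
RENDER_COMMAND = "asciidoc --out-file=- -"
; read the document from standard input instead of passing a file path
IS_INPUT_FILE = false
```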
## Time (`time`)
-- `FORMAT`: Time format to diplay on UI. i.e. RFC1123 or 2006-01-02 15:04:05
+- `FORMAT`: Time format to display on UI. i.e. RFC1123 or 2006-01-02 15:04:05
- `DEFAULT_UI_LOCATION`: Default location of time on the UI, so that we can display correct user's time on UI. i.e. Shanghai/Asia
## Task (`task`)
COLORIZE = false ; this can be true if you can strip out the ansi coloring
```
-Sometimes it will be helpful get some specific `TRACE` level logging retricted
+Sometimes it will be helpful to get some specific `TRACE` level logging restricted
to messages that match a specific `EXPRESSION`. Adjusting the `MODE` in the
`[log]` section to `MODE = console,traceconsole` to add a new logger output
`traceconsole` and then adding its corresponding section would be helpful:
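A minimal sketch of that section (the `EXPRESSION` value here is only an illustrative placeholder; set it to a regular expression matching the messages you want to trace):
```ini
[log.traceconsole]        ; "traceconsole" is just the name chosen above
MODE = console            ; reuse the console writer
LEVEL = trace             ; emit TRACE level messages
EXPRESSION = modules/git  ; only log messages matching this regular expression
```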
1. Go to the repository’s **Settings** > **Tags** page.
1. Type a pattern to match a name. You can use a single name, a [glob pattern](https://pkg.go.dev/github.com/gobwas/glob#Compile) or a regular expression.
-1. Choose the allowed users and/or teams. If you leave these fields empty noone is allowed to create or modify this tag.
+1. Choose the allowed users and/or teams. If you leave these fields empty no one is allowed to create or modify this tag.
1. Select **Save** to save the configuration.
## Pattern protected tags
- Create a service principal name for the host where `gitea.exe` is running with class `HTTP`:
- - Start `Command Prompt` or `PowerShell` as a priviledged domain user (eg. Domain Administrator)
+ - Start `Command Prompt` or `PowerShell` as a privileged domain user (eg. Domain Administrator)
- Run the command below, replacing `host.domain.local` with the fully qualified domain name (FQDN) of the server where the web application will be running, and `domain\user` with the name of the account created in the previous step:
```sh
- Click the `Sign In` button on the dashboard and choose SSPI to be automatically logged in with the same user that is currently logged on to the computer
- If it does not work, make sure that:
- - You are not running the web browser on the same server where gitea is running. You should be running the web browser on a domain joined computer (client) that is different from the server. If both the client and server are runnning on the same computer NTLM will be prefered over Kerberos.
+ - You are not running the web browser on the same server where gitea is running. You should be running the web browser on a domain joined computer (client) that is different from the server. If both the client and server are running on the same computer NTLM will be preferred over Kerberos.
- There is only one `HTTP/...` SPN for the host
- The SPN contains only the hostname, without the port
- You have added the URL of the web app to the `Local intranet zone`
Restricted users are limited to a subset of the content based on their organization/team memberships and collaborations, ignoring the public flag on organizations/repos etc.\_\_
-Example use case: A company runs a Gitea instance that requires login. Most repos are public (accessible/browseable by all co-workers).
+Example use case: A company runs a Gitea instance that requires login. Most repos are public (accessible/browsable by all co-workers).
At some point, a customer or third party needs access to a specific repo and only that repo. Making such a customer account restricted and granting any needed access using team membership(s) and/or collaboration(s) is a simple way to achieve that without the need to make everything private.
- Configuration viewer
- Everything in config file
- System notices
- - When somthing unexpected happens
+ - When something unexpected happens
- Monitoring
- Current processes
- Cron jobs
- Libravatar
- Custom
- Password
- - Mutiple email addresses
+ - Multiple email addresses
- SSH Keys
- Connected applications
- Two factor authentication
Setup a mysql database inside docker
```
docker run -e "MYSQL_DATABASE=test" -e "MYSQL_ALLOW_EMPTY_PASSWORD=yes" -p 3306:3306 --rm --name mysql mysql:latest #(just ctrl-c to stop db and clean the container)
-docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" --rm --name elasticsearch elasticsearch:7.6.0 #(in a secound terminal, just ctrl-c to stop db and clean the container)
+docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" --rm --name elasticsearch elasticsearch:7.6.0 #(in a second terminal, just ctrl-c to stop db and clean the container)
```
Start tests based on the database container
```
setting.API.DefaultPagingNum = 10
session := loginUser(t, "user2")
- // Create enaugh releases to have paging
+ // Create enough releases to have paging
for i := 0; i < 12; i++ {
version := fmt.Sprintf("v0.0.%d", i)
createNewRelease(t, session, "/user2/repo1", version, version, false, false)
expr = strings.TrimSpace(expr)
if expr != "" {
if g, err := glob.Compile(expr, '.', '/'); err != nil {
- log.Info("Invalid glob expresion '%s' (skipped): %v", expr, err)
+ log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
} else {
extarr = append(extarr, g)
}
return f(DBContext{x})
}
-// WithTx represents executing database operations on a trasaction
+// WithTx represents executing database operations on a transaction
func WithTx(f func(ctx DBContext) error) error {
sess := x.NewSession()
if err := sess.Begin(); err != nil {
}
func (err ErrUserDoesNotHaveAccessToRepo) Error() string {
- return fmt.Sprintf("user doesn't have acces to repo [user_id: %d, repo_name: %s]", err.UserID, err.RepoName)
+ return fmt.Sprintf("user doesn't have access to repo [user_id: %d, repo_name: %s]", err.UserID, err.RepoName)
}
// ErrWontSign explains the first reason why a commit would not be signed
}
func (err ErrSHAOrCommitIDNotProvided) Error() string {
- return "a SHA or commmit ID must be proved when updating a file"
+ return "a SHA or commit ID must be proved when updating a file"
}
// __ __ ___. .__ __
var err error
// Find Committer account
committer, err = GetUserByEmail(c.Committer.Email) // This finds the user by primary email or activated email so commit will not be valid if email is not
- if err != nil { // Skipping not user for commiter
+ if err != nil { // No user found for the committer email
committer = &User{
Name: c.Committer.Name,
Email: c.Committer.Email,
}
for _, k := range keys {
- // Pre-check (& optimization) that emails attached to key can be attached to the commiter email and can validate
+ // Pre-check (& optimization) that emails attached to key can be attached to the committer email and can validate
canValidate := false
email := ""
for _, e := range k.Emails {
RefRepoID int64 `xorm:"index"` // Repo where the referencing
RefIssueID int64 `xorm:"index"`
RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's)
- RefAction references.XRefAction `xorm:"SMALLINT"` // What hapens if RefIssueID resolves
+ RefAction references.XRefAction `xorm:"SMALLINT"` // What happens if RefIssueID resolves
RefIsPull bool
RefRepo *Repository `xorm:"-"`
return err
}
-// CreatePushPullComment create push code to pull base commend
+// CreatePushPullComment create push code to pull base comment
func CreatePushPullComment(pusher *User, pr *PullRequest, oldCommitID, newCommitID string) (comment *Comment, err error) {
if pr.HasMerged || oldCommitID == "" || newCommitID == "" {
return nil, nil
return
}
-// getCommitsFromRepo get commit IDs from repo in betwern oldCommitID and newCommitID
+// getCommitsFromRepo get commit IDs from repo in between oldCommitID and newCommitID
// isForcePush will be true if oldCommit isn't on the branch
// Commit on baseBranch will skip
func getCommitIDsFromRepo(repo *Repository, oldCommitID, newCommitID, baseBranch string) (commitIDs []string, isForcePush bool, err error) {
return !exists, err
}
-// IsDependenciesEnabled returns if dependecies are enabled and returns the default setting if not set.
+// IsDependenciesEnabled returns if dependencies are enabled and returns the default setting if not set.
func (repo *Repository) IsDependenciesEnabled() bool {
return repo.isDependenciesEnabled(x)
}
testSuccess(1, "default", []int64{1, 2})
}
-// Org vrsions
+// Org versions
func TestGetLabelInOrgByName(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
} else if has {
return ErrLoginSourceAlreadyExist{source.Name}
}
- // Synchronization is only aviable with LDAP for now
+ // Synchronization is only available with LDAP for now
if !source.IsLDAP() {
source.IsSyncEnabled = false
}
// v122 -> v123
NewMigration("Add Require Signed Commits to ProtectedBranch", addRequireSignedCommits),
// v123 -> v124
- NewMigration("Add original informations for reactions", addReactionOriginals),
+ NewMigration("Add original information for reactions", addReactionOriginals),
// v124 -> v125
NewMigration("Add columns to user and repository", addUserRepoMissingColumns),
// v125 -> v126
return perm, err
}
- // Prevent strangers from checking out public repo of private orginization
- // Allow user if they are collaborator of a repo within a private orginization but not a member of the orginization itself
+ // Prevent strangers from checking out public repo of private organization
+ // Allow user if they are collaborator of a repo within a private organization but not a member of the organization itself
hasOrgVisible := true
// Not SignedUser
if user == nil {
RefRepoID int64 `xorm:"index"` // Repo where the referencing
RefIssueID int64 `xorm:"index"`
RefCommentID int64 `xorm:"index"` // 0 if origin is Issue title or content (or PR's)
- RefAction int `xorm:"SMALLINT"` // What hapens if RefIssueID resolves
+ RefAction int `xorm:"SMALLINT"` // What happens if RefIssueID resolves
RefIsPull bool
}
return nil, err
}
if app.UID != opts.UserID {
- return nil, fmt.Errorf("UID missmatch")
+ return nil, fmt.Errorf("UID mismatch")
}
app.Name = opts.Name
//////////////////////////////////////////////////////
-// OAuth2Grant represents the permission of an user for a specifc application to access resources
+// OAuth2Grant represents the permission of a user for a specific application to access resources
type OAuth2Grant struct {
ID int64 `xorm:"pk autoincr"`
UserID int64 `xorm:"INDEX unique(user_application)"`
jwt.StandardClaims
}
-// ParseOAuth2Token parses a singed jwt string
+// ParseOAuth2Token parses a signed jwt string
func ParseOAuth2Token(jwtToken string) (*OAuth2Token, error) {
parsedToken, err := jwt.ParseWithClaims(jwtToken, &OAuth2Token{}, func(token *jwt.Token) (interface{}, error) {
if token.Method == nil || token.Method.Alg() != oauth2.DefaultSigningKey.SigningMethod().Alg() {
return
}
-// FindOrgMembersOpts represensts find org members condtions
+// FindOrgMembersOpts represents the conditions for finding org members
type FindOrgMembersOpts struct {
ListOptions
OrgID int64
sess.Close()
// We should always delete the files after the database transaction succeed. If
- // we delete the file but the database rollback, the repository will be borken.
+ // we delete the file but the database rolls back, the repository will be broken.
// Remove issue attachment files.
for i := range attachmentPaths {
AllLimited bool // Include also all public repositories of limited organisations
// None -> include public and private
// True -> include just private
- // False -> incude just public
+ // False -> include just public
IsPrivate util.OptionalBool
// None -> include collaborative AND non-collaborative
// True -> include just collaborative
- // False -> incude just non-collaborative
+ // False -> include just non-collaborative
Collaborate util.OptionalBool
// None -> include forks AND non-forks
// True -> include just forks
return
}
- // Prevent strangers from checking out public repo of private orginization/users
+ // Prevent strangers from checking out public repo of private organization/users
// Allow user if they are collaborator of a repo within a private user or a private organization but not a member of the organization itself
if !hasOrgOrUserVisible(e, repo.Owner, user) && !isCollaborator {
perm.AccessMode = AccessModeNone
return testMode <= mode, err
}
-// HasAccessUnit returns ture if user has testMode to the unit of the repository
+// HasAccessUnit returns true if user has testMode to the unit of the repository
func HasAccessUnit(user *User, repo *Repository, unitType UnitType, testMode AccessMode) (bool, error) {
return hasAccessUnit(x, user, repo, unitType, testMode)
}
return m, nil
}
-// GetPushMirrorsByRepoID returns push-mirror informations of a repository.
+// GetPushMirrorsByRepoID returns push-mirror information of a repository.
func GetPushMirrorsByRepoID(repoID int64) ([]*PushMirror, error) {
mirrors := make([]*PushMirror, 0, 10)
return mirrors, x.Where("repo_id=?", repoID).Find(&mirrors)
return nil, err
}
- // Get latest review of each reviwer, sorted in order they were made
+ // Get latest review of each reviewer, sorted in order they were made
if err := sess.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id = 0 AND type in (?, ?, ?) AND dismissed = ? AND original_author_id = 0 GROUP BY issue_id, reviewer_id) ORDER BY review.updated_unix ASC",
issueID, ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest, false).
Find(&reviews); err != nil {
func GetReviewersFromOriginalAuthorsByIssueID(issueID int64) ([]*Review, error) {
reviews := make([]*Review, 0, 10)
- // Get latest review of each reviwer, sorted in order they were made
+ // Get latest review of each reviewer, sorted in order they were made
if err := x.SQL("SELECT * FROM review WHERE id IN (SELECT max(id) as id FROM review WHERE issue_id = ? AND reviewer_team_id = 0 AND type in (?, ?, ?) AND original_author_id <> 0 GROUP BY issue_id, original_author_id) ORDER BY review.updated_unix ASC",
issueID, ReviewTypeApprove, ReviewTypeReject, ReviewTypeRequest).
Find(&reviews); err != nil {
return topics, sess.Desc("topic.repo_count").Find(&topics)
}
-// GetRepoTopicByName retrives topic from name for a repo if it exist
+// GetRepoTopicByName retrieves a topic by name for a repo if it exists
func GetRepoTopicByName(repoID int64, topicName string) (*Topic, error) {
return getRepoTopicByName(x, repoID, topicName)
}
// If Admin - they see all users!
if !opts.Actor.IsAdmin {
- // Force visiblity for privacy
+ // Force visibility for privacy
var accessCond builder.Cond
if !opts.Actor.IsRestricted {
accessCond = builder.Or(
}
} else {
- // Force visiblity for privacy
+ // Force visibility for privacy
// Not logged in - only public users
cond = cond.And(builder.In("visibility", structs.VisibleTypePublic))
}
return err
}
- // 3. update new primay email
+ // 3. update new primary email
email.IsPrimary = true
if _, err = sess.ID(email.ID).Cols("is_primary").Update(email); err != nil {
return err
".gitlab/issue_template",
}
-// PullRequest contains informations to make a pull request
+// PullRequest contains information to make a pull request
type PullRequest struct {
BaseRepo *models.Repository
Allowed bool
var shaLineRegex = regexp.MustCompile("^([a-z0-9]{40})")
-// NextPart returns next part of blame (sequencial code lines with the same commit)
+// NextPart returns next part of blame (sequential code lines with the same commit)
func (r *BlameReader) NextPart() (*BlamePart, error) {
var blamePart *BlamePart
}
}
- // add commiters if present in search query
+ // add committers if present in search query
if len(opts.Committers) > 0 {
for _, v := range opts.Committers {
args = append(args, "--committer="+v)
stdout = append(stdout, '\n')
}
- // if there are any keywords (ie not commiter:, author:, time:)
+ // if there are any keywords (ie not committer:, author:, time:)
// then let's iterate over them
if len(opts.Keywords) > 0 {
for _, v := range opts.Keywords {
return len(strings.TrimSpace(string(stdout))) > 0, nil
}
-// FileCommitsCount return the number of files at a revison
+// FileCommitsCount return the number of files at a revision
func (repo *Repository) FileCommitsCount(revision, file string) (int64, error) {
return CommitsCountFiles(repo.Path, []string{revision}, []string{file})
}
-// CommitsByFileAndRange return the commits according revison file and the page
+// CommitsByFileAndRange return the commits according revision file and the page
func (repo *Repository) CommitsByFileAndRange(revision, file string, page int) (*list.List, error) {
skip := (page - 1) * setting.Git.CommitsRangeSize
return repo.parsePrettyFormatLogToList(stdout)
}
-// CommitsByFileAndRangeNoFollow return the commits according revison file and the page
+// CommitsByFileAndRangeNoFollow return the commits according revision file and the page
func (repo *Repository) CommitsByFileAndRangeNoFollow(revision, file string, page int) (*list.List, error) {
stdout, err := NewCommand("log", revision, "--skip="+strconv.Itoa((page-1)*50),
"--max-count="+strconv.Itoa(setting.Git.CommitsRangeSize), prettyLogFormat, "--", file).RunInDirBytes(repo.Path)
Commits int64
}
-// GetCodeActivityStats returns code statistics for acitivity page
+// GetCodeActivityStats returns code statistics for activity page
func (repo *Repository) GetCodeActivityStats(fromTime time.Time, branch string) (*CodeActivityStats, error) {
stats := &CodeActivityStats{}
"sync"
)
-// ObjectCache provides thread-safe cache opeations.
+// ObjectCache provides thread-safe cache operations.
type ObjectCache struct {
lock sync.RWMutex
cache map[string]interface{}
}
// indexPos find words positions for start and the following end on content. It will
-// return the beginning position of the frist start and the ending position of the
+// return the beginning position of the first start and the ending position of the
// first end following the start string.
// If not found any of the positions, it will return -1, -1.
func indexPos(content, start, end string) (int, int) {
var startIndex, endIndex int = -1, -1
c, ok := hit.Highlight["content"]
if ok && len(c) > 0 {
- // FIXME: Since the high lighting content will include <em> and </em> for the keywords,
- // now we should find the poisitions. But how to avoid html content which contains the
+ // FIXME: Since the highlighting content will include <em> and </em> for the keywords,
+ // now we should find the positions. But how to avoid html content which contains the
// <em> and </em> tags? If elastic search has handled that?
startIndex, endIndex = indexPos(c[0], "<em>", "</em>")
if startIndex == -1 {
Error *ObjectError `json:"error,omitempty"`
}
-// Link provides a structure with informations about how to access a object.
+// Link provides a structure with information about how to access an object.
type Link struct {
Href string `json:"href"`
Header map[string]string `json:"header,omitempty"`
500: ColorBytes(Bold, BgRed),
}
-// ColoredStatus addes colors for HTTP status
+// ColoredStatus adds colors for HTTP status
func ColoredStatus(status int, s ...string) *ColoredValue {
color, ok := statusToColor[status]
if !ok {
"HEAD": ColorBytes(FgBlue, Faint),
}
-// ColoredMethod addes colors for HtTP methos on log
+// ColoredMethod adds colors for HTTP methods on log
func ColoredMethod(method string) *ColoredValue {
color, ok := methodToColor[method]
if !ok {
wayTooLong = ColorBytes(BgMagenta)
)
-// ColoredTime addes colors for time on log
+// ColoredTime adds colors for time on log
func ColoredTime(duration time.Duration) *ColoredValue {
for i, k := range durations {
if duration < k {
}
// RenderEmoji for when we want to just process emoji and shortcodes
-// in various places it isn't already run through the normal markdown procesor
+// in various places it isn't already run through the normal markdown processor
func RenderEmoji(
content string,
) (string, error) {
return ok
}
-// TaskCheckBoxListItem is a block that repressents a list item of a markdown block with a checkbox
+// TaskCheckBoxListItem is a block that represents a list item of a markdown block with a checkbox
type TaskCheckBoxListItem struct {
*ast.ListItem
IsChecked bool
This is [one](link) to paradise.
This **is emphasized**.
-This: should coallesce.
+This: should coalesce.
` + "```" + `
This is a code block.
"This",
"is emphasized",
".",
- "This: should coallesce.",
+ "This: should coalesce.",
"Bullet 1",
"Bullet 2",
"A HIDDEN",
PageSize int
}
-// Downloader downloads the site repo informations
+// Downloader downloads the site repo information
type Downloader interface {
SetContext(context.Context)
GetRepoInfo() (*Repository, error)
package base
-// Label defines a standard label informations
+// Label defines a standard label's information
type Label struct {
Name string
Color string
package base
-// Uploader uploads all the informations of one repository
+// Uploader uploads all the information of one repository
type Uploader interface {
MaxBatchInsertSize(tp string) int
CreateRepo(repo *Repository, opts MigrateOptions) error
return structs.GithubService
}
-// GithubDownloaderV3 implements a Downloader interface to get repository informations
+// GithubDownloaderV3 implements a Downloader interface to get repository information
// from github via APIv3
type GithubDownloaderV3 struct {
base.NullDownloader
return structs.GitlabService
}
-// GitlabDownloader implements a Downloader interface to get repository informations
+// GitlabDownloader implements a Downloader interface to get repository information
// from gitlab via go-gitlab
// - issueCount is incremented in GetIssues() to ensure PR and Issue numbers do not overlap,
// because Gitlab has individual Issue and Pull Request numbers.
return structs.GogsService
}
-// GogsDownloader implements a Downloader interface to get repository informations
+// GogsDownloader implements a Downloader interface to get repository information
// from gogs via API
type GogsDownloader struct {
base.NullDownloader
func (*NullNotifier) NotifyCreateRef(doer *models.User, repo *models.Repository, refType, refFullName string) {
}
-// NotifyDeleteRef notifies branch or tag deleteion to notifiers
+// NotifyDeleteRef notifies branch or tag deletion to notifiers
func (*NullNotifier) NotifyDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string) {
}
{",$!", "", ""},
{"1234", "", ""},
} {
- // The patern only needs to match the part that precedes the reference.
+ // The pattern only needs to match the part that precedes the reference.
// getCrossReference() takes care of finding the reference itself.
pat := makeKeywordsPat([]string{test.pattern})
if test.expected == "" {
user := models.AssertExistsAndLoadBean(t, &models.User{ID: 10}).(*models.User)
// Test that a push with close reference *can not* close issue
- // If the commiter doesn't have push rights in that repo
+ // If the committer doesn't have push rights in that repo
pushCommits := []*repository.PushCommit{
{
Sha1: "abcdef3",
assert.NoError(t, err)
})
- t.Run("Get REAMDE.md contents with ref as empty string (should then use the repo's default branch) with GetContents()", func(t *testing.T) {
+ t.Run("Get README.md contents with ref as empty string (should then use the repo's default branch) with GetContents()", func(t *testing.T) {
fileContentResponse, err := GetContents(ctx.Repo.Repository, treePath, "", false)
assert.EqualValues(t, expectedContentsResponse, fileContentResponse)
assert.NoError(t, err)
assert.NoError(t, err)
})
- t.Run("Get REAMDE.md contents with ref as empty string (should then use the repo's default branch) with GetContentsOrList()", func(t *testing.T) {
+ t.Run("Get README.md contents with ref as empty string (should then use the repo's default branch) with GetContentsOrList()", func(t *testing.T) {
fileContentResponse, err := GetContentsOrList(ctx.Repo.Repository, treePath, "")
assert.EqualValues(t, expectedContentsResponse, fileContentResponse)
assert.NoError(t, err)
var err error
globUser, err = glob.Compile(qsplit[0])
if err != nil {
- log.Info("Invalid glob expresion '%s' (skipped): %v", qsplit[0], err)
+ log.Info("Invalid glob expression '%s' (skipped): %v", qsplit[0], err)
}
if len(qsplit) > 1 {
globRepo, err = glob.Compile(qsplit[1])
if err != nil {
- log.Info("Invalid glob expresion '%s' (skipped): %v", qsplit[1], err)
+ log.Info("Invalid glob expression '%s' (skipped): %v", qsplit[1], err)
}
}
}
"code.gitea.io/gitea/modules/util"
)
-// New creats a new secret
+// New creates a new secret
func New() (string, error) {
return NewWithLength(44)
}
expr = strings.TrimSpace(expr)
if expr != "" {
if g, err := glob.Compile(expr, '.', '/'); err != nil {
- log.Info("Invalid glob expresion '%s' (skipped): %v", expr, err)
+ log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
} else {
extarr = append(extarr, g)
}
)
var (
- // SessionConfig difines Session settings
+ // SessionConfig defines Session settings
SessionConfig = struct {
Provider string
// Provider configuration, it's corresponding to provider.
IterateObjects(func(path string, obj Object) error) error
}
-// Copy copys a file from source ObjectStorage to dest ObjectStorage
+// Copy copies a file from source ObjectStorage to dest ObjectStorage
func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, srcPath string) (int64, error) {
f, err := srcStorage.Open(srcPath)
if err != nil {
return
}
- // if repository is ready, then just finsih the task
+ // if repository is ready, then just finish the task
if t.Repo.Status == models.RepositoryReady {
return nil
}
var svgTagRegex = regexp.MustCompile(`(?si)\A\s*(?:(<!--.*?-->|<!DOCTYPE\s+svg([\s:]+.*?>|>))\s*)*<svg[\s>\/]`)
var svgTagInXMLRegex = regexp.MustCompile(`(?si)\A<\?xml\b.*?\?>\s*(?:(<!--.*?-->|<!DOCTYPE\s+svg([\s:]+.*?>|>))\s*)*<svg[\s>\/]`)
-// SniffedType contains informations about a blobs type.
+// SniffedType contains information about a blob's type.
type SniffedType struct {
contentType string
}
return statDir(rootPath, "", isIncludeDir, false, false)
}
-// FileURLToPath extracts the path informations from a file://... url.
+// FileURLToPath extracts the path information from a file://... url.
func FileURLToPath(u *url.URL) (string, error) {
if u.Scheme != "file" {
return "", errors.New("URL scheme is not 'file': " + u.String())
"~git/Gitea v1.13/gitea",
`~git/"Gitea v1.13/gitea"`,
}, {
- "Bangs are unforutunately not predictable so need to be singlequoted",
+ "Bangs are unfortunately not predictable so need to be singlequoted",
"C:/Program Files/Gitea!/gitea",
`'C:/Program Files/Gitea!/gitea'`,
}, {
"/home/git/Gitea\n\nWHY-WOULD-YOU-DO-THIS\n\nGitea/gitea",
"'/home/git/Gitea\n\nWHY-WOULD-YOU-DO-THIS\n\nGitea/gitea'",
}, {
- "Similarly we should nicely handle mutiple single quotes if we have to single-quote",
+ "Similarly we should nicely handle multiple single quotes if we have to single-quote",
"'!''!'''!''!'!'",
`\''!'\'\''!'\'\'\''!'\'\''!'\''!'\'`,
}, {
return false
}
- // TODO: Later it should be added to allow local network IP addreses
+ // TODO: Later it should be added to allow local network IP addresses
// only if allowed by special setting
return true
valid: false,
},
{
- description: "Loobpack IPv4 URL",
+ description: "Loopback IPv4 URL",
url: "http://127.0.1.1:5678/",
valid: true,
},
{
- description: "Loobpack IPv6 URL",
+ description: "Loopback IPv6 URL",
url: "https://[::1]/",
valid: true,
},
valid: true,
},
{
- description: "Loobpack IPv4 URL",
+ description: "Loopback IPv4 URL",
url: "http://127.0.1.1:5678/",
valid: false,
},
var gitRefNameValidationTestCases = []validationTestCase{
{
- description: "Referece contains only characters",
+ description: "Reference name contains only characters",
data: TestForm{
BranchName: "test",
},
if opts.UserID == 0 {
opts.UserID = ctx.User.ID
} else {
- ctx.Error(http.StatusForbidden, "", fmt.Errorf("query user not allowed not enouth rights"))
+ ctx.Error(http.StatusForbidden, "", fmt.Errorf("query by user not allowed; not enough rights"))
return
}
}
case 1:
tokenID = tokens[0].ID
default:
- ctx.Error(http.StatusUnprocessableEntity, "DeleteAccessTokenByID", fmt.Errorf("multible matches for token name '%s'", token))
+ ctx.Error(http.StatusUnprocessableEntity, "DeleteAccessTokenByID", fmt.Errorf("multiple matches for token name '%s'", token))
return
}
}
return u
}
-// EditUser show editting user page
+// EditUser show editing user page
func EditUser(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("admin.users.edit_account")
ctx.Data["PageIsAdmin"] = true
ctx.HTML(http.StatusOK, tplUserEdit)
}
-// EditUserPost response for editting user
+// EditUserPost response for editing user
func EditUserPost(ctx *context.Context) {
form := web.GetForm(ctx).(*forms.AdminEditUserForm)
ctx.Data["Title"] = ctx.Tr("admin.users.edit_account")
ctx.HTML(http.StatusOK, tplSettingsOptions)
}
-// SettingsPost response for settings change submited
+// SettingsPost response for settings change submitted
func SettingsPost(ctx *context.Context) {
form := web.GetForm(ctx).(*forms.UpdateOrgSettingForm)
ctx.Data["Title"] = ctx.Tr("org.settings")
ctx.Redirect(ctx.Org.OrgLink + "/settings")
}
-// SettingsDeleteAvatar response for delete avatar on setings page
+// SettingsDeleteAvatar response for delete avatar on settings page
func SettingsDeleteAvatar(ctx *context.Context) {
if err := ctx.Org.Organization.DeleteAvatar(); err != nil {
ctx.Flash.Error(err.Error())
repo_service "code.gitea.io/gitea/services/repository"
)
-// httpBase implmentation git smart HTTP protocol
+// httpBase implements the git smart HTTP protocol
func httpBase(ctx *context.Context) (h *serviceHandler) {
if setting.Repository.DisableHTTPGit {
ctx.Resp.WriteHeader(http.StatusForbidden)
ctx.HTML(http.StatusOK, tplIssueChoose)
}
-// ValidateRepoMetas check and returns repository's meta informations
+// ValidateRepoMetas checks and returns the repository's meta information
func ValidateRepoMetas(ctx *context.Context, form forms.CreateIssueForm, isPull bool) ([]int64, []int64, int64, int64) {
var (
repo = ctx.Repo.Repository
ctx.HTML(http.StatusOK, tplRepoHome)
}
-// RenderUserCards render a page show users according the input templaet
+// RenderUserCards renders a page showing users according to the input template
func RenderUserCards(ctx *context.Context, total int, getter func(opts models.ListOptions) ([]*models.User, error), tpl base.TplName) {
page := ctx.QueryInt("page")
if page <= 0 {
ctx.Redirect(setting.AppSubURL + "/user/settings/account")
}
-// ForgotPasswd render the forget pasword page
+// ForgotPasswd renders the forgot password page
func ForgotPasswd(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("auth.forgot_password_title")
routes.Get("/metrics", append(common, Metrics)...)
}
- // Removed: toolbox.Toolboxer middleware will provide debug informations which seems unnecessary
+ // Removed: toolbox.Toolboxer middleware will provide debug information which seems unnecessary
common = append(common, context.Contexter())
// Get user from session if logged in.
}
// Verify uses SSPI (Windows implementation of SPNEGO) to authenticate the request.
-// If authentication is successful, returs the corresponding user object.
+// If authentication is successful, returns the corresponding user object.
// If negotiation should continue or authentication fails, immediately returns a 401 HTTP
// response code, as required by the SPNEGO protocol.
func (s *SSPI) Verify(req *http.Request, w http.ResponseWriter, store DataStore, sess SessionStore) *models.User {
}
if !found {
- // This function also does comments and hooks, which is why we call it seperatly instead of directly removing the assignees here
+ // This function also does comments and hooks, which is why we call it separately instead of directly removing the assignees here
if _, _, err := ToggleAssignee(issue, doer, assignee.ID); err != nil {
return err
}
// NewContext start mail queue service
func NewContext() {
// Need to check if mailQueue is nil because in during reinstall (user had installed
- // before but swithed install lock off), this function will be called again
+ // before but switched install lock off), this function will be called again
// while mail queue is already processing tasks, and produces a race condition.
if setting.MailService == nil || mailQueue != nil {
return
wikiPath := NameToFilename(wikiName)
entry, err := masterTree.GetTreeEntryByPath(wikiPath)
assert.NoError(t, err)
- assert.Equal(t, wikiPath, entry.Name(), "%s not addded correctly", wikiName)
+ assert.Equal(t, wikiPath, entry.Name(), "%s not added correctly", wikiName)
})
}
List
---------------*/
-/* Menu divider shouldnt apply */
+/* Menu divider shouldn't apply */
.ui.menu .list .item:before {
background: none !important;
opacity: 1;
}
-/* Icon Gylph */
+/* Icon Glyph */
.ui.icon.menu i.icon:before {
opacity: 1;
response = JSON.parse(response);
}
catch(e) {
- // isnt json string
+ // isn't json string
}
}
return response;
event: {
click: function(event) {
- module.verbose('Determining if event occured on dimmer', event);
+ module.verbose('Determining if event occurred on dimmer', event);
if( $dimmer.find(event.target).length === 0 || $(event.target).is(selector.content) ) {
module.hide();
event.stopImmediatePropagation();
if(settings.onHide.call(element) !== false) {
module.animate.hide(function() {
module.remove.visible();
- // hidding search focus
+ // hiding search focus
if ( module.is.focusedOnSearch() && preventBlur !== true ) {
$search.blur();
}
*
* @param min A minimum value within multiple values
* @param total A total amount of multiple values
- * @returns {number} A precison. Could be 1, 10, 100, ... 1e+10.
+ * @returns {number} A precision. Could be 1, 10, 100, ... 1e+10.
*/
derivePrecision: function(min, total) {
var precisionPower = 0
nonNumeric : 'Progress value is non numeric',
tooHigh : 'Value specified is above 100%',
tooLow : 'Value specified is below 0%',
- sumExceedsTotal : 'Sum of multple values exceed total',
+ sumExceedsTotal : 'Sum of multiple values exceed total',
},
regExp: {
// possible errors
error: {
- noAnimation : 'Element is no longer attached to DOM. Unable to animate. Use silent setting to surpress this warning in production.',
+ noAnimation : 'Element is no longer attached to DOM. Unable to animate. Use silent setting to suppress this warning in production.',
repeated : 'That animation is already occurring, cancelling repeated animation',
method : 'The method you called is not defined',
support : 'This browser does not support CSS animations'
// Silence fomantic's error logging when tabs are used without a target content element
$.fn.tab.settings.silent = true;
-// Silence Vue's console advertisments in dev mode
+// Silence Vue's console advertisements in dev mode
// To use the Vue browser extension, enable the devtools option temporarily
Vue.config.productionTip = false;
Vue.config.devtools = false;
}
// TODO: Which thing should be done for choosing review requests
- // to make choosed items be shown on time here?
+ // to make chosen items be shown on time here?
if (selector === 'select-reviewers-modify' || selector === 'select-assignees-modify') {
return false;
}
code,
kbd,
samp {
- font-size: .9em; /* compensate for monospace fonts being usually slighty larger */
+ font-size: .9em; /* compensate for monospace fonts being usually slightly larger */
font-family: var(--fonts-monospace);
}
box-shadow: none;
}
- /* Overide semantic selector '.ui.menu:not(.vertical) .item > .button' */
+ /* Override semantic selector '.ui.menu:not(.vertical) .item > .button' */
/* This fixes the commit graph button on the commits page */
.menu:not(.vertical) .item > .button.compact {
.mono {
font-family: var(--fonts-monospace) !important;
- font-size: .9em !important; /* compensate for monospace fonts being usually slighty larger */
+ font-size: .9em !important; /* compensate for monospace fonts being usually slightly larger */
}
.bold { font-weight: 600 !important; }