* chore: rewrite format.
* chore: update format
Signed-off-by: Bo-Yi Wu <appleboy.tw@gmail.com>
* chore: update format
Signed-off-by: Bo-Yi Wu <appleboy.tw@gmail.com>
* chore: Adjacent parameters with the same type should be grouped together
* chore: update format.
}
defer rows.Close()
- var repos = make(map[*Repository]AccessMode, 10)
- var ownerCache = make(map[int64]*User, 10)
+ repos := make(map[*Repository]AccessMode, 10)
+ ownerCache := make(map[int64]*User, 10)
for rows.Next() {
var repo repoAccess
err = rows.Scan(&repo)
}
// GetRepositoryFromMatch returns a *Repository from a username and repo strings
-func GetRepositoryFromMatch(ownerName string, repoName string) (*Repository, error) {
+func GetRepositoryFromMatch(ownerName, repoName string) (*Repository, error) {
var err error
refRepo, err := GetRepositoryByOwnerAndName(ownerName, repoName)
if err != nil {
if len(a.GetIssueInfos()) == 0 {
return "#"
}
- //Return link to issue
+ // Return link to issue
issueIDString := a.GetIssueInfos()[0]
issueID, err := strconv.ParseInt(issueIDString, 10, 64)
if err != nil {
return actions, nil
}
-func activityReadable(user *User, doer *User) bool {
+func activityReadable(user, doer *User) bool {
var doerID int64
if doer != nil {
doerID = doer.ID
"code.gitea.io/gitea/modules/util"
)
-//NoticeType describes the notice type
+// NoticeType describes the notice type
type NoticeType int
const (
- //NoticeRepository type
+ // NoticeRepository type
NoticeRepository NoticeType = iota + 1
// NoticeTask type
NoticeTask
return 0, nil
}
- var ids = make([]int64, 0, len(attachments))
+ ids := make([]int64, 0, len(attachments))
for _, a := range attachments {
ids = append(ids, a.ID)
}
// DeleteAttachmentsByIssue deletes all attachments associated with the given issue.
func DeleteAttachmentsByIssue(issueID int64, remove bool) (int, error) {
attachments, err := GetAttachmentsByIssueID(issueID)
-
if err != nil {
return 0, err
}
// DeleteAttachmentsByComment deletes all attachments associated with the given comment.
func DeleteAttachmentsByComment(commentID int64, remove bool) (int, error) {
attachments, err := GetAttachmentsByCommentID(commentID)
-
if err != nil {
return 0, err
}
var start int
const batchSize = 100
for {
- var attachments = make([]*Attachment, 0, batchSize)
+ attachments := make([]*Attachment, 0, batchSize)
if err := x.Limit(batchSize, start).Find(&attachments); err != nil {
return err
}
user := AssertExistsAndLoadBean(t, &User{ID: 1}).(*User)
- var fPath = "./attachment_test.go"
+ fPath := "./attachment_test.go"
f, err := os.Open(fPath)
assert.NoError(t, err)
defer f.Close()
- var buf = make([]byte, 1024)
+ buf := make([]byte, 1024)
n, err := f.Read(buf)
assert.NoError(t, err)
buf = buf[:n]
assert.Equal(t, tc.expectedRepo.ID, repo.ID)
}
assert.Equal(t, tc.expectedUnitType, unitType)
-
})
}
}
return nil, err
}
- var contexts = make([]string, 0, len(ids))
+ contexts := make([]string, 0, len(ids))
if len(ids) == 0 {
return contexts, nil
}
return contexts, x.Select("context").Table("commit_status").In("id", ids).Find(&contexts)
-
}
// NewCommitStatusOptions holds options for creating a CommitStatus
}
// ErrSSHDisabled represents an "SSH disabled" error.
-type ErrSSHDisabled struct {
-}
+type ErrSSHDisabled struct{}
// IsErrSSHDisabled checks if an error is a ErrSSHDisabled.
func IsErrSSHDisabled(err error) bool {
}
// ErrUserNotAllowedCreateOrg represents a "UserNotAllowedCreateOrg" kind of error.
-type ErrUserNotAllowedCreateOrg struct {
-}
+type ErrUserNotAllowedCreateOrg struct{}
// IsErrUserNotAllowedCreateOrg checks if an error is an ErrUserNotAllowedCreateOrg.
func IsErrUserNotAllowedCreateOrg(err error) bool {
}
// ErrAccessTokenEmpty represents a "AccessTokenEmpty" kind of error.
-type ErrAccessTokenEmpty struct {
-}
+type ErrAccessTokenEmpty struct{}
// IsErrAccessTokenEmpty checks if an error is a ErrAccessTokenEmpty.
func IsErrAccessTokenEmpty(err error) bool {
err := x.Where("user_id=?", user.ID).
Desc("login_source_id").
Find(&externalAccounts)
-
if err != nil {
return nil, err
}
}
// GetUserIDByExternalUserID get user id according to provider and userID
-func GetUserIDByExternalUserID(provider string, userID string) (int64, error) {
+func GetUserIDByExternalUserID(provider, userID string) (int64, error) {
var id int64
_, err := x.Table("external_login_user").
Select("user_id").
}
func (opts FindExternalUserOptions) toConds() builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if len(opts.Provider) > 0 {
cond = cond.And(builder.Eq{"provider": opts.Provider})
}
// GetYamlFixturesAccess returns a string containing the contents
// for the access table, as recalculated using repo.RecalculateAccesses()
func GetYamlFixturesAccess() (string, error) {
-
repos := make([]*Repository, 0, 50)
if err := x.Find(&repos); err != nil {
return "", err
CanCertify bool
}
-//GPGKeyImport the original import of key
+// GPGKeyImport the original import of key
type GPGKeyImport struct {
KeyID string `xorm:"pk CHAR(16) NOT NULL"`
Content string `xorm:"TEXT NOT NULL"`
return list, nil
}
-//addGPGKey add key, import and subkeys to database
+// addGPGKey add key, import and subkeys to database
func addGPGKey(e Engine, key *GPGKey, content string) (err error) {
- //Add GPGKeyImport
+ // Add GPGKeyImport
if _, err = e.Insert(GPGKeyImport{
KeyID: key.KeyID,
Content: content,
return nil
}
-//addGPGSubKey add subkeys to database
+// addGPGSubKey add subkeys to database
func addGPGSubKey(e Engine, key *GPGKey) (err error) {
// Save GPG primary key.
if _, err = e.Insert(key); err != nil {
return nil, ErrGPGKeyIDAlreadyUsed{ekey.PrimaryKey.KeyIdString()}
}
- //Get DB session
+ // Get DB session
key, err := parseGPGKey(ownerID, ekey)
if err != nil {
return keys, sess.Commit()
}
-//base64EncPubKey encode public key content to base 64
+// base64EncPubKey encode public key content to base 64
func base64EncPubKey(pubkey *packet.PublicKey) (string, error) {
var w bytes.Buffer
err := pubkey.Serialize(&w)
return base64.StdEncoding.EncodeToString(w.Bytes()), nil
}
-//base64DecPubKey decode public key content from base 64
+// base64DecPubKey decode public key content from base 64
func base64DecPubKey(content string) (*packet.PublicKey, error) {
b, err := readerFromBase64(content)
if err != nil {
return nil, err
}
- //Read key
+ // Read key
p, err := packet.Read(b)
if err != nil {
return nil, err
}
- //Check type
+ // Check type
pkey, ok := p.(*packet.PublicKey)
if !ok {
return nil, fmt.Errorf("key is not a public key")
return pkey, nil
}
-//GPGKeyToEntity retrieve the imported key and the traducted entity
+// GPGKeyToEntity retrieves the imported key and the translated entity
func GPGKeyToEntity(k *GPGKey) (*openpgp.Entity, error) {
impKey, err := GetGPGImportByKeyID(k.KeyID)
if err != nil {
return keys[0], err
}
-//parseSubGPGKey parse a sub Key
+// parseSubGPGKey parse a sub Key
func parseSubGPGKey(ownerID int64, primaryID string, pubkey *packet.PublicKey, expiry time.Time) (*GPGKey, error) {
content, err := base64EncPubKey(pubkey)
if err != nil {
}, nil
}
-//getExpiryTime extract the expire time of primary key based on sig
+// getExpiryTime extract the expire time of primary key based on sig
func getExpiryTime(e *openpgp.Entity) time.Time {
expiry := time.Time{}
- //Extract self-sign for expire date based on : https://github.com/golang/crypto/blob/master/openpgp/keys.go#L165
+ // Extract self-sign for expire date based on : https://github.com/golang/crypto/blob/master/openpgp/keys.go#L165
var selfSig *packet.Signature
for _, ident := range e.Identities {
if selfSig == nil {
return expiry
}
-//parseGPGKey parse a PrimaryKey entity (primary key + subs keys + self-signature)
+// parseGPGKey parse a PrimaryKey entity (primary key + subs keys + self-signature)
func parseGPGKey(ownerID int64, e *openpgp.Entity) (*GPGKey, error) {
pubkey := e.PrimaryKey
expiry := getExpiryTime(e)
- //Parse Subkeys
+ // Parse Subkeys
subkeys := make([]*GPGKey, len(e.Subkeys))
for i, k := range e.Subkeys {
subs, err := parseSubGPGKey(ownerID, pubkey.KeyIdString(), k.PublicKey, expiry)
subkeys[i] = subs
}
- //Check emails
+ // Check emails
userEmails, err := GetEmailAddresses(ownerID)
if err != nil {
return nil, err
}
}
- //In the case no email as been found
+ // In case no email has been found
if len(emails) == 0 {
failedEmails := make([]string, 0, len(e.Identities))
for _, ident := range e.Identities {
// deleteGPGKey does the actual key deletion
func deleteGPGKey(e *xorm.Session, keyID string) (int64, error) {
if keyID == "" {
- return 0, fmt.Errorf("empty KeyId forbidden") //Should never happen but just to be sure
+ return 0, fmt.Errorf("empty KeyId forbidden") // Should never happen but just to be sure
}
- //Delete imported key
+ // Delete imported key
n, err := e.Where("key_id=?", keyID).Delete(new(GPGKeyImport))
if err != nil {
return n, err
}
func verifySign(s *packet.Signature, h hash.Hash, k *GPGKey) error {
- //Check if key can sign
+ // Check if key can sign
if !k.CanSign {
return fmt.Errorf("key can not sign")
}
- //Decode key
+ // Decode key
pkey, err := base64DecPubKey(k.Content)
if err != nil {
return err
}
func hashAndVerify(sig *packet.Signature, payload string, k *GPGKey, committer, signer *User, email string) *CommitVerification {
- //Generating hash of commit
+ // Generating hash of commit
hash, err := populateHash(sig.Hash, []byte(payload))
- if err != nil { //Skipping failed to generate hash
+ if err != nil { // Skipping failed to generate hash
log.Error("PopulateHash: %v", err)
return &CommitVerification{
CommittingUser: committer,
}
if err := verifySign(sig, hash, k); err == nil {
- return &CommitVerification{ //Everything is ok
+ return &CommitVerification{ // Everything is ok
CommittingUser: committer,
Verified: true,
Reason: fmt.Sprintf("%s / %s", signer.Name, k.KeyID),
return commitVerification
}
- //And test also SubsKey
+ // And test also SubsKey
for _, sk := range k.SubsKey {
commitVerification := hashAndVerify(sig, payload, sk, committer, signer, email)
if commitVerification != nil {
var committer *User
if c.Committer != nil {
var err error
- //Find Committer account
- committer, err = GetUserByEmail(c.Committer.Email) //This finds the user by primary email or activated email so commit will not be valid if email is not
- if err != nil { //Skipping not user for commiter
+ // Find Committer account
+ committer, err = GetUserByEmail(c.Committer.Email) // This finds the user by primary email or activated email so commit will not be valid if email is not
+ if err != nil { // Skipping: no user found for committer
committer = &User{
Name: c.Committer.Name,
Email: c.Committer.Email,
if c.Signature == nil {
return &CommitVerification{
CommittingUser: committer,
- Verified: false, //Default value
- Reason: "gpg.error.not_signed_commit", //Default value
+ Verified: false, // Default value
+ Reason: "gpg.error.not_signed_commit", // Default value
}
}
- //Parsing signature
+ // Parsing signature
sig, err := extractSignature(c.Signature.Signature)
- if err != nil { //Skipping failed to extract sign
+ if err != nil { // Skipping failed to extract sign
log.Error("SignatureRead err: %v", err)
return &CommitVerification{
CommittingUser: committer,
// Now try to associate the signature with the committer, if present
if committer.ID != 0 {
keys, err := ListGPGKeys(committer.ID, ListOptions{})
- if err != nil { //Skipping failed to get gpg keys of user
+ if err != nil { // Skipping failed to get gpg keys of user
log.Error("ListGPGKeys: %v", err)
return &CommitVerification{
CommittingUser: committer,
}
for _, k := range keys {
- //Pre-check (& optimization) that emails attached to key can be attached to the commiter email and can validate
+ // Pre-check (& optimization) that emails attached to key can be attached to the committer email and can validate
canValidate := false
email := ""
for _, e := range k.Emails {
}
}
if !canValidate {
- continue //Skip this key
+ continue // Skip this key
}
commitVerification := hashAndVerifyWithSubKeys(sig, c.Signature.Payload, k, committer, committer, email)
}
}
- return &CommitVerification{ //Default at this stage
+ return &CommitVerification{ // Default at this stage
CommittingUser: committer,
Verified: false,
Warning: defaultReason != NoKeyFound,
key, err := checkArmoredGPGKeyString(testGPGArmor)
assert.NoError(t, err, "Could not parse a valid GPG public armored rsa key", key)
- //TODO verify value of key
+ // TODO verify value of key
}
func TestCheckArmoredbrainpoolP256r1GPGKeyString(t *testing.T) {
key, err := checkArmoredGPGKeyString(testGPGArmor)
assert.NoError(t, err, "Could not parse a valid GPG public armored brainpoolP256r1 key", key)
- //TODO verify value of key
+ // TODO verify value of key
}
func TestExtractSignature(t *testing.T) {
Unknown GPG key with good email
`
- //Reading Sign
+ // Reading Sign
goodSig, err := extractSignature(testGoodSigArmor)
assert.NoError(t, err, "Could not parse a valid GPG armored signature", testGoodSigArmor)
badSig, err := extractSignature(testBadSigArmor)
assert.NoError(t, err, "Could not parse a valid GPG armored signature", testBadSigArmor)
- //Generating hash of commit
+ // Generating hash of commit
goodHash, err := populateHash(goodSig.Hash, []byte(testGoodPayload))
assert.NoError(t, err, "Could not generate a valid hash of payload", testGoodPayload)
badHash, err := populateHash(badSig.Hash, []byte(testBadPayload))
assert.NoError(t, err, "Could not generate a valid hash of payload", testBadPayload)
- //Verify
+ // Verify
err = verifySign(goodSig, goodHash, key)
assert.NoError(t, err, "Could not validate a good signature")
err = verifySign(badSig, badHash, key)
package models
func keysInt64(m map[int64]struct{}) []int64 {
- var keys = make([]int64, 0, len(m))
+ keys := make([]int64, 0, len(m))
for k := range m {
keys = append(keys, k)
}
}
func valuesRepository(m map[int64]*Repository) []*Repository {
- var values = make([]*Repository, 0, len(m))
+ values := make([]*Repository, 0, len(m))
for _, v := range m {
values = append(values, v)
}
}
func valuesUser(m map[int64]*User) []*User {
- var values = make([]*User, 0, len(m))
+ values := make([]*User, 0, len(m))
for _, v := range m {
values = append(values, v)
}
issueTasksDonePat *regexp.Regexp
)
-const issueTasksRegexpStr = `(^\s*[-*]\s\[[\sxX]\]\s.)|(\n\s*[-*]\s\[[\sxX]\]\s.)`
-const issueTasksDoneRegexpStr = `(^\s*[-*]\s\[[xX]\]\s.)|(\n\s*[-*]\s\[[xX]\]\s.)`
-const issueMaxDupIndexAttempts = 3
+const (
+ issueTasksRegexpStr = `(^\s*[-*]\s\[[\sxX]\]\s.)|(\n\s*[-*]\s\[[\sxX]\]\s.)`
+ issueTasksDoneRegexpStr = `(^\s*[-*]\s\[[xX]\]\s.)|(\n\s*[-*]\s\[[xX]\]\s.)`
+ issueMaxDupIndexAttempts = 3
+)
func init() {
issueTasksPat = regexp.MustCompile(issueTasksRegexpStr)
return fmt.Errorf("loadRepo: %v", err)
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeChangeTitle,
Doer: doer,
Repo: issue.Repo,
if err := sess.Begin(); err != nil {
return err
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeDeleteBranch,
Doer: doer,
Repo: repo,
return err
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeMilestone,
Doer: doer,
Repo: opts.Repo,
}
func getIssueIDsByRepoID(e Engine, repoID int64) ([]int64, error) {
- var ids = make([]int64, 0, 10)
+ ids := make([]int64, 0, 10)
err := e.Table("issue").Where("repo_id = ?", repoID).Find(&ids)
return ids, err
}
}
// GetRepoIssueStats returns number of open and closed repository issues by given filter mode.
-func GetRepoIssueStats(repoID, uid int64, filterMode int, isPull bool) (numOpen int64, numClosed int64) {
+func GetRepoIssueStats(repoID, uid int64, filterMode int, isPull bool) (numOpen, numClosed int64) {
countSession := func(isClosed, isPull bool, repoID int64) *xorm.Session {
sess := x.
Where("is_closed = ?", isClosed).
// SearchIssueIDsByKeyword search issues on database
func SearchIssueIDsByKeyword(kw string, repoIDs []int64, limit, start int) (int64, []int64, error) {
- var repoCond = builder.In("repo_id", repoIDs)
- var subQuery = builder.Select("id").From("issue").Where(repoCond)
+ repoCond := builder.In("repo_id", repoIDs)
+ subQuery := builder.Select("id").From("issue").Where(repoCond)
kw = strings.ToUpper(kw)
- var cond = builder.And(
+ cond := builder.And(
repoCond,
builder.Or(
builder.Like{"UPPER(name)", kw},
),
)
- var ids = make([]int64, 0, limit)
- var res = make([]struct {
+ ids := make([]int64, 0, limit)
+ res := make([]struct {
ID int64
UpdatedUnix int64
}, 0, limit)
titleChanged = currentIssue.Title != issue.Title
if titleChanged {
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeChangeTitle,
Doer: doer,
Repo: issue.Repo,
// UpdateIssueDeadline updates an issue deadline and adds comments. Setting a deadline to 0 means deleting it.
func UpdateIssueDeadline(issue *Issue, deadlineUnix timeutil.TimeStamp, doer *User) (err error) {
-
// if the deadline hasn't changed do nothing
if issue.DeadlineUnix == deadlineUnix {
return nil
Join("INNER", "repository", "repository.id = issue.repo_id").
Join("INNER", "issue_dependency", "issue_dependency.dependency_id = issue.id").
Where("issue_id = ?", issue.ID).
- //sort by repo id then created date, with the issues of the same repo at the beginning of the list
+ // sort by repo id then created date, with the issues of the same repo at the beginning of the list
OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC").
Find(&issueDeps)
}
Join("INNER", "repository", "repository.id = issue.repo_id").
Join("INNER", "issue_dependency", "issue_dependency.issue_id = issue.id").
Where("dependency_id = ?", issue.ID).
- //sort by repo id then created date, with the issues of the same repo at the beginning of the list
+ // sort by repo id then created date, with the issues of the same repo at the beginning of the list
OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC").
Find(&issueDeps)
}
return false, nil, fmt.Errorf("loadRepo: %v", err)
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeAssignees,
Doer: doer,
Repo: issue.Repo,
// toggles user assignee state in database
func toggleUserAssignee(e *xorm.Session, issue *Issue, assigneeID int64) (removed bool, err error) {
-
// Check if the user exists
assignee, err := getUserByID(e, assigneeID)
if err != nil {
// MakeIDsFromAPIAssigneesToAdd returns an array with all assignee IDs
func MakeIDsFromAPIAssigneesToAdd(oneAssignee string, multipleAssignees []string) (assigneeIDs []int64, err error) {
-
var requestAssignees []string
// Keeping the old assigning method for compatibility reasons
requestAssignees = append(requestAssignees, oneAssignee)
}
- //Prevent empty assignees
+ // Prevent empty assignees
if len(multipleAssignees) > 0 && multipleAssignees[0] != "" {
requestAssignees = append(requestAssignees, multipleAssignees...)
}
}
_, err := DeleteAttachmentsByComment(c.ID, true)
-
if err != nil {
log.Info("Could not delete files for comment %d on issue #%d: %s", c.ID, c.IssueID, err)
}
// LoadProject if comment.Type is CommentTypeProject, then load project.
func (c *Comment) LoadProject() error {
-
if c.OldProjectID > 0 {
var oldProject Project
has, err := x.ID(c.OldProjectID).Get(&oldProject)
return nil, err
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: commentType,
Doer: doer,
Repo: issue.Repo,
}
// Creates issue dependency comment
-func createIssueDependencyComment(e *xorm.Session, doer *User, issue *Issue, dependentIssue *Issue, add bool) (err error) {
+func createIssueDependencyComment(e *xorm.Session, doer *User, issue, dependentIssue *Issue, add bool) (err error) {
cType := CommentTypeAddDependency
if !add {
cType = CommentTypeRemoveDependency
}
// Make two comments, one in each issue
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: cType,
Doer: doer,
Repo: issue.Repo,
}
func (opts *FindCommentsOptions) toConds() builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"issue.repo_id": opts.RepoID})
}
// Find all reviews by ReviewID
reviews := make(map[int64]*Review)
- var ids = make([]int64, 0, len(comments))
+ ids := make([]int64, 0, len(comments))
for _, comment := range comments {
if comment.ReviewID != 0 {
ids = append(ids, comment.ReviewID)
posterIDs := comments.getPosterIDs()
posterMaps := make(map[int64]*User, len(posterIDs))
- var left = len(posterIDs)
+ left := len(posterIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (comments CommentList) getCommentIDs() []int64 {
- var ids = make([]int64, 0, len(comments))
+ ids := make([]int64, 0, len(comments))
for _, comment := range comments {
ids = append(ids, comment.ID)
}
}
func (comments CommentList) getLabelIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if _, ok := ids[comment.LabelID]; !ok {
ids[comment.LabelID] = struct{}{}
return nil
}
- var labelIDs = comments.getLabelIDs()
- var commentLabels = make(map[int64]*Label, len(labelIDs))
- var left = len(labelIDs)
+ labelIDs := comments.getLabelIDs()
+ commentLabels := make(map[int64]*Label, len(labelIDs))
+ left := len(labelIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (comments CommentList) getMilestoneIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if _, ok := ids[comment.MilestoneID]; !ok {
ids[comment.MilestoneID] = struct{}{}
}
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
- var left = len(milestoneIDs)
+ left := len(milestoneIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (comments CommentList) getOldMilestoneIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if _, ok := ids[comment.OldMilestoneID]; !ok {
ids[comment.OldMilestoneID] = struct{}{}
}
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
- var left = len(milestoneIDs)
+ left := len(milestoneIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (comments CommentList) getAssigneeIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if _, ok := ids[comment.AssigneeID]; !ok {
ids[comment.AssigneeID] = struct{}{}
return nil
}
- var assigneeIDs = comments.getAssigneeIDs()
- var assignees = make(map[int64]*User, len(assigneeIDs))
- var left = len(assigneeIDs)
+ assigneeIDs := comments.getAssigneeIDs()
+ assignees := make(map[int64]*User, len(assigneeIDs))
+ left := len(assigneeIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
// getIssueIDs returns all the issue ids on this comment list which issue hasn't been loaded
func (comments CommentList) getIssueIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if comment.Issue != nil {
continue
// Issues returns all the issues of comments
func (comments CommentList) Issues() IssueList {
- var issues = make(map[int64]*Issue, len(comments))
+ issues := make(map[int64]*Issue, len(comments))
for _, comment := range comments {
if comment.Issue != nil {
if _, ok := issues[comment.Issue.ID]; !ok {
}
}
- var issueList = make([]*Issue, 0, len(issues))
+ issueList := make([]*Issue, 0, len(issues))
for _, issue := range issues {
issueList = append(issueList, issue)
}
return nil
}
- var issueIDs = comments.getIssueIDs()
- var issues = make(map[int64]*Issue, len(issueIDs))
- var left = len(issueIDs)
+ issueIDs := comments.getIssueIDs()
+ issues := make(map[int64]*Issue, len(issueIDs))
+ left := len(issueIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (comments CommentList) getDependentIssueIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if comment.DependentIssue != nil {
continue
return nil
}
- var issueIDs = comments.getDependentIssueIDs()
- var issues = make(map[int64]*Issue, len(issueIDs))
- var left = len(issueIDs)
+ issueIDs := comments.getDependentIssueIDs()
+ issues := make(map[int64]*Issue, len(issueIDs))
+ left := len(issueIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
return nil
}
- var attachments = make(map[int64][]*Attachment, len(comments))
- var commentsIDs = comments.getCommentIDs()
- var left = len(commentsIDs)
+ attachments := make(map[int64][]*Attachment, len(comments))
+ commentsIDs := comments.getCommentIDs()
+ left := len(commentsIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (comments CommentList) getReviewIDs() []int64 {
- var ids = make(map[int64]struct{}, len(comments))
+ ids := make(map[int64]struct{}, len(comments))
for _, comment := range comments {
if _, ok := ids[comment.ReviewID]; !ok {
ids[comment.ReviewID] = struct{}{}
return nil
}
- var reviewIDs = comments.getReviewIDs()
- var reviews = make(map[int64]*Review, len(reviewIDs))
- var left = len(reviewIDs)
+ reviewIDs := comments.getReviewIDs()
+ reviews := make(map[int64]*Review, len(reviewIDs))
+ left := len(reviewIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
// RemoveIssueDependency removes a dependency from an issue
-func RemoveIssueDependency(user *User, issue *Issue, dep *Issue, depType DependencyType) (err error) {
+func RemoveIssueDependency(user *User, issue, dep *Issue, depType DependencyType) (err error) {
sess := x.NewSession()
defer sess.Close()
if err = sess.Begin(); err != nil {
}
// Check if the dependency already exists
-func issueDepExists(e Engine, issueID int64, depID int64) (bool, error) {
+func issueDepExists(e Engine, issueID, depID int64) (bool, error) {
return e.Where("(issue_id = ? AND dependency_id = ?)", issueID, depID).Exist(&IssueDependency{})
}
// DeleteLabel delete a label
func DeleteLabel(id, labelID int64) error {
-
label, err := GetLabelByID(labelID)
if err != nil {
if IsErrLabelNotExist(err) {
return
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeLabel,
Doer: doer,
Repo: issue.Repo,
return
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeLabel,
Doer: doer,
Repo: issue.Repo,
_, err = GetLabelsByOrgID(-1, "leastissues", ListOptions{})
assert.True(t, IsErrOrgLabelNotExist(err))
-
}
//
repoIDs := issues.getRepoIDs()
repoMaps := make(map[int64]*Repository, len(repoIDs))
- var left = len(repoIDs)
+ left := len(repoIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
posterIDs := issues.getPosterIDs()
posterMaps := make(map[int64]*User, len(posterIDs))
- var left = len(posterIDs)
+ left := len(posterIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (issues IssueList) getIssueIDs() []int64 {
- var ids = make([]int64, 0, len(issues))
+ ids := make([]int64, 0, len(issues))
for _, issue := range issues {
ids = append(ids, issue.ID)
}
IssueLabel *IssueLabel `xorm:"extends"`
}
- var issueLabels = make(map[int64][]*Label, len(issues)*3)
- var issueIDs = issues.getIssueIDs()
- var left = len(issueIDs)
+ issueLabels := make(map[int64][]*Label, len(issues)*3)
+ issueIDs := issues.getIssueIDs()
+ left := len(issueIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (issues IssueList) getMilestoneIDs() []int64 {
- var ids = make(map[int64]struct{}, len(issues))
+ ids := make(map[int64]struct{}, len(issues))
for _, issue := range issues {
if _, ok := ids[issue.MilestoneID]; !ok {
ids[issue.MilestoneID] = struct{}{}
}
milestoneMaps := make(map[int64]*Milestone, len(milestoneIDs))
- var left = len(milestoneIDs)
+ left := len(milestoneIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
Assignee *User `xorm:"extends"`
}
- var assignees = make(map[int64][]*User, len(issues))
- var issueIDs = issues.getIssueIDs()
- var left = len(issueIDs)
+ assignees := make(map[int64][]*User, len(issues))
+ issueIDs := issues.getIssueIDs()
+ left := len(issueIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (issues IssueList) getPullIssueIDs() []int64 {
- var ids = make([]int64, 0, len(issues))
+ ids := make([]int64, 0, len(issues))
for _, issue := range issues {
if issue.IsPull && issue.PullRequest == nil {
ids = append(ids, issue.ID)
}
pullRequestMaps := make(map[int64]*PullRequest, len(issuesIDs))
- var left = len(issuesIDs)
+ left := len(issuesIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
return nil
}
- var attachments = make(map[int64][]*Attachment, len(issues))
- var issuesIDs = issues.getIssueIDs()
- var left = len(issuesIDs)
+ attachments := make(map[int64][]*Attachment, len(issues))
+ issuesIDs := issues.getIssueIDs()
+ left := len(issuesIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
return nil
}
- var comments = make(map[int64][]*Comment, len(issues))
- var issuesIDs = issues.getIssueIDs()
- var left = len(issuesIDs)
+ comments := make(map[int64][]*Comment, len(issues))
+ issuesIDs := issues.getIssueIDs()
+ left := len(issuesIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
if len(issues) == 0 {
return nil
}
- var trackedTimes = make(map[int64]int64, len(issues))
+ trackedTimes := make(map[int64]int64, len(issues))
- var ids = make([]int64, 0, len(issues))
+ ids := make([]int64, 0, len(issues))
for _, issue := range issues {
if issue.Repo.IsTimetrackerEnabled() {
ids = append(ids, issue.ID)
}
}
- var left = len(ids)
+ left := len(ids)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
return err
}
- var opt = &CreateCommentOptions{
+ opt := &CreateCommentOptions{
Doer: opts.Doer,
Issue: opts.Issue,
Repo: opts.Issue.Repo,
return err
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: CommentTypeMilestone,
Doer: doer,
Repo: issue.Repo,
type MilestoneList []*Milestone
func (milestones MilestoneList) getMilestoneIDs() []int64 {
- var ids = make([]int64, 0, len(milestones))
+ ids := make([]int64, 0, len(milestones))
for _, ms := range milestones {
ids = append(ids, ms.ID)
}
if len(milestones) == 0 {
return nil
}
- var trackedTimes = make(map[int64]int64, len(milestones))
+ trackedTimes := make(map[int64]int64, len(milestones))
// Get total tracked time by milestone_id
rows, err := e.Table("issue").
}
func (opts *FindReactionsOptions) toConds() builder.Cond {
- //If Issue ID is set add to Query
- var cond = builder.NewCond()
+ // If Issue ID is set add to Query
+ cond := builder.NewCond()
if opts.IssueID > 0 {
cond = cond.And(builder.Eq{"reaction.issue_id": opts.IssueID})
}
- //If CommentID is > 0 add to Query
- //If it is 0 Query ignore CommentID to select
- //If it is -1 it explicit search of Issue Reactions where CommentID = 0
+ // If CommentID is > 0 add to Query
+ // If it is 0, the query ignores CommentID
+ // If it is -1, it explicitly searches for Issue reactions where CommentID = 0
if opts.CommentID > 0 {
cond = cond.And(builder.Eq{"reaction.comment_id": opts.CommentID})
} else if opts.CommentID == -1 {
func FindCommentReactions(comment *Comment) (ReactionList, error) {
return findReactions(x, FindReactionsOptions{
IssueID: comment.IssueID,
- CommentID: comment.ID})
+ CommentID: comment.ID,
+ })
}
// FindIssueReactions returns a ReactionList of all reactions from an issue
// GroupByType returns reactions grouped by type
func (list ReactionList) GroupByType() map[string]ReactionList {
- var reactions = make(map[string]ReactionList)
+ reactions := make(map[string]ReactionList)
for _, reaction := range list {
reactions[reaction.Type] = append(reactions[reaction.Type], reaction)
}
// GetFirstUsers returns first reacted user display names separated by comma
func (list ReactionList) GetFirstUsers() string {
var buffer bytes.Buffer
- var rem = setting.UI.ReactionMaxUserNum
+ rem := setting.UI.ReactionMaxUserNum
for _, reaction := range list {
if buffer.Len() > 0 {
buffer.WriteString(", ")
}
// StopwatchExists returns true if the stopwatch exists
-func StopwatchExists(userID int64, issueID int64) bool {
+func StopwatchExists(userID, issueID int64) bool {
_, exists, _ := getStopwatch(x, userID, issueID)
return exists
}
return err
}
} else {
- //if another stopwatch is running: stop it
+ // if another stopwatch is running: stop it
exists, sw, err := HasUserStopwatch(user.ID)
if err != nil {
return err
func TestGetIssuesByIDs(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
- testSuccess := func(expectedIssueIDs []int64, nonExistentIssueIDs []int64) {
+ testSuccess := func(expectedIssueIDs, nonExistentIssueIDs []int64) {
issues, err := GetIssuesByIDs(append(expectedIssueIDs, nonExistentIssueIDs...))
assert.NoError(t, err)
actualIssueIDs := make([]int64, len(issues))
actualIssueIDs[i] = issue.ID
}
assert.Equal(t, expectedIssueIDs, actualIssueIDs)
-
}
testSuccess([]int64{1, 2, 3}, []int64{})
testSuccess([]int64{1, 2, 3}, []int64{NonexistentID})
}
func TestIssue_ClearLabels(t *testing.T) {
- var tests = []struct {
+ tests := []struct {
issueID int64
doerID int64
}{
repo := AssertExistsAndLoadBean(t, &Repository{ID: 1}).(*Repository)
user := AssertExistsAndLoadBean(t, &User{ID: 2}).(*User)
- var issue = Issue{
+ issue := Issue{
RepoID: repo.ID,
PosterID: user.ID,
Title: title,
if err != nil {
return nil, err
}
- //Adding total time per user ID
+ // Adding total time per user ID
totalTimesByUser := make(map[int64]int64)
for _, t := range trackedTimes {
totalTimesByUser[t.UserID] += t.Time
}
totalTimes := make(map[*User]string)
- //Fetching User and making time human readable
+ // Fetching User and making time human readable
for userID, total := range totalTimesByUser {
user, err := GetUserByID(userID)
if err != nil {
}
func deleteTimes(e Engine, opts FindTrackedTimesOptions) (removedTime int64, err error) {
-
removedTime, err = getTrackedSeconds(e, opts)
if err != nil || removedTime == 0 {
return
issue1, err := GetIssueByID(1)
assert.NoError(t, err)
- //3661 = 1h 1min 1s
+ // 3661 = 1h 1min 1s
trackedTime, err := AddTime(user3, issue1, 3661, time.Now())
assert.NoError(t, err)
assert.Equal(t, int64(3), trackedTime.UserID)
return watches, sess.Find(&watches)
}
-func removeIssueWatchersByRepoID(e Engine, userID int64, repoID int64) error {
+func removeIssueWatchersByRepoID(e Engine, userID, repoID int64) error {
_, err := e.
Join("INNER", "issue", "`issue`.id = `issue_watch`.issue_id AND `issue`.repo_id = ?", repoID).
Where("`issue_watch`.user_id = ?", userID).
RemoveOld bool
}
-func findOldCrossReferences(e Engine, issueID int64, commentID int64) ([]*Comment, error) {
+func findOldCrossReferences(e Engine, issueID, commentID int64) ([]*Comment, error) {
active := make([]*Comment, 0, 10)
return active, e.Where("`ref_action` IN (?, ?, ?)", references.XRefActionNone, references.XRefActionCloses, references.XRefActionReopens).
And("`ref_issue_id` = ?", issueID).
Find(&active)
}
-func neuterCrossReferences(e Engine, issueID int64, commentID int64) error {
+func neuterCrossReferences(e Engine, issueID, commentID int64) error {
active, err := findOldCrossReferences(e, issueID, commentID)
if err != nil {
return err
if ctx.OrigComment != nil {
refCommentID = ctx.OrigComment.ID
}
- var opts = &CreateCommentOptions{
+ opts := &CreateCommentOptions{
Type: ctx.Type,
Doer: ctx.Doer,
Repo: xref.Issue.Repo,
// verifyReferencedIssue will check if the referenced issue exists, and whether the doer has permission to do what
func (issue *Issue) verifyReferencedIssue(e Engine, ctx *crossReferencesContext, repo *Repository,
ref references.IssueReference) (*Issue, references.XRefAction, error) {
-
refIssue := &Issue{RepoID: repo.ID, Index: ref.Index}
refAction := ref.Action
Href string `json:"href"`
}
-var (
- // ErrLFSObjectNotExist is returned from lfs models functions in order
- // to differentiate between database and missing object errors.
- ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")
-)
+// ErrLFSObjectNotExist is returned from lfs models functions in order
+// to differentiate between database and missing object errors.
+var ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")
const (
// LFSMetaFileIdentifier is the string appearing at the first line of LFS pointer files.
var start int
const batchSize = 100
for {
- var mos = make([]*LFSMetaObject, 0, batchSize)
+ mos := make([]*LFSMetaObject, 0, batchSize)
if err := x.Limit(batchSize, start).Find(&mos); err != nil {
return err
}
return lock, err
}
-//CheckLFSAccessForRepo check needed access mode base on action
+// CheckLFSAccessForRepo checks the needed access mode based on the action
func CheckLFSAccessForRepo(u *User, repo *Repository, mode AccessMode) error {
if u == nil {
return ErrLFSUnauthorizedAction{repo.ID, "undefined", mode}
return nil, ErrUserNotExist{0, login, 0}
}
- var isAttributeSSHPublicKeySet = len(strings.TrimSpace(source.LDAP().AttributeSSHPublicKey)) > 0
+ isAttributeSSHPublicKeySet := len(strings.TrimSpace(source.LDAP().AttributeSSHPublicKey)) > 0
// Update User admin flag if exist
if isExist, err := IsUserExist(0, sr.Username); err != nil {
if _, err := sess.NoAutoTime().Insert(issue); err != nil {
return err
}
- var issueLabels = make([]IssueLabel, 0, len(issue.Labels))
- var labelIDs = make([]int64, 0, len(issue.Labels))
+ issueLabels := make([]IssueLabel, 0, len(issue.Labels))
+ labelIDs := make([]int64, 0, len(issue.Labels))
for _, label := range issue.Labels {
issueLabels = append(issueLabels, IssueLabel{
IssueID: issue.ID,
return nil
}
- var issueIDs = make(map[int64]bool)
+ issueIDs := make(map[int64]bool)
for _, comment := range comments {
issueIDs[comment.IssueID] = true
}
var last int
const batchSize = 50
for {
- var results = make([]Repository, 0, batchSize)
+ results := make([]Repository, 0, batchSize)
err := x.Where("original_url <> '' AND original_url IS NOT NULL").
And("original_service_type = 0 OR original_service_type IS NULL").
OrderBy("id").
if err != nil {
return err
}
- var serviceType = PlainGitService
+ serviceType := PlainGitService
if strings.EqualFold(u.Host, "github.com") {
serviceType = GithubService
}
)
func removeLabelUneededCols(x *xorm.Engine) error {
-
// Make sure the columns exist before dropping them
type Label struct {
QueryString string
)
func addTeamIncludesAllRepositories(x *xorm.Engine) error {
-
type Team struct {
ID int64 `xorm:"pk autoincr"`
IncludesAllRepositories bool `xorm:"NOT NULL DEFAULT false"`
)
func addTemplateToRepo(x *xorm.Engine) error {
-
type Repository struct {
IsTemplate bool `xorm:"INDEX NOT NULL DEFAULT false"`
TemplateID int64 `xorm:"INDEX"`
}
totalPages := totalIssues / pageSize
- var executeBody = func(page, pageSize int64) error {
+ executeBody := func(page, pageSize int64) error {
// Find latest review of each user in each pull request, and set official field if appropriate
reviews := []*Review{}
)
func sanitizeOriginalURL(x *xorm.Engine) error {
-
type Repository struct {
ID int64
OriginalURL string `xorm:"VARCHAR(2048)"`
var last int
const batchSize = 50
for {
- var results = make([]Repository, 0, batchSize)
+ results := make([]Repository, 0, batchSize)
err := x.Where("original_url <> '' AND original_url IS NOT NULL").
And("original_service_type = 0 OR original_service_type IS NULL").
OrderBy("id").
return newAvatar, nil
}
- if err := ioutil.WriteFile(filepath.Join(setting.Avatar.Path, newAvatar), data, 0666); err != nil {
+ if err := ioutil.WriteFile(filepath.Join(setting.Avatar.Path, newAvatar), data, 0o666); err != nil {
return "", fmt.Errorf("ioutil.WriteFile: %v", err)
}
)
func extendTrackedTimes(x *xorm.Engine) error {
-
type TrackedTime struct {
Time int64 `xorm:"NOT NULL"`
Deleted bool `xorm:"NOT NULL DEFAULT false"`
)
func addRequireSignedCommits(x *xorm.Engine) error {
-
type ProtectedBranch struct {
RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"`
}
)
func addUserRepoMissingColumns(x *xorm.Engine) error {
-
type VisibleType int
type User struct {
PasswdHashAlgo string `xorm:"NOT NULL DEFAULT 'pbkdf2'"`
MergedCommitID string `xorm:"VARCHAR(40)"`
}
- var limit = setting.Database.IterateBufferSize
+ limit := setting.Database.IterateBufferSize
if limit <= 0 {
limit = 50
}
)
func purgeUnusedDependencies(x *xorm.Engine) error {
-
if _, err := x.Exec("DELETE FROM issue_dependency WHERE issue_id NOT IN (SELECT id FROM issue)"); err != nil {
return err
}
)
func expandWebhooks(x *xorm.Engine) error {
-
type HookEvents struct {
Create bool `json:"create"`
Delete bool `json:"delete"`
if err := sess.Begin(); err != nil {
return err
}
- var results = make([]Webhook, 0, batchSize)
+ results := make([]Webhook, 0, batchSize)
err := x.OrderBy("id").
Limit(batchSize, last).
Find(&results)
MergedCommitID string `xorm:"VARCHAR(40)"`
}
- var limit = setting.Database.IterateBufferSize
+ limit := setting.Database.IterateBufferSize
if limit <= 0 {
limit = 50
}
if err := sess.Begin(); err != nil {
return err
}
- var results = make([]*PullRequest, 0, batchSize)
+ results := make([]*PullRequest, 0, batchSize)
err := sess.Where("has_merged = ?", false).OrderBy("id").Limit(batchSize, last).Find(&results)
if err != nil {
return err
)
func updateMatrixWebhookHTTPMethod(x *xorm.Engine) error {
- var matrixHookTaskType = 9 // value comes from the models package
+ matrixHookTaskType := 9 // value comes from the models package
type Webhook struct {
HTTPMethod string
}
)
func addProjectsInfo(x *xorm.Engine) error {
-
// Create new tables
type (
ProjectType uint8
return err
}
- var updateComment = func(comments []*Comment) error {
+ updateComment := func(comments []*Comment) error {
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
return sess.Commit()
}
- var start = 0
- var batchSize = 100
+ start := 0
+ batchSize := 100
for {
- var comments = make([]*Comment, 0, batchSize)
+ comments := make([]*Comment, 0, batchSize)
if err := x.Where("review_id = 0 and type = 21").Limit(batchSize, start).Find(&comments); err != nil {
return err
}
}
func fixPublisherIDforTagReleases(x *xorm.Engine) error {
-
type Release struct {
ID int64
RepoID int64
)
func fixRepoTopics(x *xorm.Engine) error {
-
type Topic struct {
ID int64 `xorm:"pk autoincr"`
Name string `xorm:"UNIQUE VARCHAR(25)"`
AND comment.id != first.id
AND comment.commit_sha != first.commit_sha`
- var sqlCmd string
- var start = 0
- var batchSize = 100
- sess := x.NewSession()
+ var (
+ sqlCmd string
+ start = 0
+ batchSize = 100
+ sess = x.NewSession()
+ )
defer sess.Close()
for {
if err := sess.Begin(); err != nil {
}
}
- var comments = make([]*Comment, 0, batchSize)
+ comments := make([]*Comment, 0, batchSize)
switch {
case setting.Database.UseMySQL:
MATRIX
)
- var hookTaskTypes = map[int]string{
+ hookTaskTypes := map[int]string{
GITEA: "gitea",
GOGS: "gogs",
SLACK: "slack",
MATRIX
)
- var hookTaskTypes = map[int]string{
+ hookTaskTypes := map[int]string{
GITEA: "gitea",
GOGS: "gogs",
SLACK: "slack",
)
func addIssueDependencies(x *xorm.Engine) (err error) {
-
type IssueDependency struct {
ID int64 `xorm:"pk autoincr"`
UserID int64 `xorm:"NOT NULL"`
Created time.Time `xorm:"-"`
}
- //Updating existing issue units
+ // Updating existing issue units
units := make([]*RepoUnit, 0, 100)
err = x.Where("`type` = ?", v16UnitTypeIssues).Find(&units)
if err != nil {
return err
}
return sess.Commit()
-
}
func hashToken(token, salt string) string {
return err
}
- //Updating existing issue units
+ // Updating existing issue units
units := make([]*RepoUnit, 0, 100)
if err := sess.Where("`type` = ?", v16UnitTypePRs).Find(&units); err != nil {
return fmt.Errorf("Query repo units: %v", err)
)
func addCanCloseIssuesViaCommitInAnyBranch(x *xorm.Engine) error {
-
type Repository struct {
ID int64 `xorm:"pk autoincr"`
CloseIssuesViaCommitInAnyBranch bool `xorm:"NOT NULL DEFAULT false"`
sess := x.NewSession()
defer sess.Close()
- var start = 0
+ start := 0
for {
- var statuses = make([]*CommitStatus, 0, 100)
+ statuses := make([]*CommitStatus, 0, 100)
err := sess.OrderBy("id").Limit(100, start).Find(&statuses)
if err != nil {
return err
)
func removeLingeringIndexStatus(x *xorm.Engine) error {
-
_, err := x.Exec(builder.Delete(builder.NotIn("`repo_id`", builder.Select("`id`").From("`repository`"))).From("`repo_indexer_status`"))
return err
}
sess := x.NewSession()
defer sess.Close()
- var limit = setting.Database.IterateBufferSize
+ limit := setting.Database.IterateBufferSize
if limit <= 0 {
limit = 50
}
return nil
}
- var ids = make([]int64, 0, limit)
+ ids := make([]int64, 0, limit)
for _, attachment := range attachements {
ids = append(ids, attachment.ID)
}
}
// DumpDatabase dumps all data from database according the special database SQL syntax to file system.
-func DumpDatabase(filePath string, dbType string) error {
+func DumpDatabase(filePath, dbType string) error {
var tbs []*schemas.Table
for _, t := range tables {
t, err := x.TableInfo(t)
// init
var toNotify map[int64]struct{}
notifications, err := getNotificationsByIssueID(e, issueID)
-
if err != nil {
return err
}
}
func (nl NotificationList) getPendingRepoIDs() []int64 {
- var ids = make(map[int64]struct{}, len(nl))
+ ids := make(map[int64]struct{}, len(nl))
for _, notification := range nl {
if notification.Repository != nil {
continue
return RepositoryList{}, []int{}, nil
}
- var repoIDs = nl.getPendingRepoIDs()
- var repos = make(map[int64]*Repository, len(repoIDs))
- var left = len(repoIDs)
+ repoIDs := nl.getPendingRepoIDs()
+ repos := make(map[int64]*Repository, len(repoIDs))
+ left := len(repoIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
failed := []int{}
- var reposList = make(RepositoryList, 0, len(repoIDs))
+ reposList := make(RepositoryList, 0, len(repoIDs))
for i, notification := range nl {
if notification.Repository == nil {
notification.Repository = repos[notification.RepoID]
}
func (nl NotificationList) getPendingIssueIDs() []int64 {
- var ids = make(map[int64]struct{}, len(nl))
+ ids := make(map[int64]struct{}, len(nl))
for _, notification := range nl {
if notification.Issue != nil {
continue
return []int{}, nil
}
- var issueIDs = nl.getPendingIssueIDs()
- var issues = make(map[int64]*Issue, len(issueIDs))
- var left = len(issueIDs)
+ issueIDs := nl.getPendingIssueIDs()
+ issues := make(map[int64]*Issue, len(issueIDs))
+ left := len(issueIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
}
func (nl NotificationList) getPendingCommentIDs() []int64 {
- var ids = make(map[int64]struct{}, len(nl))
+ ids := make(map[int64]struct{}, len(nl))
for _, notification := range nl {
if notification.CommentID == 0 || notification.Comment != nil {
continue
return []int{}, nil
}
- var commentIDs = nl.getPendingCommentIDs()
- var comments = make(map[int64]*Comment, len(commentIDs))
- var left = len(commentIDs)
+ commentIDs := nl.getPendingCommentIDs()
+ comments := make(map[int64]*Comment, len(commentIDs))
+ left := len(commentIDs)
for left > 0 {
- var limit = defaultMaxInSize
+ limit := defaultMaxInSize
if left < limit {
limit = left
}
ok, err := e.
Where("id = ?", notificationID).
Get(notification)
-
if err != nil {
return nil, err
}
}
// UpdateNotificationStatuses updates the statuses of all of a user's notifications that are of the currentStatus type to the desiredStatus
-func UpdateNotificationStatuses(user *User, currentStatus NotificationStatus, desiredStatus NotificationStatus) error {
+func UpdateNotificationStatuses(user *User, currentStatus, desiredStatus NotificationStatus) error {
n := &Notification{Status: desiredStatus, UpdatedBy: user.ID}
_, err := x.
Where("user_id = ? AND status = ?", user.ID, currentStatus).
"bitbucket": {Name: "bitbucket", DisplayName: "Bitbucket", Image: "/img/auth/bitbucket.png"},
"dropbox": {Name: "dropbox", DisplayName: "Dropbox", Image: "/img/auth/dropbox.png"},
"facebook": {Name: "facebook", DisplayName: "Facebook", Image: "/img/auth/facebook.png"},
- "github": {Name: "github", DisplayName: "GitHub", Image: "/img/auth/github.png",
+ "github": {
+ Name: "github", DisplayName: "GitHub", Image: "/img/auth/github.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("github"),
AuthURL: oauth2.GetDefaultAuthURL("github"),
EmailURL: oauth2.GetDefaultEmailURL("github"),
},
},
- "gitlab": {Name: "gitlab", DisplayName: "GitLab", Image: "/img/auth/gitlab.png",
+ "gitlab": {
+ Name: "gitlab", DisplayName: "GitLab", Image: "/img/auth/gitlab.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("gitlab"),
AuthURL: oauth2.GetDefaultAuthURL("gitlab"),
"openidConnect": {Name: "openidConnect", DisplayName: "OpenID Connect", Image: "/img/auth/openid_connect.svg"},
"twitter": {Name: "twitter", DisplayName: "Twitter", Image: "/img/auth/twitter.png"},
"discord": {Name: "discord", DisplayName: "Discord", Image: "/img/auth/discord.png"},
- "gitea": {Name: "gitea", DisplayName: "Gitea", Image: "/img/auth/gitea.png",
+ "gitea": {
+ Name: "gitea", DisplayName: "Gitea", Image: "/img/auth/gitea.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("gitea"),
AuthURL: oauth2.GetDefaultAuthURL("gitea"),
ProfileURL: oauth2.GetDefaultProfileURL("gitea"),
},
},
- "nextcloud": {Name: "nextcloud", DisplayName: "Nextcloud", Image: "/img/auth/nextcloud.png",
+ "nextcloud": {
+ Name: "nextcloud", DisplayName: "Nextcloud", Image: "/img/auth/nextcloud.png",
CustomURLMapping: &oauth2.CustomURLMapping{
TokenURL: oauth2.GetDefaultTokenURL("nextcloud"),
AuthURL: oauth2.GetDefaultAuthURL("nextcloud"),
},
},
"yandex": {Name: "yandex", DisplayName: "Yandex", Image: "/img/auth/yandex.png"},
- "mastodon": {Name: "mastodon", DisplayName: "Mastodon", Image: "/img/auth/mastodon.png",
+ "mastodon": {
+ Name: "mastodon", DisplayName: "Mastodon", Image: "/img/auth/mastodon.png",
CustomURLMapping: &oauth2.CustomURLMapping{
AuthURL: oauth2.GetDefaultAuthURL("mastodon"),
},
return nil, nil, err
}
- var ids = make([]int64, len(ous))
- var idsIsPublic = make(map[int64]bool, len(ous))
+ ids := make([]int64, len(ous))
+ idsIsPublic := make(map[int64]bool, len(ous))
for i, ou := range ous {
ids[i] = ou.UID
idsIsPublic[ou.UID] = ou.IsPublic
}
// insert units for team
- var units = make([]TeamUnit, 0, len(AllRepoUnitTypes))
+ units := make([]TeamUnit, 0, len(AllRepoUnitTypes))
for _, tp := range AllRepoUnitTypes {
units = append(units, TeamUnit{
OrgID: org.ID,
}
// HasOrgVisible tells if the given user can see the given org
-func HasOrgVisible(org *User, user *User) bool {
+func HasOrgVisible(org, user *User) bool {
return hasOrgVisible(x, org, user)
}
-func hasOrgVisible(e Engine, org *User, user *User) bool {
+func hasOrgVisible(e Engine, org, user *User) bool {
// Not SignedUser
if user == nil {
return org.Visibility == structs.VisibleTypePublic
}
func (env *accessibleReposEnv) cond() builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if env.team != nil {
cond = cond.And(builder.Eq{"team_repo.team_id": env.team.ID})
} else {
opts.PageSize = 10
}
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if len(opts.Keyword) > 0 {
lowerKeyword := strings.ToLower(opts.Keyword)
count, err := sess.
Where(cond).
Count(new(Team))
-
if err != nil {
return nil, 0, err
}
t.Name,
log.NewColoredIDValue(t.OrgID),
t.Authorize)
-
}
// GetUnits return a list of available units for a team
}
// UpdateTeam updates information of team.
-func UpdateTeam(t *Team, authChanged bool, includeAllChanged bool) (err error) {
+func UpdateTeam(t *Team, authChanged, includeAllChanged bool) (err error) {
if len(t.Name) == 0 {
return errors.New("empty team name")
}
}
// UsersInTeamsCount counts the number of users which are in userIDs and teamIDs
-func UsersInTeamsCount(userIDs []int64, teamIDs []int64) (int64, error) {
+func UsersInTeamsCount(userIDs, teamIDs []int64) (int64, error) {
var ids []int64
if err := x.In("uid", userIDs).In("team_id", teamIDs).
Table("team_user").
func TestUsersInTeamsCount(t *testing.T) {
assert.NoError(t, PrepareTestDatabase())
- test := func(teamIDs []int64, userIDs []int64, expected int64) {
+ test := func(teamIDs, userIDs []int64, expected int64) {
count, err := UsersInTeamsCount(teamIDs, userIDs)
assert.NoError(t, err)
assert.Equal(t, expected, count)
ID: orgUsers[0].ID,
OrgID: 6,
UID: 5,
- IsPublic: true}, *orgUsers[0])
+ IsPublic: true,
+ }, *orgUsers[0])
assert.Equal(t, OrgUser{
ID: orgUsers[1].ID,
OrgID: 7,
UID: 5,
- IsPublic: false}, *orgUsers[1])
+ IsPublic: false,
+ }, *orgUsers[1])
}
publicOrgUsers, err := GetOrgUsersByUserID(5, &SearchOrganizationsOptions{All: false})
ID: orgUsers[0].ID,
OrgID: 3,
UID: 2,
- IsPublic: true}, *orgUsers[0])
+ IsPublic: true,
+ }, *orgUsers[0])
assert.Equal(t, OrgUser{
ID: orgUsers[1].ID,
OrgID: 3,
UID: 4,
- IsPublic: false}, *orgUsers[1])
+ IsPublic: false,
+ }, *orgUsers[1])
}
orgUsers, err = GetOrgUsersByOrgID(&FindOrgMembersOpts{
}
func getProjects(e Engine, opts ProjectSearchOptions) ([]*Project, int64, error) {
-
projects := make([]*Project, 0, setting.UI.IssuePagingNum)
var cond builder.Cond = builder.Eq{"repo_id": opts.RepoID}
}
func createBoardsForProjectsType(sess *xorm.Session, project *Project) error {
-
var items []string
switch project.BoardType {
return nil
}
- var boards = make([]ProjectBoard, 0, len(items))
+ boards := make([]ProjectBoard, 0, len(items))
for _, v := range items {
boards = append(boards, ProjectBoard{
}
func getProjectBoards(e Engine, projectID int64) ([]*ProjectBoard, error) {
- var boards = make([]*ProjectBoard, 0, 5)
+ boards := make([]*ProjectBoard, 0, 5)
if err := e.Where("project_id=? AND `default`=?", projectID, false).OrderBy("Sorting").Find(&boards); err != nil {
return nil, err
_, err := x.ID(bs[i].ID).Cols(
"sorting",
).Update(bs[i])
-
if err != nil {
return err
}
// ChangeProjectAssign changes the project associated with an issue
func ChangeProjectAssign(issue *Issue, doer *User, newProjectID int64) error {
-
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
}
func addUpdateIssueProject(e *xorm.Session, issue *Issue, doer *User, newProjectID int64) error {
-
oldProjectID := issue.projectID(e)
if _, err := e.Where("project_issue.issue_id=?", issue.ID).Delete(&ProjectIssue{}); err != nil {
// MoveIssueAcrossProjectBoards move a card from one board to another
func MoveIssueAcrossProjectBoards(issue *Issue, board *ProjectBoard) error {
-
sess := x.NewSession()
defer sess.Close()
if err := sess.Begin(); err != nil {
func TestIsProjectTypeValid(t *testing.T) {
const UnknownType ProjectType = 15
- var cases = []struct {
+ cases := []struct {
typ ProjectType
valid bool
}{
// GetApprovers returns the approvers of the pull request
func (pr *PullRequest) GetApprovers() string {
-
stringBuilder := strings.Builder{}
if err := pr.getReviewedByLines(&stringBuilder); err != nil {
log.Error("Unable to getReviewedByLines: Error: %v", err)
}
// GetPullRequestByIndex returns a pull request by the given index
-func GetPullRequestByIndex(repoID int64, index int64) (*PullRequest, error) {
+func GetPullRequestByIndex(repoID, index int64) (*PullRequest, error) {
pr := &PullRequest{
BaseRepoID: repoID,
Index: index,
}
func (opts *FindReleasesOptions) toConds(repoID int64) builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
cond = cond.And(builder.Eq{"repo_id": repoID})
if !opts.IncludeDrafts {
func (s releaseMetaSearch) Len() int {
return len(s.ID)
}
+
func (s releaseMetaSearch) Swap(i, j int) {
s.ID[i], s.ID[j] = s.ID[j], s.ID[i]
s.Rel[i], s.Rel[j] = s.Rel[j], s.Rel[i]
}
+
func (s releaseMetaSearch) Less(i, j int) bool {
return s.ID[i] < s.ID[j]
}
// then merge join them
// Sort
- var sortedRels = releaseMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Release, len(rels))}
+ sortedRels := releaseMetaSearch{ID: make([]int64, len(rels)), Rel: make([]*Release, len(rels))}
var attachments []*Attachment
for index, element := range rels {
element.Attachments = []*Attachment{}
}
// merge join
- var currentIndex = 0
+ currentIndex := 0
for _, attachment := range attachments {
for sortedRels.ID[currentIndex] < attachment.ReleaseID {
currentIndex++
}
// insert units for repo
- var units = make([]RepoUnit, 0, len(DefaultRepoUnits))
+ units := make([]RepoUnit, 0, len(DefaultRepoUnits))
for _, tp := range DefaultRepoUnits {
if tp == UnitTypeIssues {
units = append(units, RepoUnit{
// GetRepositoriesMapByIDs returns the repositories by given id slice.
func GetRepositoriesMapByIDs(ids []int64) (map[int64]*Repository, error) {
- var repos = make(map[int64]*Repository, len(ids))
+ repos := make(map[int64]*Repository, len(ids))
return repos, x.In("id", ids).Find(&repos)
}
opts.OrderBy = "updated_unix DESC"
}
- var cond = builder.NewCond()
+ cond := builder.NewCond()
cond = cond.And(builder.Eq{"owner_id": opts.Actor.ID})
if !opts.Private {
cond = cond.And(builder.Eq{"is_private": false})
// IterateRepository iterate repositories
func IterateRepository(f func(repo *Repository) error) error {
var start int
- var batchSize = setting.Database.IterateBufferSize
+ batchSize := setting.Database.IterateBufferSize
for {
- var repos = make([]*Repository, 0, batchSize)
+ repos := make([]*Repository, 0, batchSize)
if err := x.Limit(batchSize, start).Find(&repos); err != nil {
return err
}
RepoLang[i].RepoID = destRepo.ID
RepoLang[i].CreatedUnix = timeutil.TimeStampNow()
}
- //update destRepo's indexer status
+ // update destRepo's indexer status
tmpCommitID := RepoLang[0].CommitID
if err := destRepo.updateIndexerStatus(sess, RepoIndexerTypeStats, tmpCommitID); err != nil {
return err
LowerNames []string
}
-//SearchOrderBy is used to sort the result
+// SearchOrderBy is used to sort the result
type SearchOrderBy string
func (s SearchOrderBy) String() string {
// SearchRepositoryCondition creates a query condition according search repository options
func SearchRepositoryCondition(opts *SearchRepoOptions) builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if opts.Private {
if opts.Actor != nil && !opts.Actor.IsAdmin && opts.Actor.ID != opts.OwnerID {
// Restrict repositories to those the OwnerID owns or contributes to as per opts.Collaborate
if opts.OwnerID > 0 {
- var accessCond = builder.NewCond()
+ accessCond := builder.NewCond()
if opts.Collaborate != util.OptionalBoolTrue {
accessCond = builder.Eq{"owner_id": opts.OwnerID}
}
if opts.Keyword != "" {
// separate keyword
- var subQueryCond = builder.NewCond()
+ subQueryCond := builder.NewCond()
for _, v := range strings.Split(opts.Keyword, ",") {
if opts.TopicOnly {
subQueryCond = subQueryCond.Or(builder.Eq{"topic.name": strings.ToLower(v)})
Where(subQueryCond).
GroupBy("repo_topic.repo_id")
- var keywordCond = builder.In("id", subQuery)
+ keywordCond := builder.In("id", subQuery)
if !opts.TopicOnly {
- var likes = builder.NewCond()
+ likes := builder.NewCond()
for _, v := range strings.Split(opts.Keyword, ",") {
likes = likes.Or(builder.Like{"lower_name", strings.ToLower(v)})
if opts.IncludeDescription {
count, err := sess.
Where(cond).
Count(new(Repository))
-
if err != nil {
return nil, 0, fmt.Errorf("Count: %v", err)
}
// accessibleRepositoryCondition takes a user a returns a condition for checking if a repository is accessible
func accessibleRepositoryCondition(user *User) builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if user == nil || !user.IsRestricted || user.ID <= 0 {
orgVisibilityLimit := []structs.VisibleType{structs.VisibleTypePrivate}
opts *SearchRepoOptions
count int
}{
- {name: "PublicRepositoriesByName",
+ {
+ name: "PublicRepositoriesByName",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{PageSize: 10}, Collaborate: util.OptionalBoolFalse},
- count: 7},
- {name: "PublicAndPrivateRepositoriesByName",
+ count: 7,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByName",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 1, PageSize: 10}, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 14},
- {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFirstPage",
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFirstPage",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 1, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 14},
- {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitSecondPage",
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitSecondPage",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 2, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 14},
- {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitThirdPage",
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitThirdPage",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 14},
- {name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFourthPage",
+ count: 14,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesByNameWithPagesizeLimitFourthPage",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 3, PageSize: 5}, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 14},
- {name: "PublicRepositoriesOfUser",
+ count: 14,
+ },
+ {
+ name: "PublicRepositoriesOfUser",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Collaborate: util.OptionalBoolFalse},
- count: 2},
- {name: "PublicRepositoriesOfUser2",
+ count: 2,
+ },
+ {
+ name: "PublicRepositoriesOfUser2",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Collaborate: util.OptionalBoolFalse},
- count: 0},
- {name: "PublicRepositoriesOfUser3",
+ count: 0,
+ },
+ {
+ name: "PublicRepositoriesOfUser3",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Collaborate: util.OptionalBoolFalse},
- count: 2},
- {name: "PublicAndPrivateRepositoriesOfUser",
+ count: 2,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 4},
- {name: "PublicAndPrivateRepositoriesOfUser2",
+ count: 4,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser2",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 0},
- {name: "PublicAndPrivateRepositoriesOfUser3",
+ count: 0,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser3",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 4},
- {name: "PublicRepositoriesOfUserIncludingCollaborative",
+ count: 4,
+ },
+ {
+ name: "PublicRepositoriesOfUserIncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15},
- count: 5},
- {name: "PublicRepositoriesOfUser2IncludingCollaborative",
+ count: 5,
+ },
+ {
+ name: "PublicRepositoriesOfUser2IncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18},
- count: 1},
- {name: "PublicRepositoriesOfUser3IncludingCollaborative",
+ count: 1,
+ },
+ {
+ name: "PublicRepositoriesOfUser3IncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20},
- count: 3},
- {name: "PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+ count: 3,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true},
- count: 9},
- {name: "PublicAndPrivateRepositoriesOfUser2IncludingCollaborative",
+ count: 9,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser2IncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true},
- count: 4},
- {name: "PublicAndPrivateRepositoriesOfUser3IncludingCollaborative",
+ count: 4,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfUser3IncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 20, Private: true},
- count: 7},
- {name: "PublicRepositoriesOfOrganization",
+ count: 7,
+ },
+ {
+ name: "PublicRepositoriesOfOrganization",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Collaborate: util.OptionalBoolFalse},
- count: 1},
- {name: "PublicAndPrivateRepositoriesOfOrganization",
+ count: 1,
+ },
+ {
+ name: "PublicAndPrivateRepositoriesOfOrganization",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, Private: true, Collaborate: util.OptionalBoolFalse},
- count: 2},
- {name: "AllPublic/PublicRepositoriesByName",
+ count: 2,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesByName",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{PageSize: 10}, AllPublic: true, Collaborate: util.OptionalBoolFalse},
- count: 7},
- {name: "AllPublic/PublicAndPrivateRepositoriesByName",
+ count: 7,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesByName",
opts: &SearchRepoOptions{Keyword: "big_test_", ListOptions: ListOptions{Page: 1, PageSize: 10}, Private: true, AllPublic: true, Collaborate: util.OptionalBoolFalse},
- count: 14},
- {name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
+ count: 14,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesOfUserIncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, AllPublic: true, Template: util.OptionalBoolFalse},
- count: 28},
- {name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
+ count: 28,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborative",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true, AllLimited: true, Template: util.OptionalBoolFalse},
- count: 33},
- {name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
+ count: 33,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUserIncludingCollaborativeByName",
opts: &SearchRepoOptions{Keyword: "test", ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 15, Private: true, AllPublic: true},
- count: 15},
- {name: "AllPublic/PublicAndPrivateRepositoriesOfUser2IncludingCollaborativeByName",
+ count: 15,
+ },
+ {
+ name: "AllPublic/PublicAndPrivateRepositoriesOfUser2IncludingCollaborativeByName",
opts: &SearchRepoOptions{Keyword: "test", ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 18, Private: true, AllPublic: true},
- count: 13},
- {name: "AllPublic/PublicRepositoriesOfOrganization",
+ count: 13,
+ },
+ {
+ name: "AllPublic/PublicRepositoriesOfOrganization",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, OwnerID: 17, AllPublic: true, Collaborate: util.OptionalBoolFalse, Template: util.OptionalBoolFalse},
- count: 28},
- {name: "AllTemplates",
+ count: 28,
+ },
+ {
+ name: "AllTemplates",
opts: &SearchRepoOptions{ListOptions: ListOptions{Page: 1, PageSize: 10}, Template: util.OptionalBoolTrue},
- count: 2},
+ count: 2,
+ },
}
for _, testCase := range testCases {
if page <= 0 {
page = 1
}
- var expectedLen = testCase.opts.PageSize
+ expectedLen := testCase.opts.PageSize
if testCase.opts.PageSize*page > testCase.count+testCase.opts.PageSize {
expectedLen = 0
} else if testCase.opts.PageSize*page > testCase.count {
opts *SearchRepoOptions
count int
}{
- {name: "AllPublic/SearchPublicRepositoriesFromTopicAndName",
+ {
+ name: "AllPublic/SearchPublicRepositoriesFromTopicAndName",
opts: &SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql"},
- count: 2},
- {name: "AllPublic/OnlySearchPublicRepositoriesFromTopic",
+ count: 2,
+ },
+ {
+ name: "AllPublic/OnlySearchPublicRepositoriesFromTopic",
opts: &SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql", TopicOnly: true},
- count: 1},
- {name: "AllPublic/OnlySearchMultipleKeywordPublicRepositoriesFromTopic",
+ count: 1,
+ },
+ {
+ name: "AllPublic/OnlySearchMultipleKeywordPublicRepositoriesFromTopic",
opts: &SearchRepoOptions{OwnerID: 21, AllPublic: true, Keyword: "graphql,golang", TopicOnly: true},
- count: 2},
+ count: 2,
+ },
}
for _, testCase := range testCases {
}
func TestUploadAvatar(t *testing.T) {
-
// Generate image
myImage := image.NewRGBA(image.Rect(0, 0, 1, 1))
var buff bytes.Buffer
}
func TestUploadBigAvatar(t *testing.T) {
-
// Generate BIG image
myImage := image.NewRGBA(image.Rect(0, 0, 5000, 1))
var buff bytes.Buffer
}
func TestDeleteAvatar(t *testing.T) {
-
// Generate image
myImage := image.NewRGBA(image.Rect(0, 0, 1, 1))
var buff bytes.Buffer
}
if r.Recipient.IsOrganization() && len(r.TeamIDs) != len(r.Teams) {
-
for _, v := range r.TeamIDs {
team, err := GetTeamByID(v)
if err != nil {
// GetPendingRepositoryTransfer fetches the most recent and ongoing transfer
// process for the repository
func GetPendingRepositoryTransfer(repo *Repository) (*RepoTransfer, error) {
- var transfer = new(RepoTransfer)
+ transfer := new(RepoTransfer)
has, err := x.Where("repo_id = ? ", repo.ID).Get(transfer)
if err != nil {
)
func TestRepositoryTransfer(t *testing.T) {
-
assert.NoError(t, PrepareTestDatabase())
doer := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User)
}
// UnitConfig describes common unit config
-type UnitConfig struct {
-}
+type UnitConfig struct{}
// FromDB fills up a UnitConfig from serialized format.
func (cfg *UnitConfig) FromDB(bs []byte) error {
}
// WatchIfAuto subscribes to repo if AutoWatchOnChanges is set
-func WatchIfAuto(userID int64, repoID int64, isWrite bool) error {
+func WatchIfAuto(userID, repoID int64, isWrite bool) error {
return watchIfAuto(x, userID, repoID, isWrite)
}
}
func (opts *FindReviewOptions) toCond() builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if opts.IssueID > 0 {
cond = cond.And(builder.Eq{"issue_id": opts.IssueID})
}
}
// ContentEmptyErr represents an content empty error
-type ContentEmptyErr struct {
-}
+type ContentEmptyErr struct{}
func (ContentEmptyErr) Error() string {
return "Review content is empty"
return nil, nil, err
}
- var official = false
+ official := false
review, err := getCurrentReview(sess, doer, issue)
if err != nil {
return comment, sess.Commit()
}
-//RemoveReviewRequest remove a review request from one reviewer
+// RemoveReviewRequest removes a review request from one reviewer
func RemoveReviewRequest(issue *Issue, reviewer, doer *User) (*Comment, error) {
sess := x.NewSession()
defer sess.Close()
return comment, sess.Commit()
}
-//RemoveTeamReviewRequest remove a review request from one team
+// RemoveTeamReviewRequest removes a review request from one team
func RemoveTeamReviewRequest(issue *Issue, reviewer *Team, doer *User) (*Comment, error) {
sess := x.NewSession()
defer sess.Close()
invalidReview2 := AssertExistsAndLoadBean(t, &Review{ID: 3}).(*Review)
assert.Error(t, invalidReview2.LoadAttributes())
-
}
func TestReview_LoadCodeComments(t *testing.T) {
// This of course doesn't guarantee that this is the right directory for authorized_keys
// but at least if it's supposed to be this directory and it doesn't exist and we're the
// right user it will at least be created properly.
- err := os.MkdirAll(setting.SSH.RootPath, 0700)
+ err := os.MkdirAll(setting.SSH.RootPath, 0o700)
if err != nil {
log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
return err
}
fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
- f, err := os.OpenFile(fPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+ f, err := os.OpenFile(fPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600)
if err != nil {
return err
}
}
// .ssh directory should have mode 700, and authorized_keys file should have mode 600.
- if fi.Mode().Perm() > 0600 {
+ if fi.Mode().Perm() > 0o600 {
log.Error("authorized_keys file has unusual permission flags: %s - setting to -rw-------", fi.Mode().Perm().String())
- if err = f.Chmod(0600); err != nil {
+ if err = f.Chmod(0o600); err != nil {
return err
}
}
}
func calcFingerprint(publicKeyContent string) (string, error) {
- //Call the method based on configuration
+ // Call the method based on configuration
var (
fnName, fp string
err error
}
// ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source.
-func ListPublicLdapSSHKeys(uid int64, loginSourceID int64) ([]*PublicKey, error) {
+func ListPublicLdapSSHKeys(uid, loginSourceID int64) ([]*PublicKey, error) {
keys := make([]*PublicKey, 0, 5)
return keys, x.
Where("owner_id = ? AND login_source_id = ?", uid, loginSourceID).
}
func rewriteAllPublicKeys(e Engine) error {
- //Don't rewrite key if internal server
+ // Don't rewrite key if internal server
if setting.SSH.StartBuiltinServer || !setting.SSH.CreateAuthorizedKeysFile {
return nil
}
// This of course doesn't guarantee that this is the right directory for authorized_keys
// but at least if it's supposed to be this directory and it doesn't exist and we're the
// right user it will at least be created properly.
- err := os.MkdirAll(setting.SSH.RootPath, 0700)
+ err := os.MkdirAll(setting.SSH.RootPath, 0o700)
if err != nil {
log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
return err
fPath := filepath.Join(setting.SSH.RootPath, "authorized_keys")
tmpPath := fPath + ".tmp"
- t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+ t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
}
}
// SearchDeployKeys returns a list of deploy keys matching the provided arguments.
-func SearchDeployKeys(repoID int64, keyID int64, fingerprint string) ([]*DeployKey, error) {
+func SearchDeployKeys(repoID, keyID int64, fingerprint string) ([]*DeployKey, error) {
keys := make([]*DeployKey, 0, 5)
cond := builder.NewCond()
if repoID != 0 {
// This of course doesn't guarantee that this is the right directory for authorized_keys
// but at least if it's supposed to be this directory and it doesn't exist and we're the
// right user it will at least be created properly.
- err := os.MkdirAll(setting.SSH.RootPath, 0700)
+ err := os.MkdirAll(setting.SSH.RootPath, 0o700)
if err != nil {
log.Error("Unable to MkdirAll(%s): %v", setting.SSH.RootPath, err)
return err
fPath := filepath.Join(setting.SSH.RootPath, authorizedPrincipalsFile)
tmpPath := fPath + ".tmp"
- t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+ t, err := os.OpenFile(tmpPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
}
// GetMigratingTask returns the migrating task by repo's id
func GetMigratingTask(repoID int64) (*Task, error) {
- var task = Task{
+ task := Task{
RepoID: repoID,
Type: structs.TaskTypeMigrateRepo,
}
// GetMigratingTaskByID returns the migrating task by repo's id
func GetMigratingTaskByID(id, doerID int64) (*Task, *migration.MigrateOptions, error) {
- var task = Task{
+ task := Task{
ID: id,
DoerID: doerID,
Type: structs.TaskTypeMigrateRepo,
// ToConds generates conditions for database operation.
func (opts FindTaskOptions) ToConds() builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if opts.Status >= 0 {
cond = cond.And(builder.Eq{"status": opts.Status})
}
// FindTasks find all tasks
func FindTasks(opts FindTaskOptions) ([]*Task, error) {
- var tasks = make([]*Task, 0, 10)
+ tasks := make([]*Task, 0, 10)
err := x.Where(opts.ToConds()).Find(&tasks)
return tasks, err
}
}
func TestAccessTokenByNameExists(t *testing.T) {
-
name := "Token Gitea"
assert.NoError(t, PrepareTestDatabase())
}
// SanitizeAndValidateTopics sanitizes and checks an array or topics
-func SanitizeAndValidateTopics(topics []string) (validTopics []string, invalidTopics []string) {
+func SanitizeAndValidateTopics(topics []string) (validTopics, invalidTopics []string) {
validTopics = make([]string, 0)
mValidTopics := make(map[string]struct{})
invalidTopics = make([]string, 0)
}
func (opts *FindTopicOptions) toConds() builder.Cond {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"repo_topic.repo_id": opts.RepoID})
}
func GetRepoTopicByName(repoID int64, topicName string) (*Topic, error) {
return getRepoTopicByName(x, repoID, topicName)
}
+
func getRepoTopicByName(e Engine, repoID int64, topicName string) (*Topic, error) {
- var cond = builder.NewCond()
+ cond := builder.NewCond()
var topic Topic
cond = cond.And(builder.Eq{"repo_topic.repo_id": repoID}).And(builder.Eq{"topic.name": topicName})
sess := e.Table("topic").Where(cond)
}
// AssertCount assert the count of a bean
-func AssertCount(t testing.TB, bean interface{}, expected interface{}) {
+func AssertCount(t testing.TB, bean, expected interface{}) {
assert.EqualValues(t, expected, GetCount(t, bean))
}
Find(&users); err != nil {
return fmt.Errorf("get all inactive users: %v", err)
}
-
}
// FIXME: should only update authorized_keys file once after all deletions.
for _, u := range users {
func (opts *SearchUserOptions) toConds() builder.Cond {
var cond builder.Cond = builder.Eq{"type": opts.Type}
-
if len(opts.Keyword) > 0 {
lowerKeyword := strings.ToLower(opts.Keyword)
keywordCond := builder.Or(
} else {
exprCond = builder.Expr("org_user.org_id = \"user\".id")
}
- var accessCond = builder.NewCond()
+
+ var accessCond builder.Cond
if !opts.Actor.IsRestricted {
accessCond = builder.Or(
builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.Actor.ID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))),
log.Trace("Doing: SyncExternalUsers[%s]", s.Name)
var existingUsers []int64
- var isAttributeSSHPublicKeySet = len(strings.TrimSpace(s.LDAP().AttributeSSHPublicKey)) > 0
+ isAttributeSSHPublicKeySet := len(strings.TrimSpace(s.LDAP().AttributeSSHPublicKey)) > 0
var sshKeysNeedUpdate bool
// Find all users with this login type
// IterateUser iterate users
func IterateUser(f func(user *User) error) error {
var start int
- var batchSize = setting.Database.IterateBufferSize
+ batchSize := setting.Database.IterateBufferSize
for {
- var users = make([]*User, 0, batchSize)
+ users := make([]*User, 0, batchSize)
if err := x.Limit(batchSize, start).Find(&users); err != nil {
return err
}
}
// GetUserHeatmapDataByUser returns an array of UserHeatmapData
-func GetUserHeatmapDataByUser(user *User, doer *User) ([]*UserHeatmapData, error) {
+func GetUserHeatmapDataByUser(user, doer *User) ([]*UserHeatmapData, error) {
return getUserHeatmapData(user, nil, doer)
}
}
var groupBy string
- var groupByName = "timestamp" // We need this extra case because mssql doesn't allow grouping by alias
+ groupByName := "timestamp" // We need this extra case because mssql doesn't allow grouping by alias
switch {
case setting.Database.UseSQLite3:
groupBy = "strftime('%s', strftime('%Y-%m-%d', created_unix, 'unixepoch'))"
assert.Equal(t, len(actions), len(heatmap), "invalid action count: did the test data became too old?")
assert.Equal(t, tc.CountResult, len(heatmap), fmt.Sprintf("testcase %d", i))
- //Test JSON rendering
+ // Test JSON rendering
json := jsoniter.ConfigCompatibleWithStandardLibrary
jsonData, err := json.Marshal(heatmap)
assert.NoError(t, err)
"xorm.io/builder"
)
-var (
- // ErrEmailAddressNotExist email address not exist
- ErrEmailAddressNotExist = errors.New("Email address does not exist")
-)
+// ErrEmailAddressNotExist is returned when the email address does not exist
+var ErrEmailAddressNotExist = errors.New("Email address does not exist")
// EmailAddress is the list of all email addresses of a user. Can contain the
// primary email address, but is not obligatory.
func DeleteEmailAddress(email *EmailAddress) (err error) {
var deleted int64
// ask to check UID
- var address = EmailAddress{
+ address := EmailAddress{
UID: email.UID,
}
if email.ID > 0 {
"code.gitea.io/gitea/modules/log"
)
-var (
- // ErrOpenIDNotExist openid is not known
- ErrOpenIDNotExist = errors.New("OpenID is unknown")
-)
+// ErrOpenIDNotExist is returned when the OpenID is unknown
+var ErrOpenIDNotExist = errors.New("OpenID is unknown")
// UserOpenID is the list of all OpenID identities of a user.
type UserOpenID struct {
func DeleteUserOpenID(openid *UserOpenID) (err error) {
var deleted int64
// ask to check UID
- var address = UserOpenID{
+ address := UserOpenID{
UID: openid.UID,
}
if openid.ID > 0 {
}
}
-func testUserIsPublicMember(t *testing.T, uid int64, orgID int64, expected bool) {
+func testUserIsPublicMember(t *testing.T, uid, orgID int64, expected bool) {
user, err := GetUserByID(uid)
assert.NoError(t, err)
assert.Equal(t, expected, user.IsPublicMember(orgID))
}
}
-func testIsUserOrgOwner(t *testing.T, uid int64, orgID int64, expected bool) {
+func testIsUserOrgOwner(t *testing.T, uid, orgID int64, expected bool) {
user, err := GetUserByID(uid)
assert.NoError(t, err)
assert.Equal(t, expected, user.IsUserOrgOwner(orgID))
}
func TestCreateUser_Issue5882(t *testing.T) {
-
// Init settings
_ = setting.Admin
}
func TestGetUserIDsByNames(t *testing.T) {
-
- //ignore non existing
+ // ignore non existing
IDs, err := GetUserIDsByNames([]string{"user1", "user2", "none_existing_user"}, true)
assert.NoError(t, err)
assert.Equal(t, []int64{1, 2}, IDs)
- //ignore non existing
+ // ignore non existing
IDs, err = GetUserIDsByNames([]string{"user1", "do_not_exist"}, false)
assert.Error(t, err)
assert.Equal(t, []int64(nil), IDs)
"code.gitea.io/gitea/modules/log"
)
-//UserList is a list of user.
+// UserList is a list of users.
// This type provide valuable methods to retrieve information for a group of users efficiently.
type UserList []*User
func (users UserList) getUserIDs() []int64 {
userIDs := make([]int64, len(users))
for _, user := range users {
- userIDs = append(userIDs, user.ID) //Considering that user id are unique in the list
+ userIDs = append(userIDs, user.ID) // Considering that user id are unique in the list
}
return userIDs
}
func (users UserList) IsUserOrgOwner(orgID int64) map[int64]bool {
results := make(map[int64]bool, len(users))
for _, user := range users {
- results[user.ID] = false //Set default to false
+ results[user.ID] = false // Set default to false
}
ownerMaps, err := users.loadOrganizationOwners(x, orgID)
if err == nil {
func (users UserList) GetTwoFaStatus() map[int64]bool {
results := make(map[int64]bool, len(users))
for _, user := range users {
- results[user.ID] = false //Set default to false
+ results[user.ID] = false // Set default to false
}
tokenMaps, err := users.loadTwoFactorStatus(x)
if err == nil {
})
}
}
+
func testUserListIsPublicMember(t *testing.T, orgID int64, expected map[int64]bool) {
org, err := GetUserByID(orgID)
assert.NoError(t, err)
assert.NoError(t, org.GetMembers())
assert.Equal(t, expected, org.MembersIsPublic)
-
}
func TestUserListIsUserOrgOwner(t *testing.T) {
func deleteDeliveredHookTasksByWebhook(hookID int64, numberDeliveriesToKeep int) error {
log.Trace("Deleting hook_task rows for webhook %d, keeping the most recent %d deliveries", hookID, numberDeliveriesToKeep)
- var deliveryDates = make([]int64, 0, 10)
+ deliveryDates := make([]int64, 0, 10)
err := x.Table("hook_task").
Where("hook_task.hook_id = ? AND hook_task.is_delivered = ? AND hook_task.delivered is not null", hookID, true).
Cols("hook_task.delivered").
}
func TestWebhook_EventsArray(t *testing.T) {
- assert.Equal(t, []string{"create", "delete", "fork", "push",
+ assert.Equal(t, []string{
+ "create", "delete", "fork", "push",
"issues", "issue_assign", "issue_label", "issue_milestone", "issue_comment",
"pull_request", "pull_request_assign", "pull_request_label", "pull_request_milestone",
"pull_request_comment", "pull_request_review_approved", "pull_request_review_rejected",
- "pull_request_review_comment", "pull_request_sync", "repository", "release"},
+ "pull_request_review_comment", "pull_request_sync", "repository", "release",
+ },
(&Webhook{
HookEvent: &HookEvent{SendEverything: true},
}).EventsArray(),
assert.Equal(t, int64(3), hooks[0].ID)
assert.True(t, hooks[0].IsActive)
}
-
}
func TestUpdateWebhook(t *testing.T) {