Diffstat (limited to 'services/packages')
-rw-r--r--  services/packages/alpine/repository.go         2
-rw-r--r--  services/packages/arch/repository.go           51
-rw-r--r--  services/packages/arch/vercmp.go               108
-rw-r--r--  services/packages/arch/vercmp_test.go          27
-rw-r--r--  services/packages/auth.go                      5
-rw-r--r--  services/packages/cargo/index.go               40
-rw-r--r--  services/packages/cleanup/cleanup.go           263
-rw-r--r--  services/packages/container/blob_uploader.go   21
-rw-r--r--  services/packages/container/cleanup.go         4
-rw-r--r--  services/packages/container/common.go          31
-rw-r--r--  services/packages/debian/repository.go         8
-rw-r--r--  services/packages/package_update.go            79
-rw-r--r--  services/packages/packages.go                  67
-rw-r--r--  services/packages/rpm/repository.go            7
14 files changed, 494 insertions, 219 deletions
diff --git a/services/packages/alpine/repository.go b/services/packages/alpine/repository.go
index 27e6391980..277c188874 100644
--- a/services/packages/alpine/repository.go
+++ b/services/packages/alpine/repository.go
@@ -290,7 +290,7 @@ func buildPackagesIndex(ctx context.Context, ownerID int64, repoVersion *package
privPem, _ := pem.Decode([]byte(priv))
if privPem == nil {
- return fmt.Errorf("failed to decode private key pem")
+ return errors.New("failed to decode private key pem")
}
privKey, err := x509.ParsePKCS1PrivateKey(privPem.Bytes)
diff --git a/services/packages/arch/repository.go b/services/packages/arch/repository.go
index 6731d9a1ac..438bb10837 100644
--- a/services/packages/arch/repository.go
+++ b/services/packages/arch/repository.go
@@ -13,6 +13,7 @@ import (
"fmt"
"io"
"os"
+ "strconv"
"strings"
packages_model "code.gitea.io/gitea/models/packages"
@@ -26,9 +27,9 @@ import (
"code.gitea.io/gitea/modules/util"
packages_service "code.gitea.io/gitea/services/packages"
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/armor"
- "github.com/keybase/go-crypto/openpgp/packet"
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
)
const (
@@ -235,6 +236,28 @@ func buildPackagesIndex(ctx context.Context, ownerID int64, repoVersion *package
return packages_service.DeletePackageFile(ctx, pf)
}
+ vpfs := make(map[int64]*entryOptions)
+ for _, pf := range pfs {
+ current := &entryOptions{
+ File: pf,
+ }
+ current.Version, err = packages_model.GetVersionByID(ctx, pf.VersionID)
+ if err != nil {
+ return err
+ }
+
+ // Here we compare the versions ourselves instead of using SearchLatestVersions, because we shouldn't allow "downgrading" to an older version via the "latest" one.
+ // https://wiki.archlinux.org/title/Downgrading_packages : randomly downgrading can mess up dependencies:
+ // If a downgrade involves a soname change, all dependencies may need downgrading or rebuilding too.
+ if old, ok := vpfs[current.Version.PackageID]; ok {
+ if compareVersions(old.Version.Version, current.Version.Version) == -1 {
+ vpfs[current.Version.PackageID] = current
+ }
+ } else {
+ vpfs[current.Version.PackageID] = current
+ }
+ }
+
indexContent, _ := packages_module.NewHashedBuffer()
defer indexContent.Close()
@@ -243,15 +266,7 @@ func buildPackagesIndex(ctx context.Context, ownerID int64, repoVersion *package
cache := make(map[int64]*packages_model.Package)
- for _, pf := range pfs {
- opts := &entryOptions{
- File: pf,
- }
-
- opts.Version, err = packages_model.GetVersionByID(ctx, pf.VersionID)
- if err != nil {
- return err
- }
+ for _, opts := range vpfs {
if err := json.Unmarshal([]byte(opts.Version.MetadataJSON), &opts.VersionMetadata); err != nil {
return err
}
@@ -263,12 +278,12 @@ func buildPackagesIndex(ctx context.Context, ownerID int64, repoVersion *package
}
cache[opts.Package.ID] = opts.Package
}
- opts.Blob, err = packages_model.GetBlobByID(ctx, pf.BlobID)
+ opts.Blob, err = packages_model.GetBlobByID(ctx, opts.File.BlobID)
if err != nil {
return err
}
- sig, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, pf.ID, arch_module.PropertySignature)
+ sig, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, opts.File.ID, arch_module.PropertySignature)
if err != nil {
return err
}
@@ -277,7 +292,7 @@ func buildPackagesIndex(ctx context.Context, ownerID int64, repoVersion *package
}
opts.Signature = sig[0].Value
- meta, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, pf.ID, arch_module.PropertyMetadata)
+ meta, err := packages_model.GetPropertiesByName(ctx, packages_model.PropertyTypeFile, opts.File.ID, arch_module.PropertyMetadata)
if err != nil {
return err
}
@@ -358,8 +373,8 @@ func writeDescription(tw *tar.Writer, opts *entryOptions) error {
{"MD5SUM", opts.Blob.HashMD5},
{"SHA256SUM", opts.Blob.HashSHA256},
{"PGPSIG", opts.Signature},
- {"CSIZE", fmt.Sprintf("%d", opts.Blob.Size)},
- {"ISIZE", fmt.Sprintf("%d", opts.FileMetadata.InstalledSize)},
+ {"CSIZE", strconv.FormatInt(opts.Blob.Size, 10)},
+ {"ISIZE", strconv.FormatInt(opts.FileMetadata.InstalledSize, 10)},
{"NAME", opts.Package.Name},
{"BASE", opts.FileMetadata.Base},
{"ARCH", opts.FileMetadata.Architecture},
@@ -368,7 +383,7 @@ func writeDescription(tw *tar.Writer, opts *entryOptions) error {
{"URL", opts.VersionMetadata.ProjectURL},
{"LICENSE", strings.Join(opts.VersionMetadata.Licenses, "\n")},
{"GROUPS", strings.Join(opts.FileMetadata.Groups, "\n")},
- {"BUILDDATE", fmt.Sprintf("%d", opts.FileMetadata.BuildDate)},
+ {"BUILDDATE", strconv.FormatInt(opts.FileMetadata.BuildDate, 10)},
{"PACKAGER", opts.FileMetadata.Packager},
{"PROVIDES", strings.Join(opts.FileMetadata.Provides, "\n")},
{"REPLACES", strings.Join(opts.FileMetadata.Replaces, "\n")},
diff --git a/services/packages/arch/vercmp.go b/services/packages/arch/vercmp.go
new file mode 100644
index 0000000000..d44aa530f0
--- /dev/null
+++ b/services/packages/arch/vercmp.go
@@ -0,0 +1,108 @@
+// Copyright 2025 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package arch
+
+import (
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// https://gitlab.archlinux.org/pacman/pacman/-/blob/d55b47e5512808b67bc944feb20c2bcc6c1a4c45/lib/libalpm/version.c
+
+func parseEVR(evr string) (epoch, version, release string) {
+ if before, after, f := strings.Cut(evr, ":"); f {
+ epoch = before
+ evr = after
+ } else {
+ epoch = "0"
+ }
+
+ if before, after, f := strings.Cut(evr, "-"); f {
+ version = before
+ release = after
+ } else {
+ version = evr
+ release = "1"
+ }
+ return epoch, version, release
+}
+
+func compareSegments(a, b []string) int {
+ lenA, lenB := len(a), len(b)
+ l := min(lenA, lenB)
+ for i := range l {
+ if r := compare(a[i], b[i]); r != 0 {
+ return r
+ }
+ }
+ if lenA == lenB {
+ return 0
+ } else if l == lenA {
+ return -1
+ }
+ return 1
+}
+
+func compare(a, b string) int {
+ if a == b {
+ return 0
+ }
+
+ aNumeric := isNumeric(a)
+ bNumeric := isNumeric(b)
+
+ if aNumeric && bNumeric {
+ aInt, _ := strconv.Atoi(a)
+ bInt, _ := strconv.Atoi(b)
+ switch {
+ case aInt < bInt:
+ return -1
+ case aInt > bInt:
+ return 1
+ default:
+ return 0
+ }
+ }
+
+ if aNumeric {
+ return 1
+ }
+ if bNumeric {
+ return -1
+ }
+
+ return strings.Compare(a, b)
+}
+
+func isNumeric(s string) bool {
+ for _, c := range s {
+ if !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareVersions(a, b string) int {
+ if a == b {
+ return 0
+ }
+
+ epochA, versionA, releaseA := parseEVR(a)
+ epochB, versionB, releaseB := parseEVR(b)
+
+ if res := compareSegments([]string{epochA}, []string{epochB}); res != 0 {
+ return res
+ }
+
+ if res := compareSegments(strings.Split(versionA, "."), strings.Split(versionB, ".")); res != 0 {
+ return res
+ }
+
+ return compareSegments([]string{releaseA}, []string{releaseB})
+}
diff --git a/services/packages/arch/vercmp_test.go b/services/packages/arch/vercmp_test.go
new file mode 100644
index 0000000000..2014a6d429
--- /dev/null
+++ b/services/packages/arch/vercmp_test.go
@@ -0,0 +1,27 @@
+// Copyright 2025 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package arch
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestCompareVersions(t *testing.T) {
+ // https://man.archlinux.org/man/vercmp.8.en
+ checks := [][]string{
+ {"1.0a", "1.0b", "1.0beta", "1.0p", "1.0pre", "1.0rc", "1.0", "1.0.a", "1.0.1"},
+ {"1", "1.0", "1.1", "1.1.1", "1.2", "2.0", "3.0.0"},
+ }
+ for _, check := range checks {
+ for i := 0; i < len(check)-1; i++ {
+ require.Equal(t, -1, compareVersions(check[i], check[i+1]))
+ require.Equal(t, 1, compareVersions(check[i+1], check[i]))
+ }
+ }
+ require.Equal(t, 1, compareVersions("1.0-2", "1.0"))
+ require.Equal(t, 0, compareVersions("0:1.0-1", "1.0"))
+ require.Equal(t, 1, compareVersions("1:1.0-1", "2.0"))
+}
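For readers unfamiliar with pacman versioning, the following hypothetical addition to vercmp_test.go spells out the epoch:version-release split that parseEVR performs and why the epoch dominates the comparison; it is a sketch, not part of the change above:

func TestEVRSemantics(t *testing.T) {
	// "epoch:version-release"; a missing epoch defaults to "0", a missing release to "1".
	epoch, version, release := parseEVR("1:2.3.4-5")
	require.Equal(t, "1", epoch)
	require.Equal(t, "2.3.4", version)
	require.Equal(t, "5", release)

	// A higher epoch outranks any difference in version or release.
	require.Equal(t, 1, compareVersions("1:1.0-1", "2.0"))
}
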
diff --git a/services/packages/auth.go b/services/packages/auth.go
index 4526a8e303..6e87643e29 100644
--- a/services/packages/auth.go
+++ b/services/packages/auth.go
@@ -4,6 +4,7 @@
package packages
import (
+ "errors"
"fmt"
"net/http"
"strings"
@@ -58,7 +59,7 @@ func ParseAuthorizationRequest(req *http.Request) (*PackageMeta, error) {
parts := strings.SplitN(h, " ", 2)
if len(parts) != 2 {
log.Error("split token failed: %s", h)
- return nil, fmt.Errorf("split token failed")
+ return nil, errors.New("split token failed")
}
return ParseAuthorizationToken(parts[1])
@@ -77,7 +78,7 @@ func ParseAuthorizationToken(tokenStr string) (*PackageMeta, error) {
c, ok := token.Claims.(*packageClaims)
if !token.Valid || !ok {
- return nil, fmt.Errorf("invalid token claim")
+ return nil, errors.New("invalid token claim")
}
return &c.PackageMeta, nil
diff --git a/services/packages/cargo/index.go b/services/packages/cargo/index.go
index e8a8313625..605335d0f1 100644
--- a/services/packages/cargo/index.go
+++ b/services/packages/cargo/index.go
@@ -11,7 +11,6 @@ import (
"io"
"path"
"strconv"
- "time"
packages_model "code.gitea.io/gitea/models/packages"
repo_model "code.gitea.io/gitea/models/repo"
@@ -79,7 +78,7 @@ func RebuildIndex(ctx context.Context, doer, owner *user_model.User) error {
"Rebuild Cargo Index",
func(t *files_service.TemporaryUploadRepository) error {
// Remove all existing content but the Cargo config
- files, err := t.LsFiles()
+ files, err := t.LsFiles(ctx)
if err != nil {
return err
}
@@ -90,7 +89,7 @@ func RebuildIndex(ctx context.Context, doer, owner *user_model.User) error {
break
}
}
- if err := t.RemoveFilesFromIndex(files...); err != nil {
+ if err := t.RemoveFilesFromIndex(ctx, files...); err != nil {
return err
}
@@ -205,7 +204,7 @@ func addOrUpdatePackageIndex(ctx context.Context, t *files_service.TemporaryUplo
return nil
}
- return writeObjectToIndex(t, BuildPackagePath(p.LowerName), b)
+ return writeObjectToIndex(ctx, t, BuildPackagePath(p.LowerName), b)
}
func getOrCreateIndexRepository(ctx context.Context, doer, owner *user_model.User) (*repo_model.Repository, error) {
@@ -214,7 +213,7 @@ func getOrCreateIndexRepository(ctx context.Context, doer, owner *user_model.Use
if errors.Is(err, util.ErrNotExist) {
repo, err = repo_service.CreateRepositoryDirectly(ctx, doer, owner, repo_service.CreateRepoOptions{
Name: IndexRepositoryName,
- })
+ }, true)
if err != nil {
return nil, fmt.Errorf("CreateRepository: %w", err)
}
@@ -248,34 +247,34 @@ func createOrUpdateConfigFile(ctx context.Context, repo *repo_model.Repository,
"Initialize Cargo Config",
func(t *files_service.TemporaryUploadRepository) error {
var b bytes.Buffer
- err := json.NewEncoder(&b).Encode(BuildConfig(owner, setting.Service.RequireSignInView || owner.Visibility != structs.VisibleTypePublic || repo.IsPrivate))
+ err := json.NewEncoder(&b).Encode(BuildConfig(owner, setting.Service.RequireSignInViewStrict || owner.Visibility != structs.VisibleTypePublic || repo.IsPrivate))
if err != nil {
return err
}
- return writeObjectToIndex(t, ConfigFileName, &b)
+ return writeObjectToIndex(ctx, t, ConfigFileName, &b)
},
)
}
// This is a shorter version of CreateOrUpdateRepoFile which allows to perform multiple actions on a git repository
func alterRepositoryContent(ctx context.Context, doer *user_model.User, repo *repo_model.Repository, commitMessage string, fn func(*files_service.TemporaryUploadRepository) error) error {
- t, err := files_service.NewTemporaryUploadRepository(ctx, repo)
+ t, err := files_service.NewTemporaryUploadRepository(repo)
if err != nil {
return err
}
defer t.Close()
var lastCommitID string
- if err := t.Clone(repo.DefaultBranch, true); err != nil {
+ if err := t.Clone(ctx, repo.DefaultBranch, true); err != nil {
if !git.IsErrBranchNotExist(err) || !repo.IsEmpty {
return err
}
- if err := t.Init(repo.ObjectFormatName); err != nil {
+ if err := t.Init(ctx, repo.ObjectFormatName); err != nil {
return err
}
} else {
- if err := t.SetDefaultIndex(); err != nil {
+ if err := t.SetDefaultIndex(ctx); err != nil {
return err
}
@@ -291,25 +290,30 @@ func alterRepositoryContent(ctx context.Context, doer *user_model.User, repo *re
return err
}
- treeHash, err := t.WriteTree()
+ treeHash, err := t.WriteTree(ctx)
if err != nil {
return err
}
- now := time.Now()
- commitHash, err := t.CommitTreeWithDate(lastCommitID, doer, doer, treeHash, commitMessage, false, now, now)
+ commitOpts := &files_service.CommitTreeUserOptions{
+ ParentCommitID: lastCommitID,
+ TreeHash: treeHash,
+ CommitMessage: commitMessage,
+ DoerUser: doer,
+ }
+ commitHash, err := t.CommitTree(ctx, commitOpts)
if err != nil {
return err
}
- return t.Push(doer, commitHash, repo.DefaultBranch)
+ return t.Push(ctx, doer, commitHash, repo.DefaultBranch)
}
-func writeObjectToIndex(t *files_service.TemporaryUploadRepository, path string, r io.Reader) error {
- hash, err := t.HashObject(r)
+func writeObjectToIndex(ctx context.Context, t *files_service.TemporaryUploadRepository, path string, r io.Reader) error {
+ hash, err := t.HashObjectAndWrite(ctx, r)
if err != nil {
return err
}
- return t.AddObjectToIndex("100644", hash, path)
+ return t.AddObjectToIndex(ctx, "100644", hash, path)
}
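All TemporaryUploadRepository helpers now take the context explicitly, and writeObjectToIndex passes it through. A hedged usage sketch under the signatures shown in this diff, with a hypothetical entry path and content (not part of the change itself):

// Sketch (package cargo): write one synthetic index entry via the context-aware helpers.
func writeExampleEntry(ctx context.Context, doer *user_model.User, repo *repo_model.Repository) error {
	return alterRepositoryContent(ctx, doer, repo, "Add example entry",
		func(t *files_service.TemporaryUploadRepository) error {
			content := bytes.NewReader([]byte(`{"name":"example-crate","vers":"0.1.0"}`))
			return writeObjectToIndex(ctx, t, "ex/am/example-crate", content)
		})
}
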
diff --git a/services/packages/cleanup/cleanup.go b/services/packages/cleanup/cleanup.go
index b7ba2b6ac4..ec860db1bb 100644
--- a/services/packages/cleanup/cleanup.go
+++ b/services/packages/cleanup/cleanup.go
@@ -32,165 +32,170 @@ func CleanupTask(ctx context.Context, olderThan time.Duration) error {
return CleanupExpiredData(ctx, olderThan)
}
-func ExecuteCleanupRules(outerCtx context.Context) error {
- ctx, committer, err := db.TxContext(outerCtx)
+func executeCleanupOneRulePackage(ctx context.Context, pcr *packages_model.PackageCleanupRule, p *packages_model.Package) (versionDeleted bool, err error) {
+ olderThan := time.Now().AddDate(0, 0, -pcr.RemoveDays)
+ pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
+ PackageID: p.ID,
+ IsInternal: optional.Some(false),
+ Sort: packages_model.SortCreatedDesc,
+ })
if err != nil {
- return err
+ return false, fmt.Errorf("CleanupRule [%d]: SearchVersions failed: %w", pcr.ID, err)
}
- defer committer.Close()
-
- err = packages_model.IterateEnabledCleanupRules(ctx, func(ctx context.Context, pcr *packages_model.PackageCleanupRule) error {
- select {
- case <-outerCtx.Done():
- return db.ErrCancelledf("While processing package cleanup rules")
- default:
+ if pcr.KeepCount > 0 {
+ if pcr.KeepCount < len(pvs) {
+ pvs = pvs[pcr.KeepCount:]
+ } else {
+ pvs = nil
}
-
- if err := pcr.CompiledPattern(); err != nil {
- return fmt.Errorf("CleanupRule [%d]: CompilePattern failed: %w", pcr.ID, err)
+ }
+ for _, pv := range pvs {
+ if pcr.Type == packages_model.TypeContainer {
+ if skip, err := container_service.ShouldBeSkipped(ctx, pcr, p, pv); err != nil {
+ return false, fmt.Errorf("CleanupRule [%d]: container.ShouldBeSkipped failed: %w", pcr.ID, err)
+ } else if skip {
+ log.Debug("Rule[%d]: keep '%s/%s' (container)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
}
-
- olderThan := time.Now().AddDate(0, 0, -pcr.RemoveDays)
-
- packages, err := packages_model.GetPackagesByType(ctx, pcr.OwnerID, pcr.Type)
- if err != nil {
- return fmt.Errorf("CleanupRule [%d]: GetPackagesByType failed: %w", pcr.ID, err)
+ toMatch := pv.LowerVersion
+ if pcr.MatchFullName {
+ toMatch = p.LowerName + "/" + pv.LowerVersion
+ }
+ if pcr.KeepPatternMatcher != nil && pcr.KeepPatternMatcher.MatchString(toMatch) {
+ log.Debug("Rule[%d]: keep '%s/%s' (keep pattern)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
+ if pv.CreatedUnix.AsLocalTime().After(olderThan) {
+ log.Debug("Rule[%d]: keep '%s/%s' (remove days) %v", pcr.ID, p.Name, pv.Version, pv.CreatedUnix.FormatDate())
+ continue
}
+ if pcr.RemovePatternMatcher != nil && !pcr.RemovePatternMatcher.MatchString(toMatch) {
+ log.Debug("Rule[%d]: keep '%s/%s' (remove pattern)", pcr.ID, p.Name, pv.Version)
+ continue
+ }
+ log.Debug("Rule[%d]: remove '%s/%s'", pcr.ID, p.Name, pv.Version)
+ if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
+ log.Error("CleanupRule [%d]: DeletePackageVersionAndReferences failed: %v", pcr.ID, err)
+ continue
+ }
+ versionDeleted = true
+ }
+ return versionDeleted, nil
+}
- anyVersionDeleted := false
- for _, p := range packages {
- pvs, _, err := packages_model.SearchVersions(ctx, &packages_model.PackageSearchOptions{
- PackageID: p.ID,
- IsInternal: optional.Some(false),
- Sort: packages_model.SortCreatedDesc,
- Paginator: db.NewAbsoluteListOptions(pcr.KeepCount, 200),
- })
- if err != nil {
- return fmt.Errorf("CleanupRule [%d]: SearchVersions failed: %w", pcr.ID, err)
- }
- versionDeleted := false
- for _, pv := range pvs {
- if pcr.Type == packages_model.TypeContainer {
- if skip, err := container_service.ShouldBeSkipped(ctx, pcr, p, pv); err != nil {
- return fmt.Errorf("CleanupRule [%d]: container.ShouldBeSkipped failed: %w", pcr.ID, err)
- } else if skip {
- log.Debug("Rule[%d]: keep '%s/%s' (container)", pcr.ID, p.Name, pv.Version)
- continue
- }
- }
+func executeCleanupOneRule(ctx context.Context, pcr *packages_model.PackageCleanupRule) error {
+ if err := pcr.CompiledPattern(); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: CompilePattern failed: %w", pcr.ID, err)
+ }
- toMatch := pv.LowerVersion
- if pcr.MatchFullName {
- toMatch = p.LowerName + "/" + pv.LowerVersion
- }
+ packages, err := packages_model.GetPackagesByType(ctx, pcr.OwnerID, pcr.Type)
+ if err != nil {
+ return fmt.Errorf("CleanupRule [%d]: GetPackagesByType failed: %w", pcr.ID, err)
+ }
- if pcr.KeepPatternMatcher != nil && pcr.KeepPatternMatcher.MatchString(toMatch) {
- log.Debug("Rule[%d]: keep '%s/%s' (keep pattern)", pcr.ID, p.Name, pv.Version)
- continue
- }
- if pv.CreatedUnix.AsLocalTime().After(olderThan) {
- log.Debug("Rule[%d]: keep '%s/%s' (remove days)", pcr.ID, p.Name, pv.Version)
- continue
- }
- if pcr.RemovePatternMatcher != nil && !pcr.RemovePatternMatcher.MatchString(toMatch) {
- log.Debug("Rule[%d]: keep '%s/%s' (remove pattern)", pcr.ID, p.Name, pv.Version)
- continue
+ anyVersionDeleted := false
+ for _, p := range packages {
+ versionDeleted := false
+ err = db.WithTx(ctx, func(ctx context.Context) (err error) {
+ versionDeleted, err = executeCleanupOneRulePackage(ctx, pcr, p)
+ return err
+ })
+ if err != nil {
+ log.Error("CleanupRule [%d]: executeCleanupOneRulePackage(%d) failed: %v", pcr.ID, p.ID, err)
+ continue
+ }
+ anyVersionDeleted = anyVersionDeleted || versionDeleted
+ if versionDeleted {
+ if pcr.Type == packages_model.TypeCargo {
+ owner, err := user_model.GetUserByID(ctx, pcr.OwnerID)
+ if err != nil {
+ return fmt.Errorf("GetUserByID failed: %w", err)
}
-
- log.Debug("Rule[%d]: remove '%s/%s'", pcr.ID, p.Name, pv.Version)
-
- if err := packages_service.DeletePackageVersionAndReferences(ctx, pv); err != nil {
- return fmt.Errorf("CleanupRule [%d]: DeletePackageVersionAndReferences failed: %w", pcr.ID, err)
+ if err := cargo_service.UpdatePackageIndexIfExists(ctx, owner, owner, p.ID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: cargo.UpdatePackageIndexIfExists failed: %w", pcr.ID, err)
}
+ }
+ }
+ }
- versionDeleted = true
- anyVersionDeleted = true
+ if anyVersionDeleted {
+ switch pcr.Type {
+ case packages_model.TypeDebian:
+ if err := debian_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: debian.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
+ }
+ case packages_model.TypeAlpine:
+ if err := alpine_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: alpine.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
+ }
+ case packages_model.TypeRpm:
+ if err := rpm_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: rpm.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
}
+ case packages_model.TypeArch:
+ release, err := arch_service.AquireRegistryLock(ctx, pcr.OwnerID)
+ if err != nil {
+ return err
+ }
+ defer release()
- if versionDeleted {
- if pcr.Type == packages_model.TypeCargo {
- owner, err := user_model.GetUserByID(ctx, pcr.OwnerID)
- if err != nil {
- return fmt.Errorf("GetUserByID failed: %w", err)
- }
- if err := cargo_service.UpdatePackageIndexIfExists(ctx, owner, owner, p.ID); err != nil {
- return fmt.Errorf("CleanupRule [%d]: cargo.UpdatePackageIndexIfExists failed: %w", pcr.ID, err)
- }
- }
+ if err := arch_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
+ return fmt.Errorf("CleanupRule [%d]: arch.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
}
}
+ }
+ return nil
+}
- if anyVersionDeleted {
- switch pcr.Type {
- case packages_model.TypeDebian:
- if err := debian_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
- return fmt.Errorf("CleanupRule [%d]: debian.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
- }
- case packages_model.TypeAlpine:
- if err := alpine_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
- return fmt.Errorf("CleanupRule [%d]: alpine.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
- }
- case packages_model.TypeRpm:
- if err := rpm_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
- return fmt.Errorf("CleanupRule [%d]: rpm.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
- }
- case packages_model.TypeArch:
- release, err := arch_service.AquireRegistryLock(ctx, pcr.OwnerID)
- if err != nil {
- return err
- }
- defer release()
+func ExecuteCleanupRules(ctx context.Context) error {
+ return packages_model.IterateEnabledCleanupRules(ctx, func(ctx context.Context, pcr *packages_model.PackageCleanupRule) error {
+ select {
+ case <-ctx.Done():
+ return db.ErrCancelledf("While processing package cleanup rules")
+ default:
+ }
- if err := arch_service.BuildAllRepositoryFiles(ctx, pcr.OwnerID); err != nil {
- return fmt.Errorf("CleanupRule [%d]: arch.BuildAllRepositoryFiles failed: %w", pcr.ID, err)
- }
- }
+ err := executeCleanupOneRule(ctx, pcr)
+ if err != nil {
+ log.Error("CleanupRule [%d]: executeCleanupOneRule failed: %v", pcr.ID, err)
}
return nil
})
- if err != nil {
- return err
- }
-
- return committer.Commit()
}
-func CleanupExpiredData(outerCtx context.Context, olderThan time.Duration) error {
- ctx, committer, err := db.TxContext(outerCtx)
- if err != nil {
- return err
- }
- defer committer.Close()
-
- if err := container_service.Cleanup(ctx, olderThan); err != nil {
- return err
- }
-
- ps, err := packages_model.FindUnreferencedPackages(ctx)
- if err != nil {
- return err
- }
- for _, p := range ps {
- if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypePackage, p.ID); err != nil {
+func CleanupExpiredData(ctx context.Context, olderThan time.Duration) error {
+ pbs := make([]*packages_model.PackageBlob, 0, 100)
+ if err := db.WithTx(ctx, func(ctx context.Context) error {
+ if err := container_service.Cleanup(ctx, olderThan); err != nil {
return err
}
- if err := packages_model.DeletePackageByID(ctx, p.ID); err != nil {
+
+ ps, err := packages_model.FindUnreferencedPackages(ctx)
+ if err != nil {
return err
}
- }
-
- pbs, err := packages_model.FindExpiredUnreferencedBlobs(ctx, olderThan)
- if err != nil {
- return err
- }
+ for _, p := range ps {
+ if err := packages_model.DeleteAllProperties(ctx, packages_model.PropertyTypePackage, p.ID); err != nil {
+ return err
+ }
+ if err := packages_model.DeletePackageByID(ctx, p.ID); err != nil {
+ return err
+ }
+ }
- for _, pb := range pbs {
- if err := packages_model.DeleteBlobByID(ctx, pb.ID); err != nil {
+ pbs, err = packages_model.FindExpiredUnreferencedBlobs(ctx, olderThan)
+ if err != nil {
return err
}
- }
- if err := committer.Commit(); err != nil {
+ for _, pb := range pbs {
+ if err := packages_model.DeleteBlobByID(ctx, pb.ID); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
return err
}
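Beyond splitting ExecuteCleanupRules into helpers, the rewrite changes the transaction scope: the old single TxContext around every rule is replaced by one db.WithTx per package, so a failing package is logged and skipped instead of rolling back the whole run. A compressed sketch of that pattern as used in executeCleanupOneRule:

for _, p := range packages {
	versionDeleted := false
	if err := db.WithTx(ctx, func(ctx context.Context) (err error) {
		versionDeleted, err = executeCleanupOneRulePackage(ctx, pcr, p)
		return err
	}); err != nil {
		log.Error("CleanupRule [%d]: cleanup of package %d failed: %v", pcr.ID, p.ID, err)
		continue // the remaining packages of this rule are still processed
	}
	_ = versionDeleted // later drives the per-type repository metadata rebuilds
}
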
diff --git a/services/packages/container/blob_uploader.go b/services/packages/container/blob_uploader.go
index bae2e2d6af..27bc4a5421 100644
--- a/services/packages/container/blob_uploader.go
+++ b/services/packages/container/blob_uploader.go
@@ -12,7 +12,7 @@ import (
packages_model "code.gitea.io/gitea/models/packages"
packages_module "code.gitea.io/gitea/modules/packages"
"code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/util"
+ "code.gitea.io/gitea/modules/tempdir"
)
var (
@@ -30,8 +30,12 @@ type BlobUploader struct {
reading bool
}
-func buildFilePath(id string) string {
- return util.FilePathJoinAbs(setting.Packages.ChunkedUploadPath, id)
+func uploadPathTempDir() *tempdir.TempDir {
+ return setting.AppDataTempDir("package-upload")
+}
+
+func buildFilePath(uploadPath *tempdir.TempDir, id string) string {
+ return uploadPath.JoinPath(id)
}
// NewBlobUploader creates a new blob uploader for the given id
@@ -48,7 +52,12 @@ func NewBlobUploader(ctx context.Context, id string) (*BlobUploader, error) {
}
}
- f, err := os.OpenFile(buildFilePath(model.ID), os.O_RDWR|os.O_CREATE, 0o666)
+ uploadPath := uploadPathTempDir()
+ _, err = uploadPath.MkdirAllSub("")
+ if err != nil {
+ return nil, err
+ }
+ f, err := os.OpenFile(buildFilePath(uploadPath, model.ID), os.O_RDWR|os.O_CREATE, 0o666)
if err != nil {
return nil, err
}
@@ -118,13 +127,13 @@ func (u *BlobUploader) Read(p []byte) (int, error) {
return u.file.Read(p)
}
-// Remove deletes the data and the model of a blob upload
+// RemoveBlobUploadByID deletes the data and the model of a blob upload
func RemoveBlobUploadByID(ctx context.Context, id string) error {
if err := packages_model.DeleteBlobUploadByID(ctx, id); err != nil {
return err
}
- err := os.Remove(buildFilePath(id))
+ err := os.Remove(buildFilePath(uploadPathTempDir(), id))
if err != nil && !os.IsNotExist(err) {
return err
}
diff --git a/services/packages/container/cleanup.go b/services/packages/container/cleanup.go
index 3f5f43bbc0..263562a396 100644
--- a/services/packages/container/cleanup.go
+++ b/services/packages/container/cleanup.go
@@ -13,7 +13,7 @@ import (
container_module "code.gitea.io/gitea/modules/packages/container"
packages_service "code.gitea.io/gitea/services/packages"
- digest "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/go-digest"
)
// Cleanup removes expired container data
@@ -57,7 +57,7 @@ func cleanupExpiredUploadedBlobs(ctx context.Context, olderThan time.Duration) e
Type: packages_model.TypeContainer,
Version: packages_model.SearchValue{
ExactMatch: true,
- Value: container_model.UploadVersion,
+ Value: container_module.UploadVersion,
},
IsInternal: optional.Some(true),
HasFiles: optional.Some(false),
diff --git a/services/packages/container/common.go b/services/packages/container/common.go
index 5a14ed5b7a..02cbff2286 100644
--- a/services/packages/container/common.go
+++ b/services/packages/container/common.go
@@ -5,11 +5,17 @@ package container
import (
"context"
+ "io"
"strings"
packages_model "code.gitea.io/gitea/models/packages"
+ container_service "code.gitea.io/gitea/models/packages/container"
user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/json"
+ "code.gitea.io/gitea/modules/packages"
container_module "code.gitea.io/gitea/modules/packages/container"
+
+ "github.com/opencontainers/image-spec/specs-go/v1"
)
// UpdateRepositoryNames updates the repository name property for all packages of the specific owner
@@ -22,7 +28,7 @@ func UpdateRepositoryNames(ctx context.Context, owner *user_model.User, newOwner
newOwnerName = strings.ToLower(newOwnerName)
for _, p := range ps {
- if err := packages_model.DeletePropertyByName(ctx, packages_model.PropertyTypePackage, p.ID, container_module.PropertyRepository); err != nil {
+ if err := packages_model.DeletePropertiesByName(ctx, packages_model.PropertyTypePackage, p.ID, container_module.PropertyRepository); err != nil {
return err
}
@@ -33,3 +39,26 @@ func UpdateRepositoryNames(ctx context.Context, owner *user_model.User, newOwner
return nil
}
+
+func ParseManifestMetadata(ctx context.Context, rd io.Reader, ownerID int64, imageName string) (*v1.Manifest, *packages_model.PackageFileDescriptor, *container_module.Metadata, error) {
+ var manifest v1.Manifest
+ if err := json.NewDecoder(rd).Decode(&manifest); err != nil {
+ return nil, nil, nil, err
+ }
+ configDescriptor, err := container_service.GetContainerBlob(ctx, &container_service.BlobSearchOptions{
+ OwnerID: ownerID,
+ Image: imageName,
+ Digest: manifest.Config.Digest.String(),
+ })
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ configReader, err := packages.NewContentStore().OpenBlob(packages.BlobHash256Key(configDescriptor.Blob.HashSHA256))
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ defer configReader.Close()
+ metadata, err := container_module.ParseImageConfig(manifest.Config.MediaType, configReader)
+ return &manifest, configDescriptor, metadata, err
+}
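ParseManifestMetadata is a newly exported helper: it decodes an OCI manifest from a reader, resolves the referenced config blob for the given owner and image, and parses that config into container metadata. A hedged caller-side sketch (the surrounding variable names are hypothetical):

// Sketch: decode an uploaded manifest and obtain its parsed image metadata.
manifest, configDesc, metadata, err := ParseManifestMetadata(ctx, manifestReader, owner.ID, imageName)
if err != nil {
	return err
}
// manifest.Config.Digest was resolved to configDesc (the stored config blob);
// metadata holds the image config parsed for manifest.Config.MediaType.
_, _, _ = manifest, configDesc, metadata
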
diff --git a/services/packages/debian/repository.go b/services/packages/debian/repository.go
index 13e98a820e..34b52b45cf 100644
--- a/services/packages/debian/repository.go
+++ b/services/packages/debian/repository.go
@@ -23,10 +23,10 @@ import (
"code.gitea.io/gitea/modules/util"
packages_service "code.gitea.io/gitea/services/packages"
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/armor"
- "github.com/keybase/go-crypto/openpgp/clearsign"
- "github.com/keybase/go-crypto/openpgp/packet"
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/clearsign"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
"github.com/ulikunitz/xz"
)
diff --git a/services/packages/package_update.go b/services/packages/package_update.go
new file mode 100644
index 0000000000..4a22ee7a62
--- /dev/null
+++ b/services/packages/package_update.go
@@ -0,0 +1,79 @@
+// Copyright 2025 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packages
+
+import (
+ "context"
+ "fmt"
+
+ org_model "code.gitea.io/gitea/models/organization"
+ packages_model "code.gitea.io/gitea/models/packages"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/util"
+)
+
+func LinkToRepository(ctx context.Context, pkg *packages_model.Package, repo *repo_model.Repository, doer *user_model.User) error {
+ if pkg.OwnerID != repo.OwnerID {
+ return util.ErrPermissionDenied
+ }
+ if pkg.RepoID > 0 {
+ return util.ErrInvalidArgument
+ }
+
+ perms, err := access_model.GetUserRepoPermission(ctx, repo, doer)
+ if err != nil {
+ return fmt.Errorf("error getting permissions for user %d on repository %d: %w", doer.ID, repo.ID, err)
+ }
+ if !perms.CanWrite(unit.TypePackages) {
+ return util.ErrPermissionDenied
+ }
+
+ if err := packages_model.SetRepositoryLink(ctx, pkg.ID, repo.ID); err != nil {
+ return fmt.Errorf("error while linking package '%v' to repo '%v' : %w", pkg.Name, repo.FullName(), err)
+ }
+ return nil
+}
+
+func UnlinkFromRepository(ctx context.Context, pkg *packages_model.Package, doer *user_model.User) error {
+ if pkg.RepoID == 0 {
+ return util.ErrInvalidArgument
+ }
+
+ repo, err := repo_model.GetRepositoryByID(ctx, pkg.RepoID)
+ if err != nil && !repo_model.IsErrRepoNotExist(err) {
+ return fmt.Errorf("error getting repository %d: %w", pkg.RepoID, err)
+ }
+ if err == nil {
+ perms, err := access_model.GetUserRepoPermission(ctx, repo, doer)
+ if err != nil {
+ return fmt.Errorf("error getting permissions for user %d on repository %d: %w", doer.ID, repo.ID, err)
+ }
+ if !perms.CanWrite(unit.TypePackages) {
+ return util.ErrPermissionDenied
+ }
+ }
+
+ user, err := user_model.GetUserByID(ctx, pkg.OwnerID)
+ if err != nil {
+ return err
+ }
+ if !doer.IsAdmin {
+ if !user.IsOrganization() {
+ if doer.ID != pkg.OwnerID {
+ return fmt.Errorf("no permission to unlink package '%v' from its repository, or packages are disabled", pkg.Name)
+ }
+ } else {
+ isOrgAdmin, err := org_model.OrgFromUser(user).IsOrgAdmin(ctx, doer.ID)
+ if err != nil {
+ return err
+ } else if !isOrgAdmin {
+ return fmt.Errorf("no permission to unlink package '%v' from its repository, or packages are disabled", pkg.Name)
+ }
+ }
+ }
+ return packages_model.UnlinkRepository(ctx, pkg.ID)
+}
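The new package_update.go centralizes the permission checks for linking a package to a repository and unlinking it again. A hedged caller-side sketch (handler variables are hypothetical):

// Sketch: link a package to a repository of the same owner, then undo it.
if err := packages_service.LinkToRepository(ctx, pkg, repo, doer); err != nil {
	// util.ErrPermissionDenied: owners differ or doer cannot write packages in repo;
	// util.ErrInvalidArgument: the package is already linked.
	return err
}
if err := packages_service.UnlinkFromRepository(ctx, pkg, doer); err != nil {
	return err
}
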
diff --git a/services/packages/packages.go b/services/packages/packages.go
index bd1d460fd3..22b26b6563 100644
--- a/services/packages/packages.go
+++ b/services/packages/packages.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "net/http"
"net/url"
"strings"
@@ -468,24 +469,15 @@ func RemovePackageVersionByNameAndVersion(ctx context.Context, doer *user_model.
// RemovePackageVersion deletes the package version and all associated files
func RemovePackageVersion(ctx context.Context, doer *user_model.User, pv *packages_model.PackageVersion) error {
- dbCtx, committer, err := db.TxContext(ctx)
- if err != nil {
- return err
- }
- defer committer.Close()
-
- pd, err := packages_model.GetPackageDescriptor(dbCtx, pv)
+ pd, err := packages_model.GetPackageDescriptor(ctx, pv)
if err != nil {
return err
}
- log.Trace("Deleting package: %v", pv.ID)
-
- if err := DeletePackageVersionAndReferences(dbCtx, pv); err != nil {
- return err
- }
-
- if err := committer.Commit(); err != nil {
+ if err := db.WithTx(ctx, func(ctx context.Context) error {
+ log.Trace("Deleting package: %v", pv.ID)
+ return DeletePackageVersionAndReferences(ctx, pv)
+ }); err != nil {
return err
}
@@ -563,8 +555,8 @@ func DeletePackageFile(ctx context.Context, pf *packages_model.PackageFile) erro
return packages_model.DeleteFileByID(ctx, pf.ID)
}
-// GetFileStreamByPackageNameAndVersion returns the content of the specific package file
-func GetFileStreamByPackageNameAndVersion(ctx context.Context, pvi *PackageInfo, pfi *PackageFileInfo) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+// OpenFileForDownloadByPackageNameAndVersion returns the content of the specific package file and increases the download counter.
+func OpenFileForDownloadByPackageNameAndVersion(ctx context.Context, pvi *PackageInfo, pfi *PackageFileInfo, method string) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
log.Trace("Getting package file stream: %v, %v, %s, %s, %s, %s", pvi.Owner.ID, pvi.PackageType, pvi.Name, pvi.Version, pfi.Filename, pfi.CompositeKey)
pv, err := packages_model.GetVersionByNameAndVersion(ctx, pvi.Owner.ID, pvi.PackageType, pvi.Name, pvi.Version)
@@ -576,32 +568,38 @@ func GetFileStreamByPackageNameAndVersion(ctx context.Context, pvi *PackageInfo,
return nil, nil, nil, err
}
- return GetFileStreamByPackageVersion(ctx, pv, pfi)
+ return OpenFileForDownloadByPackageVersion(ctx, pv, pfi, method)
}
-// GetFileStreamByPackageVersion returns the content of the specific package file
-func GetFileStreamByPackageVersion(ctx context.Context, pv *packages_model.PackageVersion, pfi *PackageFileInfo) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+// OpenFileForDownloadByPackageVersion returns the content of the specific package file and increases the download counter.
+func OpenFileForDownloadByPackageVersion(ctx context.Context, pv *packages_model.PackageVersion, pfi *PackageFileInfo, method string) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
pf, err := packages_model.GetFileForVersionByName(ctx, pv.ID, pfi.Filename, pfi.CompositeKey)
if err != nil {
return nil, nil, nil, err
}
- return GetPackageFileStream(ctx, pf)
+ return OpenFileForDownload(ctx, pf, method)
}
-// GetPackageFileStream returns the content of the specific package file
-func GetPackageFileStream(ctx context.Context, pf *packages_model.PackageFile) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+// OpenFileForDownload returns the content of the specific package file and increases the download counter.
+func OpenFileForDownload(ctx context.Context, pf *packages_model.PackageFile, method string) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
pb, err := packages_model.GetBlobByID(ctx, pf.BlobID)
if err != nil {
return nil, nil, nil, err
}
- return GetPackageBlobStream(ctx, pf, pb, nil)
+ return OpenBlobForDownload(ctx, pf, pb, method, nil)
+}
+
+func OpenBlobStream(pb *packages_model.PackageBlob) (io.ReadSeekCloser, error) {
+ cs := packages_module.NewContentStore()
+ key := packages_module.BlobHash256Key(pb.HashSHA256)
+ return cs.OpenBlob(key)
}
-// GetPackageBlobStream returns the content of the specific package blob
+// OpenBlobForDownload returns the content of the specific package blob and increases the download counter.
// If the storage supports direct serving and it's enabled, only the direct serving url is returned.
-func GetPackageBlobStream(ctx context.Context, pf *packages_model.PackageFile, pb *packages_model.PackageBlob, serveDirectReqParams url.Values) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
+func OpenBlobForDownload(ctx context.Context, pf *packages_model.PackageFile, pb *packages_model.PackageBlob, method string, serveDirectReqParams url.Values) (io.ReadSeekCloser, *url.URL, *packages_model.PackageFile, error) {
key := packages_module.BlobHash256Key(pb.HashSHA256)
cs := packages_module.NewContentStore()
@@ -611,23 +609,24 @@ func GetPackageBlobStream(ctx context.Context, pf *packages_model.PackageFile, p
var err error
if cs.ShouldServeDirect() {
- u, err = cs.GetServeDirectURL(key, pf.Name, serveDirectReqParams)
+ u, err = cs.GetServeDirectURL(key, pf.Name, method, serveDirectReqParams)
if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
- log.Error("Error getting serve direct url: %v", err)
+ log.Error("Error getting serve direct url (fallback to local reader): %v", err)
}
}
if u == nil {
- s, err = cs.Get(key)
+ s, err = cs.OpenBlob(key)
+ }
+ if err != nil {
+ return nil, nil, nil, err
}
- if err == nil {
- if pf.IsLead {
- if err := packages_model.IncrementDownloadCounter(ctx, pf.VersionID); err != nil {
- log.Error("Error incrementing download counter: %v", err)
- }
+ if pf.IsLead && method == http.MethodGet {
+ if err := packages_model.IncrementDownloadCounter(ctx, pf.VersionID); err != nil {
+ log.Error("Error incrementing download counter: %v", err)
}
}
- return s, u, pf, err
+ return s, u, pf, nil
}
// RemoveAllPackages for User
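The former Get*Stream helpers are renamed to OpenFileForDownload*/OpenBlobForDownload and now take the HTTP method, so the download counter is only incremented for real GET downloads and not for HEAD probes. A hedged router-side sketch (request plumbing is hypothetical):

// Sketch: serve a package file; only http.MethodGet bumps the download counter.
s, u, pf, err := packages_service.OpenFileForDownloadByPackageNameAndVersion(ctx, pvi, pfi, req.Method)
if err != nil {
	return err
}
if u != nil {
	http.Redirect(w, req, u.String(), http.StatusTemporaryRedirect) // direct-serving URL
	return nil
}
defer s.Close()
_ = pf // pf carries the filename and blob info for the response headers
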
diff --git a/services/packages/rpm/repository.go b/services/packages/rpm/repository.go
index a7d196c15c..fbbf8d7dad 100644
--- a/services/packages/rpm/repository.go
+++ b/services/packages/rpm/repository.go
@@ -408,7 +408,6 @@ func buildPrimary(ctx context.Context, pv *packages_model.PackageVersion, pfs []
files = append(files, f)
}
}
- packageVersion := fmt.Sprintf("%s-%s", pd.FileMetadata.Version, pd.FileMetadata.Release)
packages = append(packages, &Package{
Type: "rpm",
Name: pd.Package.Name,
@@ -437,7 +436,7 @@ func buildPrimary(ctx context.Context, pv *packages_model.PackageVersion, pfs []
Archive: pd.FileMetadata.ArchiveSize,
},
Location: Location{
- Href: fmt.Sprintf("package/%s/%s/%s/%s-%s.%s.rpm", pd.Package.Name, packageVersion, pd.FileMetadata.Architecture, pd.Package.Name, packageVersion, pd.FileMetadata.Architecture),
+ Href: fmt.Sprintf("package/%s/%s/%s/%s-%s.%s.rpm", pd.Package.Name, pd.Version.Version, pd.FileMetadata.Architecture, pd.Package.Name, pd.Version.Version, pd.FileMetadata.Architecture),
},
Format: Format{
License: pd.VersionMetadata.License,
@@ -471,7 +470,7 @@ func buildPrimary(ctx context.Context, pv *packages_model.PackageVersion, pfs []
}
// https://docs.pulpproject.org/en/2.19/plugins/pulp_rpm/tech-reference/rpm.html#filelists-xml
-func buildFilelists(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) { //nolint:dupl
+func buildFilelists(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) { //nolint:dupl // duplicates with buildOther
type Version struct {
Epoch string `xml:"epoch,attr"`
Version string `xml:"ver,attr"`
@@ -518,7 +517,7 @@ func buildFilelists(ctx context.Context, pv *packages_model.PackageVersion, pfs
}
// https://docs.pulpproject.org/en/2.19/plugins/pulp_rpm/tech-reference/rpm.html#other-xml
-func buildOther(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) { //nolint:dupl
+func buildOther(ctx context.Context, pv *packages_model.PackageVersion, pfs []*packages_model.PackageFile, c packageCache, group string) (*repoData, error) { //nolint:dupl // duplicates with buildFilelists
type Version struct {
Epoch string `xml:"epoch,attr"`
Version string `xml:"ver,attr"`