author     Giteabot <teabot@gitea.io>  2023-12-21 15:38:39 +0800
committer  GitHub <noreply@github.com>  2023-12-21 15:38:39 +0800
commit     8ca32dc8733c0dad147cc7f734097dbe1113e9a9 (patch)
tree       d4b6d02fa596baddce9678c72325f76d23070dcf /routers
parent     47f9b3f48474ba6577642cd15fe07928dd90aaa7 (diff)
Fix merging artifact chunks error when minio storage basepath is set (#28555) (#28568)
Backport #28555 by @fuxiaohei

Related to https://github.com/go-gitea/gitea/issues/28279

When merging artifact chunks, Gitea lists the chunks from storage. When the storage is minio, each chunk's path contains `MINIO_BASE_PATH`, which breaks the merge. ~~So trim the `MINIO_BASE_PATH` when handling chunks.~~ Instead, the chunk file's basename is updated to retain the necessary information (run ID, artifact ID, start, and end), which ensures that the directory part of the chunk's path no longer affects parsing.

Co-authored-by: FuXiaoHei <fuxiaohei@vip.qq.com>
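For illustration only (not part of the patch), here is a minimal sketch of the failure mode. The old code parsed the full object path against a pattern anchored at `tmp<runID>/`, so any base-path prefix reported by the storage layer makes `fmt.Sscanf` fail; the `gitea/` prefix and the concrete IDs below are assumed example values.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	storageDir := fmt.Sprintf("tmp%d", 123)

	// Path as the old pattern expected it: "tmp123/1-0-1023.chunk".
	plain := filepath.Join(storageDir, "1-0-1023.chunk")
	// Path as minio may report it when a base path is configured (assumed "gitea/" prefix).
	prefixed := filepath.Join("gitea", storageDir, "1-0-1023.chunk")

	var artifactID, start, end int64
	pattern := filepath.Join(storageDir, "%d-%d-%d.chunk")

	if _, err := fmt.Sscanf(plain, pattern, &artifactID, &start, &end); err == nil {
		fmt.Println("plain path parsed:", artifactID, start, end)
	}
	if _, err := fmt.Sscanf(prefixed, pattern, &artifactID, &start, &end); err != nil {
		// The leading "gitea/" does not match the literal "tmp123/" in the pattern,
		// so parsing fails here and the merge step breaks.
		fmt.Println("prefixed path:", err)
	}
}
```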
Diffstat (limited to 'routers')
-rw-r--r--  routers/api/actions/artifacts_chunks.go  13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/routers/api/actions/artifacts_chunks.go b/routers/api/actions/artifacts_chunks.go
index 458d671cff..0e0cf8a3aa 100644
--- a/routers/api/actions/artifacts_chunks.go
+++ b/routers/api/actions/artifacts_chunks.go
@@ -25,10 +25,11 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 	contentRange := ctx.Req.Header.Get("Content-Range")
 	start, end, length := int64(0), int64(0), int64(0)
 	if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
+		log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
 		return -1, fmt.Errorf("parse content range error: %v", err)
 	}
 	// build chunk store path
-	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d.chunk", runID, artifact.ID, start, end)
+	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
 	// use io.TeeReader to avoid reading all body to md5 sum.
 	// it writes data to hasher after reading end
 	// if hash is not matched, delete the read-end result
@@ -57,6 +58,7 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 }
 
 type chunkFileItem struct {
+	RunID      int64
 	ArtifactID int64
 	Start      int64
 	End        int64
@@ -66,9 +68,12 @@ type chunkFileItem struct {
 func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
 	storageDir := fmt.Sprintf("tmp%d", runID)
 	var chunks []*chunkFileItem
-	if err := st.IterateObjects(storageDir, func(path string, obj storage.Object) error {
-		item := chunkFileItem{Path: path}
-		if _, err := fmt.Sscanf(path, filepath.Join(storageDir, "%d-%d-%d.chunk"), &item.ArtifactID, &item.Start, &item.End); err != nil {
+	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+		baseName := filepath.Base(fpath)
+		// when read chunks from storage, it only contains storage dir and basename,
+		// no matter the subdirectory setting in storage config
+		item := chunkFileItem{Path: storageDir + "/" + baseName}
+		if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
 			return fmt.Errorf("parse content range error: %v", err)
 		}
 		chunks = append(chunks, &item)
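To show the effect of the new naming scheme end to end, here is a hedged sketch of the parsing behavior after the patch. The `parseChunk` helper and the `gitea/` base-path prefix are illustrative assumptions, not code from the repository; the point is that parsing only the basename in the `runID-artifactID-start-end` format yields the same result whether or not the storage layer prepends a base path.

```go
package main

import (
	"fmt"
	"path/filepath"
)

type chunkFileItem struct {
	RunID      int64
	ArtifactID int64
	Start      int64
	End        int64
	Path       string
}

// parseChunk is a hypothetical helper mirroring the patched logic:
// parse the basename only, then rebuild the logical path from storageDir.
func parseChunk(storageDir, fpath string) (*chunkFileItem, error) {
	baseName := filepath.Base(fpath)
	item := chunkFileItem{Path: storageDir + "/" + baseName}
	if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk",
		&item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
		return nil, fmt.Errorf("parse content range error: %v", err)
	}
	return &item, nil
}

func main() {
	storageDir := "tmp123"
	for _, p := range []string{
		"tmp123/123-1-0-1023.chunk",       // local storage, no base path
		"gitea/tmp123/123-1-0-1023.chunk", // minio with an assumed base path
	} {
		item, err := parseChunk(storageDir, p)
		if err != nil {
			fmt.Println(err)
			continue
		}
		// Both inputs produce the same item, so the minio base path no longer matters.
		fmt.Printf("%+v\n", *item)
	}
}
```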