author     FuXiaoHei <fuxiaohei@vip.qq.com>  2023-12-21 15:04:50 +0800
committer  GitHub <noreply@github.com>  2023-12-21 07:04:50 +0000
commit     fe5a61639237138d6bb87cde17aedca3eb5bdd12 (patch)
tree       c42263e88d4ac1d2c5fca048d7cbe1c325ababd0 /routers/api
parent     caceb43313a586842e208249b2c67f90d4bf7139 (diff)
download   gitea-fe5a61639237138d6bb87cde17aedca3eb5bdd12.tar.gz
           gitea-fe5a61639237138d6bb87cde17aedca3eb5bdd12.zip
Fix merging artifact chunks error when minio storage basepath is set (#28555)
Related to https://github.com/go-gitea/gitea/issues/28279. When merging artifact chunks, Gitea lists the chunks from storage. When the storage backend is minio, each listed chunk path contains `MINIO_BASE_PATH`, which breaks the merge. Rather than trimming `MINIO_BASE_PATH` when handling chunks (the approach originally proposed and then dropped), this change puts the run ID into the chunk file's basename so the basename alone carries all the information needed for parsing; the directory part of the chunk's path is left unaffected.
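To make the failure mode concrete, below is a minimal standalone sketch (not part of the commit; the base path `gitea/`, run ID 17, artifact ID 23, and byte range 0-1023 are illustrative). With a minio base path configured, the listed object path carries that prefix, so scanning the full path against the old `tmp%d/%d-%d-%d.chunk` pattern fails, while scanning only the basename of the new four-field name succeeds:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Listed object path when the minio base path is "gitea/" (illustrative value).
	fullPath := "gitea/tmp17/23-0-1023.chunk"
	storageDir := "tmp17"

	// Old pattern: scan the full listed path; the "gitea/" prefix breaks the literal match.
	var artifactID, start, end int64
	_, err := fmt.Sscanf(fullPath, filepath.Join(storageDir, "%d-%d-%d.chunk"), &artifactID, &start, &end)
	fmt.Println("old pattern:", err) // input does not match format

	// New pattern: parse only the basename, which is unaffected by any base path prefix.
	baseName := filepath.Base("gitea/tmp17/17-23-0-1023.chunk")
	var runID int64
	_, err = fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &runID, &artifactID, &start, &end)
	fmt.Println("new pattern:", runID, artifactID, start, end, err) // 17 23 0 1023 <nil>
}
```

This is also why the fix below duplicates runID in the stored name: with the run ID in the basename, listChunksByRunID no longer needs to match the directory portion at all.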
Diffstat (limited to 'routers/api')
-rw-r--r--  routers/api/actions/artifacts_chunks.go  13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/routers/api/actions/artifacts_chunks.go b/routers/api/actions/artifacts_chunks.go
index c7ab70afa9..36432a0ca0 100644
--- a/routers/api/actions/artifacts_chunks.go
+++ b/routers/api/actions/artifacts_chunks.go
@@ -26,10 +26,11 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 	contentRange := ctx.Req.Header.Get("Content-Range")
 	start, end, length := int64(0), int64(0), int64(0)
 	if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
+		log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
 		return -1, fmt.Errorf("parse content range error: %v", err)
 	}
 	// build chunk store path
-	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d.chunk", runID, artifact.ID, start, end)
+	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
 	// use io.TeeReader to avoid reading all body to md5 sum.
 	// it writes data to hasher after reading end
 	// if hash is not matched, delete the read-end result
@@ -58,6 +59,7 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 }
 
 type chunkFileItem struct {
+	RunID      int64
 	ArtifactID int64
 	Start      int64
 	End        int64
@@ -67,9 +69,12 @@ type chunkFileItem struct {
 func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
 	storageDir := fmt.Sprintf("tmp%d", runID)
 	var chunks []*chunkFileItem
-	if err := st.IterateObjects(storageDir, func(path string, obj storage.Object) error {
-		item := chunkFileItem{Path: path}
-		if _, err := fmt.Sscanf(path, filepath.Join(storageDir, "%d-%d-%d.chunk"), &item.ArtifactID, &item.Start, &item.End); err != nil {
+	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+		baseName := filepath.Base(fpath)
+		// when chunks are read back from storage, the path should only contain the storage dir and the basename,
+		// regardless of the subdirectory setting in the storage config
+		item := chunkFileItem{Path: storageDir + "/" + baseName}
+		if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
 			return fmt.Errorf("parse content range error: %v", err)
 		}
 		chunks = append(chunks, &item)
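The shown hunk ends before the function returns. Given the return type map[int64][]*chunkFileItem, the collected items are grouped by artifact ID later in the function; a rough, hypothetical sketch of such a grouping step (not the commit's code) could look like:

```go
// groupByArtifactID is a hypothetical helper mirroring what the return type
// map[int64][]*chunkFileItem implies: parsed chunk items keyed by ArtifactID.
func groupByArtifactID(chunks []*chunkFileItem) map[int64][]*chunkFileItem {
	grouped := make(map[int64][]*chunkFileItem, len(chunks))
	for _, c := range chunks {
		grouped[c.ArtifactID] = append(grouped[c.ArtifactID], c)
	}
	return grouped
}
```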