The current Actions artifacts implementation only supports single-file artifacts. To support uploading multiple files, this change:

- saves each file to its own DB record with the same run-id, the same artifact-name, and a proper artifact-path
- changes the artifact upload URL so it no longer contains an artifact-id, since multiple files create multiple artifact-ids
- supports `path` in the download-artifact action; each artifact file is downloaded to `{path}/{artifact-path}`
- keeps the zip download link in the artifacts list on the repo Actions summary page, whether an artifact contains a single file or multiple files
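For illustration, here is a minimal client-side sketch of the new addressing scheme (the helper below is hypothetical and not part of this patch): the upload URL segment is the md5 hex digest of the artifact name rather than a database artifact-id, and each file is identified by an `itemPath` query parameter of the form `{artifact_name}/{artifact_path}`, mirroring `getUploadArtifactURL` and `parseArtifactItemPath` in the diff below.

```go
package main

import (
    "crypto/md5"
    "fmt"
    "net/url"
)

// buildUploadURL is a hypothetical helper showing how a runner-side client
// would address the new upload endpoint: the path segment is md5(artifact name),
// not an artifact id, and itemPath carries "{artifact_name}/{artifact_path}".
func buildUploadURL(base string, runID int64, artifactName, artifactPath string) string {
    hash := fmt.Sprintf("%x", md5.Sum([]byte(artifactName)))
    itemPath := url.QueryEscape(artifactName + "/" + artifactPath)
    return fmt.Sprintf("%s/_apis/pipelines/workflows/%d/artifacts/%s/upload?itemPath=%s",
        base, runID, hash, itemPath)
}

func main() {
    // e.g. .../workflows/791/artifacts/<md5-of-name>/upload?itemPath=multi-files%2Fxyz%2Fdef.txt
    fmt.Println(buildUploadURL("https://gitea.example.com/api/actions_pipeline", 791, "multi-files", "xyz/def.txt"))
}
```

Because the URL is keyed by the name hash, all files of one artifact share a single container URL, while each distinct `artifactPath` becomes its own `action_artifact` row.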
@@ -31,7 +31,7 @@ func init() {
// ActionArtifact is a file that is stored in the artifact storage.
type ActionArtifact struct {
ID int64 `xorm:"pk autoincr"`
RunID int64 `xorm:"index UNIQUE(runid_name)"` // The run id of the artifact
RunID int64 `xorm:"index unique(runid_name_path)"` // The run id of the artifact
RunnerID int64
RepoID int64 `xorm:"index"`
OwnerID int64
@@ -40,27 +40,28 @@ type ActionArtifact struct {
FileSize int64 // The size of the artifact in bytes
FileCompressedSize int64 // The size of the artifact in bytes after gzip compression
ContentEncoding string // The content encoding of the artifact
ArtifactPath string // The path to the artifact when runner uploads it
ArtifactName string `xorm:"UNIQUE(runid_name)"` // The name of the artifact when runner uploads it
Status int64 `xorm:"index"` // The status of the artifact, uploading, expired or need-delete
ArtifactPath string `xorm:"index unique(runid_name_path)"` // The path to the artifact when runner uploads it
ArtifactName string `xorm:"index unique(runid_name_path)"` // The name of the artifact when runner uploads it
Status int64 `xorm:"index"` // The status of the artifact, uploading, expired or need-delete
CreatedUnix timeutil.TimeStamp `xorm:"created"`
UpdatedUnix timeutil.TimeStamp `xorm:"updated index"`
}
// CreateArtifact create a new artifact with task info or get same named artifact in the same run
func CreateArtifact(ctx context.Context, t *ActionTask, artifactName string) (*ActionArtifact, error) {
func CreateArtifact(ctx context.Context, t *ActionTask, artifactName, artifactPath string) (*ActionArtifact, error) {
if err := t.LoadJob(ctx); err != nil {
return nil, err
}
artifact, err := getArtifactByArtifactName(ctx, t.Job.RunID, artifactName)
artifact, err := getArtifactByNameAndPath(ctx, t.Job.RunID, artifactName, artifactPath)
if errors.Is(err, util.ErrNotExist) {
artifact := &ActionArtifact{
RunID: t.Job.RunID,
RunnerID: t.RunnerID,
RepoID: t.RepoID,
OwnerID: t.OwnerID,
CommitSHA: t.CommitSHA,
Status: ArtifactStatusUploadPending,
ArtifactName: artifactName,
ArtifactPath: artifactPath,
RunID: t.Job.RunID,
RunnerID: t.RunnerID,
RepoID: t.RepoID,
OwnerID: t.OwnerID,
CommitSHA: t.CommitSHA,
Status: ArtifactStatusUploadPending,
}
if _, err := db.GetEngine(ctx).Insert(artifact); err != nil {
return nil, err
@@ -72,9 +73,9 @@ func CreateArtifact(ctx context.Context, t *ActionTask, artifactName string) (*A
return artifact, nil
}
func getArtifactByArtifactName(ctx context.Context, runID int64, name string) (*ActionArtifact, error) {
func getArtifactByNameAndPath(ctx context.Context, runID int64, name, fpath string) (*ActionArtifact, error) {
var art ActionArtifact
has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ?", runID, name).Get(&art)
has, err := db.GetEngine(ctx).Where("run_id = ? AND artifact_name = ? AND artifact_path = ?", runID, name, fpath).Get(&art)
if err != nil {
return nil, err
} else if !has {
@@ -109,14 +110,42 @@ func ListArtifactsByRunID(ctx context.Context, runID int64) ([]*ActionArtifact,
return arts, db.GetEngine(ctx).Where("run_id=?", runID).Find(&arts)
}
// ListArtifactsByRunIDAndArtifactName returns an artifacts of a run by artifact name
func ListArtifactsByRunIDAndArtifactName(ctx context.Context, runID int64, artifactName string) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=? AND artifact_name=?", runID, artifactName).Find(&arts)
}
// ListUploadedArtifactsByRunID returns all uploaded artifacts of a run
func ListUploadedArtifactsByRunID(ctx context.Context, runID int64) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=? AND status=?", runID, ArtifactStatusUploadConfirmed).Find(&arts)
}
// ActionArtifactMeta is the meta data of an artifact
type ActionArtifactMeta struct {
ArtifactName string
FileSize int64
}
// ListUploadedArtifactsMeta returns all uploaded artifacts meta of a run
func ListUploadedArtifactsMeta(ctx context.Context, runID int64) ([]*ActionArtifactMeta, error) {
arts := make([]*ActionArtifactMeta, 0, 10)
return arts, db.GetEngine(ctx).Table("action_artifact").
Where("run_id=? AND status=?", runID, ArtifactStatusUploadConfirmed).
GroupBy("artifact_name").
Select("artifact_name, sum(file_size) as file_size").
Find(&arts)
}
// ListArtifactsByRepoID returns all artifacts of a repo
func ListArtifactsByRepoID(ctx context.Context, repoID int64) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("repo_id=?", repoID).Find(&arts)
}
// ListArtifactsByRunIDAndName returns artifacts by name of a run
func ListArtifactsByRunIDAndName(ctx context.Context, runID int64, name string) ([]*ActionArtifact, error) {
arts := make([]*ActionArtifact, 0, 10)
return arts, db.GetEngine(ctx).Where("run_id=? AND artifact_name=?", runID, name).Find(&arts)
}
@@ -511,6 +511,8 @@ var migrations = []Migration{
NewMigration("Add git_size and lfs_size columns to repository table", v1_21.AddGitSizeAndLFSSizeToRepositoryTable),
// v264 -> v265
NewMigration("Add branch table", v1_21.AddBranchTable),
// v265 -> v266
NewMigration("Alter Actions Artifact table", v1_21.AlterActionArtifactTable),
}
// GetCurrentDBVersion returns the current db version
@@ -0,0 +1,19 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package v1_21 //nolint
import (
"xorm.io/xorm"
)
func AlterActionArtifactTable(x *xorm.Engine) error {
// ActionArtifact is a file that is stored in the artifact storage.
type ActionArtifact struct {
RunID int64 `xorm:"index unique(runid_name_path)"` // The run id of the artifact
ArtifactPath string `xorm:"index unique(runid_name_path)"` // The path to the artifact when runner uploads it
ArtifactName string `xorm:"index unique(runid_name_path)"` // The name of the artifact when runner uploads it
}
return x.Sync(new(ActionArtifact))
}
@@ -62,17 +62,12 @@ package actions
//
import (
"compress/gzip"
"crypto/md5"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"sort"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/modules/context"
@@ -85,11 +80,6 @@ import (
web_types "code.gitea.io/gitea/modules/web/types"
)
const (
artifactXTfsFileLengthHeader = "x-tfs-filelength"
artifactXActionsResultsMD5Header = "x-actions-results-md5"
)
const artifactRouteBase = "/_apis/pipelines/workflows/{run_id}/artifacts"
type artifactContextKeyType struct{}
@@ -121,11 +111,10 @@ func ArtifactsRoutes(prefix string) *web.Route {
// retrieve, list and confirm artifacts
m.Combo("").Get(r.listArtifacts).Post(r.getUploadArtifactURL).Patch(r.comfirmUploadArtifact)
// handle container artifacts list and download
m.Group("/{artifact_id}", func() {
m.Put("/upload", r.uploadArtifact)
m.Get("/path", r.getDownloadArtifactURL)
m.Get("/download", r.downloadArtifact)
})
m.Put("/{artifact_hash}/upload", r.uploadArtifact)
// handle artifacts download
m.Get("/{artifact_hash}/download_url", r.getDownloadArtifactURL)
m.Get("/{artifact_id}/download", r.downloadArtifact)
})
return m
@@ -173,10 +162,10 @@ type artifactRoutes struct {
fs storage.ObjectStorage
}
func (ar artifactRoutes) buildArtifactURL(runID, artifactID int64, suffix string) string {
func (ar artifactRoutes) buildArtifactURL(runID int64, artifactHash, suffix string) string {
uploadURL := strings.TrimSuffix(setting.AppURL, "/") + strings.TrimSuffix(ar.prefix, "/") +
strings.ReplaceAll(artifactRouteBase, "{run_id}", strconv.FormatInt(runID, 10)) +
"/" + strconv.FormatInt(artifactID, 10) + "/" + suffix
"/" + artifactHash + "/" + suffix
return uploadURL
}
@@ -189,20 +178,9 @@ type getUploadArtifactResponse struct {
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
func (ar artifactRoutes) validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
task := ctx.ActionTask
runID := ctx.ParamsInt64("run_id")
if task.Job.RunID != runID {
log.Error("Error runID not match")
ctx.Error(http.StatusBadRequest, "run-id does not match")
return nil, 0, false
}
return task, runID, true
}
// getUploadArtifactURL generates a URL for uploading an artifact
func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
task, runID, ok := ar.validateRunID(ctx)
_, runID, ok := validateRunID(ctx)
if !ok {
return
}
@@ -214,131 +192,59 @@ func (ar artifactRoutes) getUploadArtifactURL(ctx *ArtifactContext) {
return
}
artifact, err := actions.CreateArtifact(ctx, task, req.Name)
if err != nil {
log.Error("Error creating artifact: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
// use md5(artifact_name) to create upload url
artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(req.Name)))
resp := getUploadArtifactResponse{
FileContainerResourceURL: ar.buildArtifactURL(runID, artifact.ID, "upload"),
FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "upload"),
}
log.Debug("[artifact] get upload url: %s, artifact id: %d", resp.FileContainerResourceURL, artifact.ID)
log.Debug("[artifact] get upload url: %s", resp.FileContainerResourceURL)
ctx.JSON(http.StatusOK, resp)
}
// getUploadFileSize returns the size of the file to be uploaded.
// The raw size is the size of the file as reported by the header X-TFS-FileLength.
func (ar artifactRoutes) getUploadFileSize(ctx *ArtifactContext) (int64, int64, error) {
contentLength := ctx.Req.ContentLength
xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
if xTfsLength > 0 {
return xTfsLength, contentLength, nil
}
return contentLength, contentLength, nil
}
func (ar artifactRoutes) saveUploadChunk(ctx *ArtifactContext,
artifact *actions.ActionArtifact,
contentSize, runID int64,
) (int64, error) {
contentRange := ctx.Req.Header.Get("Content-Range")
start, end, length := int64(0), int64(0), int64(0)
if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
return -1, fmt.Errorf("parse content range error: %v", err)
}
storagePath := fmt.Sprintf("tmp%d/%d-%d-%d.chunk", runID, artifact.ID, start, end)
// use io.TeeReader to avoid reading all body to md5 sum.
// it writes data to hasher after reading end
// if hash is not matched, delete the read-end result
hasher := md5.New()
r := io.TeeReader(ctx.Req.Body, hasher)
// save chunk to storage
writtenSize, err := ar.fs.Save(storagePath, r, -1)
if err != nil {
return -1, fmt.Errorf("save chunk to storage error: %v", err)
}
// check md5
reqMd5String := ctx.Req.Header.Get(artifactXActionsResultsMD5Header)
chunkMd5String := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
log.Debug("[artifact] check chunk md5, sum: %s, header: %s", chunkMd5String, reqMd5String)
if reqMd5String != chunkMd5String || writtenSize != contentSize {
if err := ar.fs.Delete(storagePath); err != nil {
log.Error("Error deleting chunk: %s, %v", storagePath, err)
}
return -1, fmt.Errorf("md5 not match")
}
log.Debug("[artifact] save chunk %s, size: %d, artifact id: %d, start: %d, end: %d",
storagePath, contentSize, artifact.ID, start, end)
return length, nil
}
// The rules are from https://github.com/actions/toolkit/blob/main/packages/artifact/src/internal/path-and-artifact-name-validation.ts#L32
var invalidArtifactNameChars = strings.Join([]string{"\\", "/", "\"", ":", "<", ">", "|", "*", "?", "\r", "\n"}, "")
func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
_, runID, ok := ar.validateRunID(ctx)
task, runID, ok := validateRunID(ctx)
if !ok {
return
}
artifactID := ctx.ParamsInt64("artifact_id")
artifact, err := actions.GetArtifactByID(ctx, artifactID)
if errors.Is(err, util.ErrNotExist) {
log.Error("Error getting artifact: %v", err)
ctx.Error(http.StatusNotFound, err.Error())
return
} else if err != nil {
log.Error("Error getting artifact: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
artifactName, artifactPath, ok := parseArtifactItemPath(ctx)
if !ok {
return
}
// itemPath is generated from upload-artifact action
// it's formatted as {artifact_name}/{artfict_path_in_runner}
itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
artifactName := strings.Split(itemPath, "/")[0]
// checkArtifactName checks if the artifact name contains invalid characters.
// If the name contains invalid characters, an error is returned.
if strings.ContainsAny(artifactName, invalidArtifactNameChars) {
log.Error("Error checking artifact name contains invalid character")
ctx.Error(http.StatusBadRequest, err.Error())
// get upload file size
fileRealTotalSize, contentLength, err := getUploadFileSize(ctx)
if err != nil {
log.Error("Error get upload file size: %v", err)
ctx.Error(http.StatusInternalServerError, "Error get upload file size")
return
}
// get upload file size
fileSize, contentLength, err := ar.getUploadFileSize(ctx)
// create or get artifact with name and path
artifact, err := actions.CreateArtifact(ctx, task, artifactName, artifactPath)
if err != nil {
log.Error("Error getting upload file size: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
log.Error("Error create or get artifact: %v", err)
ctx.Error(http.StatusInternalServerError, "Error create or get artifact")
return
}
// save chunk
chunkAllLength, err := ar.saveUploadChunk(ctx, artifact, contentLength, runID)
// save chunk to storage, if success, return chunk stotal size
// if artifact is not gzip when uploading, chunksTotalSize == fileRealTotalSize
// if artifact is gzip when uploading, chunksTotalSize < fileRealTotalSize
chunksTotalSize, err := saveUploadChunk(ar.fs, ctx, artifact, contentLength, runID)
if err != nil {
log.Error("Error saving upload chunk: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
log.Error("Error save upload chunk: %v", err)
ctx.Error(http.StatusInternalServerError, "Error save upload chunk")
return
}
// if artifact name is not set, update it
if artifact.ArtifactName == "" {
artifact.ArtifactName = artifactName
artifact.ArtifactPath = itemPath // path in container
artifact.FileSize = fileSize // this is total size of all chunks
artifact.FileCompressedSize = chunkAllLength
// update artifact size if zero
if artifact.FileSize == 0 || artifact.FileCompressedSize == 0 {
artifact.FileSize = fileRealTotalSize
artifact.FileCompressedSize = chunksTotalSize
artifact.ContentEncoding = ctx.Req.Header.Get("Content-Encoding")
if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
log.Error("Error updating artifact: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
log.Error("Error update artifact: %v", err)
ctx.Error(http.StatusInternalServerError, "Error update artifact")
return
}
}
@@ -351,135 +257,26 @@ func (ar artifactRoutes) uploadArtifact(ctx *ArtifactContext) {
// comfirmUploadArtifact comfirm upload artifact.
// if all chunks are uploaded, merge them to one file.
func (ar artifactRoutes) comfirmUploadArtifact(ctx *ArtifactContext) {
_, runID, ok := ar.validateRunID(ctx)
_, runID, ok := validateRunID(ctx)
if !ok {
return
}
if err := ar.mergeArtifactChunks(ctx, runID); err != nil {
log.Error("Error merging chunks: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
artifactName := ctx.Req.URL.Query().Get("artifactName")
if artifactName == "" {
log.Error("Error artifact name is empty")
ctx.Error(http.StatusBadRequest, "Error artifact name is empty")
return
}
if err := mergeChunksForRun(ctx, ar.fs, runID, artifactName); err != nil {
log.Error("Error merge chunks: %v", err)
ctx.Error(http.StatusInternalServerError, "Error merge chunks")
return
}
ctx.JSON(http.StatusOK, map[string]string{
"message": "success",
})
}
type chunkItem struct {
ArtifactID int64
Start int64
End int64
Path string
}
func (ar artifactRoutes) mergeArtifactChunks(ctx *ArtifactContext, runID int64) error {
storageDir := fmt.Sprintf("tmp%d", runID)
var chunks []*chunkItem
if err := ar.fs.IterateObjects(storageDir, func(path string, obj storage.Object) error {
item := chunkItem{Path: path}
if _, err := fmt.Sscanf(path, storageDir+"/%d-%d-%d.chunk", &item.ArtifactID, &item.Start, &item.End); err != nil {
return fmt.Errorf("parse content range error: %v", err)
}
chunks = append(chunks, &item)
return nil
}); err != nil {
return err
}
// group chunks by artifact id
chunksMap := make(map[int64][]*chunkItem)
for _, c := range chunks {
chunksMap[c.ArtifactID] = append(chunksMap[c.ArtifactID], c)
}
for artifactID, cs := range chunksMap {
// get artifact to handle merged chunks
artifact, err := actions.GetArtifactByID(ctx, cs[0].ArtifactID)
if err != nil {
return fmt.Errorf("get artifact error: %v", err)
}
sort.Slice(cs, func(i, j int) bool {
return cs[i].Start < cs[j].Start
})
allChunks := make([]*chunkItem, 0)
startAt := int64(-1)
// check if all chunks are uploaded and in order and clean repeated chunks
for _, c := range cs {
// startAt is -1 means this is the first chunk
// previous c.ChunkEnd + 1 == c.ChunkStart means this chunk is in order
// StartAt is not -1 and c.ChunkStart is not startAt + 1 means there is a chunk missing
if c.Start == (startAt + 1) {
allChunks = append(allChunks, c)
startAt = c.End
}
}
// if the last chunk.End + 1 is not equal to chunk.ChunkLength, means chunks are not uploaded completely
if startAt+1 != artifact.FileCompressedSize {
log.Debug("[artifact] chunks are not uploaded completely, artifact_id: %d", artifactID)
break
}
// use multiReader
readers := make([]io.Reader, 0, len(allChunks))
closeReaders := func() {
for _, r := range readers {
_ = r.(io.Closer).Close() // it guarantees to be io.Closer by the following loop's Open function
}
readers = nil
}
defer closeReaders()
for _, c := range allChunks {
var readCloser io.ReadCloser
if readCloser, err = ar.fs.Open(c.Path); err != nil {
return fmt.Errorf("open chunk error: %v, %s", err, c.Path)
}
readers = append(readers, readCloser)
}
mergedReader := io.MultiReader(readers...)
// if chunk is gzip, decompress it
if artifact.ContentEncoding == "gzip" {
var err error
mergedReader, err = gzip.NewReader(mergedReader)
if err != nil {
return fmt.Errorf("gzip reader error: %v", err)
}
}
// save merged file
storagePath := fmt.Sprintf("%d/%d/%d.chunk", runID%255, artifactID%255, time.Now().UnixNano())
written, err := ar.fs.Save(storagePath, mergedReader, -1)
if err != nil {
return fmt.Errorf("save merged file error: %v", err)
}
if written != artifact.FileSize {
return fmt.Errorf("merged file size is not equal to chunk length")
}
// save storage path to artifact
log.Debug("[artifact] merge chunks to artifact: %d, %s", artifact.ID, storagePath)
artifact.StoragePath = storagePath
artifact.Status = actions.ArtifactStatusUploadConfirmed
if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
return fmt.Errorf("update artifact error: %v", err)
}
closeReaders() // close before delete
// drop chunks
for _, c := range cs {
if err := ar.fs.Delete(c.Path); err != nil {
return fmt.Errorf("delete chunk file error: %v", err)
}
}
}
return nil
}
type (
listArtifactsResponse struct {
Count int64 `json:"count"`
@@ -492,7 +289,7 @@ type (
)
func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
_, runID, ok := ar.validateRunID(ctx)
_, runID, ok := validateRunID(ctx)
if !ok {
return
}
@@ -503,17 +300,35 @@ func (ar artifactRoutes) listArtifacts(ctx *ArtifactContext) {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
if len(artifacts) == 0 {
log.Debug("[artifact] handleListArtifacts, no artifacts")
ctx.Error(http.StatusNotFound)
return
}
artficatsData := make([]listArtifactsResponseItem, 0, len(artifacts))
for _, a := range artifacts {
artficatsData = append(artficatsData, listArtifactsResponseItem{
Name: a.ArtifactName,
FileContainerResourceURL: ar.buildArtifactURL(runID, a.ID, "path"),
})
var (
items []listArtifactsResponseItem
values = make(map[string]bool)
)
for _, art := range artifacts {
if values[art.ArtifactName] {
continue
}
artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(art.ArtifactName)))
item := listArtifactsResponseItem{
Name: art.ArtifactName,
FileContainerResourceURL: ar.buildArtifactURL(runID, artifactHash, "download_url"),
}
items = append(items, item)
values[art.ArtifactName] = true
log.Debug("[artifact] handleListArtifacts, name: %s, url: %s", item.Name, item.FileContainerResourceURL)
}
respData := listArtifactsResponse{
Count: int64(len(artficatsData)),
Value: artficatsData,
Count: int64(len(items)),
Value: items,
}
ctx.JSON(http.StatusOK, respData)
}
@@ -529,37 +344,56 @@ type (
}
)
// getDownloadArtifactURL generates download url for each artifact
func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
_, runID, ok := ar.validateRunID(ctx)
_, runID, ok := validateRunID(ctx)
if !ok {
return
}
artifactID := ctx.ParamsInt64("artifact_id")
artifact, err := actions.GetArtifactByID(ctx, artifactID)
if errors.Is(err, util.ErrNotExist) {
log.Error("Error getting artifact: %v", err)
ctx.Error(http.StatusNotFound, err.Error())
itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
if !validateArtifactHash(ctx, itemPath) {
return
} else if err != nil {
log.Error("Error getting artifact: %v", err)
}
artifacts, err := actions.ListArtifactsByRunIDAndArtifactName(ctx, runID, itemPath)
if err != nil {
log.Error("Error getting artifacts: %v", err)
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
downloadURL := ar.buildArtifactURL(runID, artifact.ID, "download")
itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
respData := downloadArtifactResponse{
Value: []downloadArtifactResponseItem{{
if len(artifacts) == 0 {
log.Debug("[artifact] getDownloadArtifactURL, no artifacts")
ctx.Error(http.StatusNotFound)
return
}
if itemPath != artifacts[0].ArtifactName {
log.Error("Error dismatch artifact name, itemPath: %v, artifact: %v", itemPath, artifacts[0].ArtifactName)
ctx.Error(http.StatusBadRequest, "Error dismatch artifact name")
return
}
var items []downloadArtifactResponseItem
for _, artifact := range artifacts {
downloadURL := ar.buildArtifactURL(runID, strconv.FormatInt(artifact.ID, 10), "download")
item := downloadArtifactResponseItem{
Path: util.PathJoinRel(itemPath, artifact.ArtifactPath),
ItemType: "file",
ContentLocation: downloadURL,
}},
}
log.Debug("[artifact] getDownloadArtifactURL, path: %s, url: %s", item.Path, item.ContentLocation)
items = append(items, item)
}
respData := downloadArtifactResponse{
Value: items,
}
ctx.JSON(http.StatusOK, respData)
}
// downloadArtifact downloads artifact content
func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
_, runID, ok := ar.validateRunID(ctx)
_, runID, ok := validateRunID(ctx)
if !ok {
return
}
@@ -589,9 +423,11 @@ func (ar artifactRoutes) downloadArtifact(ctx *ArtifactContext) {
}
defer fd.Close()
if strings.HasSuffix(artifact.ArtifactPath, ".gz") {
// if artifact is compressed, set content-encoding header to gzip
if artifact.ContentEncoding == "gzip" {
ctx.Resp.Header().Set("Content-Encoding", "gzip")
}
log.Debug("[artifact] downloadArtifact, name: %s, path: %s, storage: %s, size: %d", artifact.ArtifactName, artifact.ArtifactPath, artifact.StoragePath, artifact.FileSize)
ctx.ServeContent(fd, &context.ServeHeaderOptions{
Filename: artifact.ArtifactName,
LastModified: artifact.CreatedUnix.AsLocalTime(),
@@ -0,0 +1,187 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"crypto/md5"
"encoding/base64"
"fmt"
"io"
"sort"
"time"
"code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/storage"
)
func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
artifact *actions.ActionArtifact,
contentSize, runID int64,
) (int64, error) {
// parse content-range header, format: bytes 0-1023/146515
contentRange := ctx.Req.Header.Get("Content-Range")
start, end, length := int64(0), int64(0), int64(0)
if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
return -1, fmt.Errorf("parse content range error: %v", err)
}
// build chunk store path
storagePath := fmt.Sprintf("tmp%d/%d-%d-%d.chunk", runID, artifact.ID, start, end)
// use io.TeeReader to avoid reading all body to md5 sum.
// it writes data to hasher after reading end
// if hash is not matched, delete the read-end result
hasher := md5.New()
r := io.TeeReader(ctx.Req.Body, hasher)
// save chunk to storage
writtenSize, err := st.Save(storagePath, r, -1)
if err != nil {
return -1, fmt.Errorf("save chunk to storage error: %v", err)
}
// check md5
reqMd5String := ctx.Req.Header.Get(artifactXActionsResultsMD5Header)
chunkMd5String := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
log.Info("[artifact] check chunk md5, sum: %s, header: %s", chunkMd5String, reqMd5String)
// if md5 not match, delete the chunk
if reqMd5String != chunkMd5String || writtenSize != contentSize {
if err := st.Delete(storagePath); err != nil {
log.Error("Error deleting chunk: %s, %v", storagePath, err)
}
return -1, fmt.Errorf("md5 not match")
}
log.Info("[artifact] save chunk %s, size: %d, artifact id: %d, start: %d, end: %d",
storagePath, contentSize, artifact.ID, start, end)
// return chunk total size
return length, nil
}
type chunkFileItem struct {
ArtifactID int64
Start int64
End int64
Path string
}
func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
storageDir := fmt.Sprintf("tmp%d", runID)
var chunks []*chunkFileItem
if err := st.IterateObjects(storageDir, func(path string, obj storage.Object) error {
item := chunkFileItem{Path: path}
if _, err := fmt.Sscanf(path, storageDir+"/%d-%d-%d.chunk", &item.ArtifactID, &item.Start, &item.End); err != nil {
return fmt.Errorf("parse content range error: %v", err)
}
chunks = append(chunks, &item)
return nil
}); err != nil {
return nil, err
}
// chunks group by artifact id
chunksMap := make(map[int64][]*chunkFileItem)
for _, c := range chunks {
chunksMap[c.ArtifactID] = append(chunksMap[c.ArtifactID], c)
}
return chunksMap, nil
}
func mergeChunksForRun(ctx *ArtifactContext, st storage.ObjectStorage, runID int64, artifactName string) error {
// read all db artifacts by name
artifacts, err := actions.ListArtifactsByRunIDAndName(ctx, runID, artifactName)
if err != nil {
return err
}
// read all uploading chunks from storage
chunksMap, err := listChunksByRunID(st, runID)
if err != nil {
return err
}
// range db artifacts to merge chunks
for _, art := range artifacts {
chunks, ok := chunksMap[art.ID]
if !ok {
log.Debug("artifact %d chunks not found", art.ID)
continue
}
if err := mergeChunksForArtifact(ctx, chunks, st, art); err != nil {
return err
}
}
return nil
}
func mergeChunksForArtifact(ctx *ArtifactContext, chunks []*chunkFileItem, st storage.ObjectStorage, artifact *actions.ActionArtifact) error {
sort.Slice(chunks, func(i, j int) bool {
return chunks[i].Start < chunks[j].Start
})
allChunks := make([]*chunkFileItem, 0)
startAt := int64(-1)
// check if all chunks are uploaded and in order and clean repeated chunks
for _, c := range chunks {
// startAt is -1 means this is the first chunk
// previous c.ChunkEnd + 1 == c.ChunkStart means this chunk is in order
// StartAt is not -1 and c.ChunkStart is not startAt + 1 means there is a chunk missing
if c.Start == (startAt + 1) {
allChunks = append(allChunks, c)
startAt = c.End
}
}
// if the last chunk.End + 1 is not equal to chunk.ChunkLength, means chunks are not uploaded completely
if startAt+1 != artifact.FileCompressedSize {
log.Debug("[artifact] chunks are not uploaded completely, artifact_id: %d", artifact.ID)
return nil
}
// use multiReader
readers := make([]io.Reader, 0, len(allChunks))
closeReaders := func() {
for _, r := range readers {
_ = r.(io.Closer).Close() // it guarantees to be io.Closer by the following loop's Open function
}
readers = nil
}
defer closeReaders()
for _, c := range allChunks {
var readCloser io.ReadCloser
var err error
if readCloser, err = st.Open(c.Path); err != nil {
return fmt.Errorf("open chunk error: %v, %s", err, c.Path)
}
readers = append(readers, readCloser)
}
mergedReader := io.MultiReader(readers...)
// if chunk is gzip, use gz as extension
// download-artifact action will use content-encoding header to decide if it should decompress the file
extension := "chunk"
if artifact.ContentEncoding == "gzip" {
extension = "chunk.gz"
}
// save merged file
storagePath := fmt.Sprintf("%d/%d/%d.%s", artifact.RunID%255, artifact.ID%255, time.Now().UnixNano(), extension)
written, err := st.Save(storagePath, mergedReader, -1)
if err != nil {
return fmt.Errorf("save merged file error: %v", err)
}
if written != artifact.FileCompressedSize {
return fmt.Errorf("merged file size is not equal to chunk length")
}
defer func() {
closeReaders() // close before delete
// drop chunks
for _, c := range chunks {
if err := st.Delete(c.Path); err != nil {
log.Warn("Error deleting chunk: %s, %v", c.Path, err)
}
}
}()
// save storage path to artifact
log.Debug("[artifact] merge chunks to artifact: %d, %s", artifact.ID, storagePath)
artifact.StoragePath = storagePath
artifact.Status = actions.ArtifactStatusUploadConfirmed
if err := actions.UpdateArtifactByID(ctx, artifact.ID, artifact); err != nil {
return fmt.Errorf("update artifact error: %v", err)
}
return nil
}
@@ -0,0 +1,82 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package actions
import (
"crypto/md5"
"fmt"
"net/http"
"strconv"
"strings"
"code.gitea.io/gitea/models/actions"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util"
)
const (
artifactXTfsFileLengthHeader = "x-tfs-filelength"
artifactXActionsResultsMD5Header = "x-actions-results-md5"
)
// The rules are from https://github.com/actions/toolkit/blob/main/packages/artifact/src/internal/path-and-artifact-name-validation.ts#L32
var invalidArtifactNameChars = strings.Join([]string{"\\", "/", "\"", ":", "<", ">", "|", "*", "?", "\r", "\n"}, "")
func validateArtifactName(ctx *ArtifactContext, artifactName string) bool {
if strings.ContainsAny(artifactName, invalidArtifactNameChars) {
log.Error("Error checking artifact name contains invalid character")
ctx.Error(http.StatusBadRequest, "Error checking artifact name contains invalid character")
return false
}
return true
}
func validateRunID(ctx *ArtifactContext) (*actions.ActionTask, int64, bool) {
task := ctx.ActionTask
runID := ctx.ParamsInt64("run_id")
if task.Job.RunID != runID {
log.Error("Error runID not match")
ctx.Error(http.StatusBadRequest, "run-id does not match")
return nil, 0, false
}
return task, runID, true
}
func validateArtifactHash(ctx *ArtifactContext, artifactName string) bool {
paramHash := ctx.Params("artifact_hash")
// use artifact name to create upload url
artifactHash := fmt.Sprintf("%x", md5.Sum([]byte(artifactName)))
if paramHash == artifactHash {
return true
}
log.Error("Invalid artifact hash: %s", paramHash)
ctx.Error(http.StatusBadRequest, "Invalid artifact hash")
return false
}
func parseArtifactItemPath(ctx *ArtifactContext) (string, string, bool) {
// itemPath is generated from upload-artifact action
// it's formatted as {artifact_name}/{artfict_path_in_runner}
itemPath := util.PathJoinRel(ctx.Req.URL.Query().Get("itemPath"))
artifactName := strings.Split(itemPath, "/")[0]
artifactPath := strings.TrimPrefix(itemPath, artifactName+"/")
if !validateArtifactHash(ctx, artifactName) {
return "", "", false
}
if !validateArtifactName(ctx, artifactName) {
return "", "", false
}
return artifactName, artifactPath, true
}
// getUploadFileSize returns the size of the file to be uploaded.
// The raw size is the size of the file as reported by the header X-TFS-FileLength.
func getUploadFileSize(ctx *ArtifactContext) (int64, int64, error) {
contentLength := ctx.Req.ContentLength
xTfsLength, _ := strconv.ParseInt(ctx.Req.Header.Get(artifactXTfsFileLengthHeader), 10, 64)
if xTfsLength > 0 {
return xTfsLength, contentLength, nil
}
return contentLength, contentLength, nil
}
@@ -4,10 +4,14 @@
package actions
import (
"archive/zip"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
@@ -479,7 +483,6 @@ type ArtifactsViewResponse struct {
type ArtifactsViewItem struct {
Name string `json:"name"`
Size int64 `json:"size"`
ID int64 `json:"id"`
}
func ArtifactsView(ctx *context_module.Context) {
@@ -493,7 +496,7 @@ func ArtifactsView(ctx *context_module.Context) {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
artifacts, err := actions_model.ListUploadedArtifactsByRunID(ctx, run.ID)
artifacts, err := actions_model.ListUploadedArtifactsMeta(ctx, run.ID)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
@@ -505,7 +508,6 @@ func ArtifactsView(ctx *context_module.Context) {
artifactsResponse.Artifacts = append(artifactsResponse.Artifacts, &ArtifactsViewItem{
Name: art.ArtifactName,
Size: art.FileSize,
ID: art.ID,
})
}
ctx.JSON(http.StatusOK, artifactsResponse)
@@ -513,15 +515,8 @@ func ArtifactsView(ctx *context_module.Context) {
func ArtifactsDownloadView(ctx *context_module.Context) {
runIndex := ctx.ParamsInt64("run")
artifactID := ctx.ParamsInt64("id")
artifactName := ctx.Params("artifact_name")
artifact, err := actions_model.GetArtifactByID(ctx, artifactID)
if errors.Is(err, util.ErrNotExist) {
ctx.Error(http.StatusNotFound, err.Error())
} else if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
run, err := actions_model.GetRunByIndex(ctx, ctx.Repo.Repository.ID, runIndex)
if err != nil {
if errors.Is(err, util.ErrNotExist) {
@@ -531,20 +526,49 @@ func ArtifactsDownloadView(ctx *context_module.Context) {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
if artifact.RunID != run.ID {
ctx.Error(http.StatusNotFound, "artifact not found")
return
}
f, err := storage.ActionsArtifacts.Open(artifact.StoragePath)
artifacts, err := actions_model.ListArtifactsByRunIDAndName(ctx, run.ID, artifactName)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
defer f.Close()
if len(artifacts) == 0 {
ctx.Error(http.StatusNotFound, "artifact not found")
return
}
ctx.ServeContent(f, &context_module.ServeHeaderOptions{
Filename: artifact.ArtifactName,
LastModified: artifact.CreatedUnix.AsLocalTime(),
})
ctx.Resp.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s.zip; filename*=UTF-8''%s.zip", url.PathEscape(artifactName), artifactName))
writer := zip.NewWriter(ctx.Resp)
defer writer.Close()
for _, art := range artifacts {
f, err := storage.ActionsArtifacts.Open(art.StoragePath)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
var r io.ReadCloser
if art.ContentEncoding == "gzip" {
r, err = gzip.NewReader(f)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
} else {
r = f
}
defer r.Close()
w, err := writer.Create(art.ArtifactPath)
if err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
if _, err := io.Copy(w, r); err != nil {
ctx.Error(http.StatusInternalServerError, err.Error())
return
}
}
}
@@ -1210,7 +1210,7 @@ func registerRoutes(m *web.Route) {
m.Post("/cancel", reqRepoActionsWriter, actions.Cancel)
m.Post("/approve", reqRepoActionsWriter, actions.Approve)
m.Post("/artifacts", actions.ArtifactsView)
m.Get("/artifacts/{id}", actions.ArtifactsDownloadView)
m.Get("/artifacts/{artifact_name}", actions.ArtifactsDownloadView)
m.Post("/rerun", reqRepoActionsWriter, actions.RerunAll)
})
}, reqRepoActionsReader, actions.MustEnableActions)
@@ -13,17 +13,17 @@ import (
"github.com/stretchr/testify/assert"
)
func TestActionsArtifactUpload(t *testing.T) {
defer tests.PrepareTestEnv(t)()
type uploadArtifactResponse struct {
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
type uploadArtifactResponse struct {
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
type getUploadArtifactRequest struct {
Type string
Name string
}
type getUploadArtifactRequest struct {
Type string
Name string
}
func TestActionsArtifactUploadSingleFile(t *testing.T) {
defer tests.PrepareTestEnv(t)()
// acquire artifact upload url
req := NewRequestWithJSON(t, "POST", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts", getUploadArtifactRequest{
@@ -52,32 +52,33 @@ func TestActionsArtifactUpload(t *testing.T) {
t.Logf("Create artifact confirm")
// confirm artifact upload
req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts?artifactName=artifact")
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
MakeRequest(t, req, http.StatusOK)
}
func TestActionsArtifactUploadNotExist(t *testing.T) {
func TestActionsArtifactUploadInvalidHash(t *testing.T) {
defer tests.PrepareTestEnv(t)()
// artifact id 54321 not exist
url := "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts/54321/upload?itemPath=artifact/abc.txt"
url := "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts/8e5b948a454515dbabfc7eb718ddddddd/upload?itemPath=artifact/abc.txt"
body := strings.Repeat("A", 1024)
req := NewRequestWithBody(t, "PUT", url, strings.NewReader(body))
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
req.Header.Add("Content-Range", "bytes 0-1023/1024")
req.Header.Add("x-tfs-filelength", "1024")
req.Header.Add("x-actions-results-md5", "1HsSe8LeLWh93ILaw1TEFQ==") // base64(md5(body))
MakeRequest(t, req, http.StatusNotFound)
resp := MakeRequest(t, req, http.StatusBadRequest)
assert.Contains(t, resp.Body.String(), "Invalid artifact hash")
}
func TestActionsArtifactConfirmUpload(t *testing.T) {
func TestActionsArtifactConfirmUploadWithoutName(t *testing.T) {
defer tests.PrepareTestEnv(t)()
req := NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp := MakeRequest(t, req, http.StatusOK)
assert.Contains(t, resp.Body.String(), "success")
resp := MakeRequest(t, req, http.StatusBadRequest)
assert.Contains(t, resp.Body.String(), "artifact name is empty")
}
func TestActionsArtifactUploadWithoutToken(t *testing.T) {
@@ -87,20 +88,28 @@ func TestActionsArtifactUploadWithoutToken(t *testing.T) {
MakeRequest(t, req, http.StatusUnauthorized)
}
type (
listArtifactsResponseItem struct {
Name string `json:"name"`
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
listArtifactsResponse struct {
Count int64 `json:"count"`
Value []listArtifactsResponseItem `json:"value"`
}
downloadArtifactResponseItem struct {
Path string `json:"path"`
ItemType string `json:"itemType"`
ContentLocation string `json:"contentLocation"`
}
downloadArtifactResponse struct {
Value []downloadArtifactResponseItem `json:"value"`
}
)
func TestActionsArtifactDownload(t *testing.T) {
defer tests.PrepareTestEnv(t)()
type (
listArtifactsResponseItem struct {
Name string `json:"name"`
FileContainerResourceURL string `json:"fileContainerResourceUrl"`
}
listArtifactsResponse struct {
Count int64 `json:"count"`
Value []listArtifactsResponseItem `json:"value"`
}
)
req := NewRequest(t, "GET", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp := MakeRequest(t, req, http.StatusOK)
@@ -110,19 +119,8 @@ func TestActionsArtifactDownload(t *testing.T) {
assert.Equal(t, "artifact", listResp.Value[0].Name)
assert.Contains(t, listResp.Value[0].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
type (
downloadArtifactResponseItem struct {
Path string `json:"path"`
ItemType string `json:"itemType"`
ContentLocation string `json:"contentLocation"`
}
downloadArtifactResponse struct {
Value []downloadArtifactResponseItem `json:"value"`
}
)
idx := strings.Index(listResp.Value[0].FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
url := listResp.Value[0].FileContainerResourceURL[idx+1:]
url := listResp.Value[0].FileContainerResourceURL[idx+1:] + "?itemPath=artifact"
req = NewRequest(t, "GET", url)
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp = MakeRequest(t, req, http.StatusOK)
@@ -141,3 +139,116 @@ func TestActionsArtifactDownload(t *testing.T) {
body := strings.Repeat("A", 1024)
assert.Equal(t, resp.Body.String(), body)
}
func TestActionsArtifactUploadMultipleFile(t *testing.T) {
defer tests.PrepareTestEnv(t)()
const testArtifactName = "multi-files"
// acquire artifact upload url
req := NewRequestWithJSON(t, "POST", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts", getUploadArtifactRequest{
Type: "actions_storage",
Name: testArtifactName,
})
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp := MakeRequest(t, req, http.StatusOK)
var uploadResp uploadArtifactResponse
DecodeJSON(t, resp, &uploadResp)
assert.Contains(t, uploadResp.FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
type uploadingFile struct {
Path string
Content string
MD5 string
}
files := []uploadingFile{
{
Path: "abc.txt",
Content: strings.Repeat("A", 1024),
MD5: "1HsSe8LeLWh93ILaw1TEFQ==",
},
{
Path: "xyz/def.txt",
Content: strings.Repeat("B", 1024),
MD5: "6fgADK/7zjadf+6cB9Q1CQ==",
},
}
for _, f := range files {
// get upload url
idx := strings.Index(uploadResp.FileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
url := uploadResp.FileContainerResourceURL[idx:] + "?itemPath=" + testArtifactName + "/" + f.Path
// upload artifact chunk
req = NewRequestWithBody(t, "PUT", url, strings.NewReader(f.Content))
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
req.Header.Add("Content-Range", "bytes 0-1023/1024")
req.Header.Add("x-tfs-filelength", "1024")
req.Header.Add("x-actions-results-md5", f.MD5) // base64(md5(body))
MakeRequest(t, req, http.StatusOK)
}
t.Logf("Create artifact confirm")
// confirm artifact upload
req = NewRequest(t, "PATCH", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts?artifactName="+testArtifactName)
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
MakeRequest(t, req, http.StatusOK)
}
func TestActionsArtifactDownloadMultiFiles(t *testing.T) {
defer tests.PrepareTestEnv(t)()
const testArtifactName = "multi-files"
req := NewRequest(t, "GET", "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp := MakeRequest(t, req, http.StatusOK)
var listResp listArtifactsResponse
DecodeJSON(t, resp, &listResp)
assert.Equal(t, int64(2), listResp.Count)
var fileContainerResourceURL string
for _, v := range listResp.Value {
if v.Name == testArtifactName {
fileContainerResourceURL = v.FileContainerResourceURL
break
}
}
assert.Contains(t, fileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
idx := strings.Index(fileContainerResourceURL, "/api/actions_pipeline/_apis/pipelines/")
url := fileContainerResourceURL[idx+1:] + "?itemPath=" + testArtifactName
req = NewRequest(t, "GET", url)
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp = MakeRequest(t, req, http.StatusOK)
var downloadResp downloadArtifactResponse
DecodeJSON(t, resp, &downloadResp)
assert.Len(t, downloadResp.Value, 2)
downloads := [][]string{{"multi-files/abc.txt", "A"}, {"multi-files/xyz/def.txt", "B"}}
for _, v := range downloadResp.Value {
var bodyChar string
var path string
for _, d := range downloads {
if v.Path == d[0] {
path = d[0]
bodyChar = d[1]
break
}
}
value := v
assert.Equal(t, path, value.Path)
assert.Equal(t, "file", value.ItemType)
assert.Contains(t, value.ContentLocation, "/api/actions_pipeline/_apis/pipelines/workflows/791/artifacts")
idx = strings.Index(value.ContentLocation, "/api/actions_pipeline/_apis/pipelines/")
url = value.ContentLocation[idx:]
req = NewRequest(t, "GET", url)
req = addTokenAuthHeader(req, "Bearer 8061e833a55f6fc0157c98b883e91fcfeeb1a71a")
resp = MakeRequest(t, req, http.StatusOK)
body := strings.Repeat(bodyChar, 1024)
assert.Equal(t, resp.Body.String(), body)
}
}
@@ -49,8 +49,8 @@
{{ locale.artifactsTitle }}
</div>
<ul class="job-artifacts-list">
<li class="job-artifacts-item" v-for="artifact in artifacts" :key="artifact.id">
<a class="job-artifacts-link" target="_blank" :href="run.link+'/artifacts/'+artifact.id">
<li class="job-artifacts-item" v-for="artifact in artifacts" :key="artifact.name">
<a class="job-artifacts-link" target="_blank" :href="run.link+'/artifacts/'+artifact.name">
<SvgIcon name="octicon-file" class="ui text black job-artifacts-icon"/>{{ artifact.name }}
</a>
</li>