author    Lunny Xiao <xiaolunwen@gmail.com>    2024-05-30 15:33:50 +0800
committer GitHub <noreply@github.com>          2024-05-30 07:33:50 +0000
commit    fb7b743bd0f305a6462896398bcba2a74c6e391e (patch)
tree      ab821590da64878c10c369fea6c6ca9221d04085 /modules
parent    015efcd8bfd451ef593192eb43cfcfb7001f7861 (diff)
Azure blob storage support (#30995)
This PR implements object storage (LFS, Packages, Attachments, etc.) backed by Azure Blob Storage. It depends on the official Azure Go SDK and supports both the Azure Blob Storage cloud service and the Azurite mock server.

Replace #25458
Fix #22527

- [x] CI tests
- [x] Integration tests (the MSSQL integration tests are now based on azureblob)
- [x] Unit tests
- [x] CLI migrate storage
- [x] Documentation for the configuration added

TODO (other PRs):
- [ ] Improve the performance of `blob download`.

Co-authored-by: yp05327 <576951401@qq.com>
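For orientation, here is a minimal configuration sketch (not part of this commit) that selects the new `azureblob` storage type from a `[storage]` section. It is written in the same style as the new unit tests added below; the test name is illustrative, the endpoint and account name/key are the well-known Azurite development values also used in `azureblob_test.go`, and the asserted defaults (`gitea` container, `packages/` base path) come from the settings code in this diff:

```go
package setting

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Sketch only: select Azure Blob Storage for all storages via [storage].
func Test_azureBlobStorageConfigSketch(t *testing.T) {
	cfg, err := NewConfigProviderFromData(`
[storage]
STORAGE_TYPE = azureblob
AZURE_BLOB_ENDPOINT = http://devstoreaccount1.azurite.local:10000
AZURE_BLOB_ACCOUNT_NAME = devstoreaccount1
AZURE_BLOB_ACCOUNT_KEY = Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
AZURE_BLOB_CONTAINER = gitea
`)
	assert.NoError(t, err)

	// Each storage consumer inherits the [storage] defaults unless it overrides them.
	assert.NoError(t, loadPackagesFrom(cfg))
	assert.EqualValues(t, "azureblob", Packages.Storage.Type)
	assert.EqualValues(t, "gitea", Packages.Storage.AzureBlobConfig.Container)
	assert.EqualValues(t, "packages/", Packages.Storage.AzureBlobConfig.BasePath)
}
```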
Diffstat (limited to 'modules')
-rw-r--r--  modules/packages/content_store.go    2
-rw-r--r--  modules/setting/storage.go          90
-rw-r--r--  modules/setting/storage_test.go    112
-rw-r--r--  modules/storage/azureblob.go       322
-rw-r--r--  modules/storage/azureblob_test.go   56
-rw-r--r--  modules/storage/minio_test.go        2
-rw-r--r--  modules/storage/storage_test.go      1
-rw-r--r--  modules/util/io.go                  21
8 files changed, 593 insertions, 13 deletions
diff --git a/modules/packages/content_store.go b/modules/packages/content_store.go
index da93e6cf6b..2108be64d2 100644
--- a/modules/packages/content_store.go
+++ b/modules/packages/content_store.go
@@ -34,7 +34,7 @@ func (s *ContentStore) Get(key BlobHash256Key) (storage.Object, error) {
}
func (s *ContentStore) ShouldServeDirect() bool {
- return setting.Packages.Storage.MinioConfig.ServeDirect
+ return setting.Packages.Storage.ServeDirect()
}
func (s *ContentStore) GetServeDirectURL(key BlobHash256Key, filename string) (*url.URL, error) {
diff --git a/modules/setting/storage.go b/modules/setting/storage.go
index d80a61a45e..d44c968423 100644
--- a/modules/setting/storage.go
+++ b/modules/setting/storage.go
@@ -18,11 +18,14 @@ const (
LocalStorageType StorageType = "local"
// MinioStorageType is the type descriptor for minio storage
MinioStorageType StorageType = "minio"
+ // AzureBlobStorageType is the type descriptor for azure blob storage
+ AzureBlobStorageType StorageType = "azureblob"
)
var storageTypes = []StorageType{
LocalStorageType,
MinioStorageType,
+ AzureBlobStorageType,
}
// IsValidStorageType returns true if the given storage type is valid
@@ -50,25 +53,55 @@ type MinioStorageConfig struct {
BucketLookUpType string `ini:"MINIO_BUCKET_LOOKUP_TYPE" json:",omitempty"`
}
+func (cfg *MinioStorageConfig) ToShadow() {
+ if cfg.AccessKeyID != "" {
+ cfg.AccessKeyID = "******"
+ }
+ if cfg.SecretAccessKey != "" {
+ cfg.SecretAccessKey = "******"
+ }
+}
+
+// AzureBlobStorageConfig represents the configuration for an azure blob storage
+type AzureBlobStorageConfig struct {
+ Endpoint string `ini:"AZURE_BLOB_ENDPOINT" json:",omitempty"`
+ AccountName string `ini:"AZURE_BLOB_ACCOUNT_NAME" json:",omitempty"`
+ AccountKey string `ini:"AZURE_BLOB_ACCOUNT_KEY" json:",omitempty"`
+ Container string `ini:"AZURE_BLOB_CONTAINER" json:",omitempty"`
+ BasePath string `ini:"AZURE_BLOB_BASE_PATH" json:",omitempty"`
+ ServeDirect bool `ini:"SERVE_DIRECT"`
+}
+
+func (cfg *AzureBlobStorageConfig) ToShadow() {
+ if cfg.AccountKey != "" {
+ cfg.AccountKey = "******"
+ }
+ if cfg.AccountName != "" {
+ cfg.AccountName = "******"
+ }
+}
+
// Storage represents configuration of storages
type Storage struct {
- Type StorageType // local or minio
- Path string `json:",omitempty"` // for local type
- TemporaryPath string `json:",omitempty"`
- MinioConfig MinioStorageConfig // for minio type
+ Type StorageType // local or minio or azureblob
+ Path string `json:",omitempty"` // for local type
+ TemporaryPath string `json:",omitempty"`
+ MinioConfig MinioStorageConfig // for minio type
+ AzureBlobConfig AzureBlobStorageConfig // for azureblob type
}
func (storage *Storage) ToShadowCopy() Storage {
shadowStorage := *storage
- if shadowStorage.MinioConfig.AccessKeyID != "" {
- shadowStorage.MinioConfig.AccessKeyID = "******"
- }
- if shadowStorage.MinioConfig.SecretAccessKey != "" {
- shadowStorage.MinioConfig.SecretAccessKey = "******"
- }
+ shadowStorage.MinioConfig.ToShadow()
+ shadowStorage.AzureBlobConfig.ToShadow()
return shadowStorage
}
+func (storage *Storage) ServeDirect() bool {
+ return (storage.Type == MinioStorageType && storage.MinioConfig.ServeDirect) ||
+ (storage.Type == AzureBlobStorageType && storage.AzureBlobConfig.ServeDirect)
+}
+
const storageSectionName = "storage"
func getDefaultStorageSection(rootCfg ConfigProvider) ConfigSection {
@@ -84,6 +117,10 @@ func getDefaultStorageSection(rootCfg ConfigProvider) ConfigSection {
storageSec.Key("MINIO_INSECURE_SKIP_VERIFY").MustBool(false)
storageSec.Key("MINIO_CHECKSUM_ALGORITHM").MustString("default")
storageSec.Key("MINIO_BUCKET_LOOKUP_TYPE").MustString("auto")
+ storageSec.Key("AZURE_BLOB_ENDPOINT").MustString("")
+ storageSec.Key("AZURE_BLOB_ACCOUNT_NAME").MustString("")
+ storageSec.Key("AZURE_BLOB_ACCOUNT_KEY").MustString("")
+ storageSec.Key("AZURE_BLOB_CONTAINER").MustString("gitea")
return storageSec
}
@@ -107,6 +144,8 @@ func getStorage(rootCfg ConfigProvider, name, typ string, sec ConfigSection) (*S
return getStorageForLocal(targetSec, overrideSec, tp, name)
case string(MinioStorageType):
return getStorageForMinio(targetSec, overrideSec, tp, name)
+ case string(AzureBlobStorageType):
+ return getStorageForAzureBlob(targetSec, overrideSec, tp, name)
default:
return nil, fmt.Errorf("unsupported storage type %q", targetType)
}
@@ -247,7 +286,7 @@ func getStorageForLocal(targetSec, overrideSec ConfigSection, tp targetSecType,
return &storage, nil
}
-func getStorageForMinio(targetSec, overrideSec ConfigSection, tp targetSecType, name string) (*Storage, error) {
+func getStorageForMinio(targetSec, overrideSec ConfigSection, tp targetSecType, name string) (*Storage, error) { //nolint:dupl
var storage Storage
storage.Type = StorageType(targetSec.Key("STORAGE_TYPE").String())
if err := targetSec.MapTo(&storage.MinioConfig); err != nil {
@@ -275,3 +314,32 @@ func getStorageForMinio(targetSec, overrideSec ConfigSection, tp targetSecType,
}
return &storage, nil
}
+
+func getStorageForAzureBlob(targetSec, overrideSec ConfigSection, tp targetSecType, name string) (*Storage, error) { //nolint:dupl
+ var storage Storage
+ storage.Type = StorageType(targetSec.Key("STORAGE_TYPE").String())
+ if err := targetSec.MapTo(&storage.AzureBlobConfig); err != nil {
+ return nil, fmt.Errorf("map azure blob config failed: %v", err)
+ }
+
+ var defaultPath string
+ if storage.AzureBlobConfig.BasePath != "" {
+ if tp == targetSecIsStorage || tp == targetSecIsDefault {
+ defaultPath = strings.TrimSuffix(storage.AzureBlobConfig.BasePath, "/") + "/" + name + "/"
+ } else {
+ defaultPath = storage.AzureBlobConfig.BasePath
+ }
+ }
+ if defaultPath == "" {
+ defaultPath = name + "/"
+ }
+
+ if overrideSec != nil {
+ storage.AzureBlobConfig.ServeDirect = ConfigSectionKeyBool(overrideSec, "SERVE_DIRECT", storage.AzureBlobConfig.ServeDirect)
+ storage.AzureBlobConfig.BasePath = ConfigSectionKeyString(overrideSec, "AZURE_BLOB_BASE_PATH", defaultPath)
+ storage.AzureBlobConfig.Container = ConfigSectionKeyString(overrideSec, "AZURE_BLOB_CONTAINER", storage.AzureBlobConfig.Container)
+ } else {
+ storage.AzureBlobConfig.BasePath = defaultPath
+ }
+ return &storage, nil
+}
diff --git a/modules/setting/storage_test.go b/modules/setting/storage_test.go
index 6f38bf1d55..44a5de6826 100644
--- a/modules/setting/storage_test.go
+++ b/modules/setting/storage_test.go
@@ -97,6 +97,44 @@ STORAGE_TYPE = minio
assert.EqualValues(t, "repo-avatars/", RepoAvatar.Storage.MinioConfig.BasePath)
}
+func Test_getStorageInheritStorageTypeAzureBlob(t *testing.T) {
+ iniStr := `
+[storage]
+STORAGE_TYPE = azureblob
+`
+ cfg, err := NewConfigProviderFromData(iniStr)
+ assert.NoError(t, err)
+
+ assert.NoError(t, loadPackagesFrom(cfg))
+ assert.EqualValues(t, "azureblob", Packages.Storage.Type)
+ assert.EqualValues(t, "gitea", Packages.Storage.AzureBlobConfig.Container)
+ assert.EqualValues(t, "packages/", Packages.Storage.AzureBlobConfig.BasePath)
+
+ assert.NoError(t, loadRepoArchiveFrom(cfg))
+ assert.EqualValues(t, "azureblob", RepoArchive.Storage.Type)
+ assert.EqualValues(t, "gitea", RepoArchive.Storage.AzureBlobConfig.Container)
+ assert.EqualValues(t, "repo-archive/", RepoArchive.Storage.AzureBlobConfig.BasePath)
+
+ assert.NoError(t, loadActionsFrom(cfg))
+ assert.EqualValues(t, "azureblob", Actions.LogStorage.Type)
+ assert.EqualValues(t, "gitea", Actions.LogStorage.AzureBlobConfig.Container)
+ assert.EqualValues(t, "actions_log/", Actions.LogStorage.AzureBlobConfig.BasePath)
+
+ assert.EqualValues(t, "azureblob", Actions.ArtifactStorage.Type)
+ assert.EqualValues(t, "gitea", Actions.ArtifactStorage.AzureBlobConfig.Container)
+ assert.EqualValues(t, "actions_artifacts/", Actions.ArtifactStorage.AzureBlobConfig.BasePath)
+
+ assert.NoError(t, loadAvatarsFrom(cfg))
+ assert.EqualValues(t, "azureblob", Avatar.Storage.Type)
+ assert.EqualValues(t, "gitea", Avatar.Storage.AzureBlobConfig.Container)
+ assert.EqualValues(t, "avatars/", Avatar.Storage.AzureBlobConfig.BasePath)
+
+ assert.NoError(t, loadRepoAvatarFrom(cfg))
+ assert.EqualValues(t, "azureblob", RepoAvatar.Storage.Type)
+ assert.EqualValues(t, "gitea", RepoAvatar.Storage.AzureBlobConfig.Container)
+ assert.EqualValues(t, "repo-avatars/", RepoAvatar.Storage.AzureBlobConfig.BasePath)
+}
+
type testLocalStoragePathCase struct {
loader func(rootCfg ConfigProvider) error
storagePtr **Storage
@@ -465,3 +503,77 @@ MINIO_BASE_PATH = /lfs
assert.EqualValues(t, true, LFS.Storage.MinioConfig.UseSSL)
assert.EqualValues(t, "/lfs", LFS.Storage.MinioConfig.BasePath)
}
+
+func Test_getStorageConfiguration29(t *testing.T) {
+ cfg, err := NewConfigProviderFromData(`
+[repo-archive]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+`)
+ assert.NoError(t, err)
+ // assert.Error(t, loadRepoArchiveFrom(cfg))
+ // FIXME: this should return error but now ini package's MapTo() doesn't check type
+ assert.NoError(t, loadRepoArchiveFrom(cfg))
+}
+
+func Test_getStorageConfiguration30(t *testing.T) {
+ cfg, err := NewConfigProviderFromData(`
+[storage.repo-archive]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+`)
+ assert.NoError(t, err)
+ assert.NoError(t, loadRepoArchiveFrom(cfg))
+ assert.EqualValues(t, "my_account_name", RepoArchive.Storage.AzureBlobConfig.AccountName)
+ assert.EqualValues(t, "my_account_key", RepoArchive.Storage.AzureBlobConfig.AccountKey)
+ assert.EqualValues(t, "repo-archive/", RepoArchive.Storage.AzureBlobConfig.BasePath)
+}
+
+func Test_getStorageConfiguration31(t *testing.T) {
+ cfg, err := NewConfigProviderFromData(`
+[storage]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+AZURE_BLOB_BASE_PATH = /prefix
+`)
+ assert.NoError(t, err)
+ assert.NoError(t, loadRepoArchiveFrom(cfg))
+ assert.EqualValues(t, "my_account_name", RepoArchive.Storage.AzureBlobConfig.AccountName)
+ assert.EqualValues(t, "my_account_key", RepoArchive.Storage.AzureBlobConfig.AccountKey)
+ assert.EqualValues(t, "/prefix/repo-archive/", RepoArchive.Storage.AzureBlobConfig.BasePath)
+
+ cfg, err = NewConfigProviderFromData(`
+[storage]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+AZURE_BLOB_BASE_PATH = /prefix
+
+[lfs]
+AZURE_BLOB_BASE_PATH = /lfs
+`)
+ assert.NoError(t, err)
+ assert.NoError(t, loadLFSFrom(cfg))
+ assert.EqualValues(t, "my_account_name", LFS.Storage.AzureBlobConfig.AccountName)
+ assert.EqualValues(t, "my_account_key", LFS.Storage.AzureBlobConfig.AccountKey)
+ assert.EqualValues(t, "/lfs", LFS.Storage.AzureBlobConfig.BasePath)
+
+ cfg, err = NewConfigProviderFromData(`
+[storage]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+AZURE_BLOB_BASE_PATH = /prefix
+
+[storage.lfs]
+AZURE_BLOB_BASE_PATH = /lfs
+`)
+ assert.NoError(t, err)
+ assert.NoError(t, loadLFSFrom(cfg))
+ assert.EqualValues(t, "my_account_name", LFS.Storage.AzureBlobConfig.AccountName)
+ assert.EqualValues(t, "my_account_key", LFS.Storage.AzureBlobConfig.AccountKey)
+ assert.EqualValues(t, "/lfs", LFS.Storage.AzureBlobConfig.BasePath)
+}
diff --git a/modules/storage/azureblob.go b/modules/storage/azureblob.go
new file mode 100644
index 0000000000..52a7d1637e
--- /dev/null
+++ b/modules/storage/azureblob.go
@@ -0,0 +1,322 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+)
+
+var _ Object = &azureBlobObject{}
+
+type azureBlobObject struct {
+ blobClient *blob.Client
+ Context context.Context
+ Name string
+ Size int64
+ ModTime *time.Time
+ offset int64
+}
+
+func (a *azureBlobObject) Read(p []byte) (int, error) {
+ // TODO: improve performance; this could implement another interface such as io.WriterTo
+ if a.offset >= a.Size {
+ return 0, io.EOF
+ }
+ count := min(int64(len(p)), a.Size-a.offset)
+
+ res, err := a.blobClient.DownloadBuffer(a.Context, p, &blob.DownloadBufferOptions{
+ Range: blob.HTTPRange{
+ Offset: a.offset,
+ Count: count,
+ },
+ })
+ if err != nil {
+ return 0, convertAzureBlobErr(err)
+ }
+ a.offset += res
+
+ return int(res), nil
+}
+
+func (a *azureBlobObject) Close() error {
+ a.offset = 0
+ return nil
+}
+
+func (a *azureBlobObject) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case io.SeekStart:
+ case io.SeekCurrent:
+ offset += a.offset
+ case io.SeekEnd:
+ offset = a.Size - offset
+ default:
+ return 0, errors.New("Seek: invalid whence")
+ }
+
+ if offset > a.Size {
+ return 0, errors.New("Seek: invalid offset")
+ } else if offset < 0 {
+ return 0, errors.New("Seek: invalid offset")
+ }
+ a.offset = offset
+ return a.offset, nil
+}
+
+func (a *azureBlobObject) Stat() (os.FileInfo, error) {
+ return &azureBlobFileInfo{
+ a.Name,
+ a.Size,
+ *a.ModTime,
+ }, nil
+}
+
+var _ ObjectStorage = &AzureBlobStorage{}
+
+// AzureBlobStorage represents an azure blob storage
+type AzureBlobStorage struct {
+ cfg *setting.AzureBlobStorageConfig
+ ctx context.Context
+ credential *azblob.SharedKeyCredential
+ client *azblob.Client
+}
+
+func convertAzureBlobErr(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ if bloberror.HasCode(err, bloberror.BlobNotFound) {
+ return os.ErrNotExist
+ }
+ var respErr *azcore.ResponseError
+ if !errors.As(err, &respErr) {
+ return err
+ }
+ return fmt.Errorf("%s", respErr.ErrorCode)
+}
+
+// NewAzureBlobStorage returns an azure blob storage
+func NewAzureBlobStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error) {
+ config := cfg.AzureBlobConfig
+
+ log.Info("Creating Azure Blob storage at %s:%s with base path %s", config.Endpoint, config.Container, config.BasePath)
+
+ cred, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+ client, err := azblob.NewClientWithSharedKeyCredential(config.Endpoint, cred, &azblob.ClientOptions{})
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+
+ _, err = client.CreateContainer(ctx, config.Container, &container.CreateOptions{})
+ if err != nil {
+ // Check to see if we already own this container (which happens if you run this twice)
+ if !bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
+ return nil, convertAzureBlobErr(err)
+ }
+ }
+
+ return &AzureBlobStorage{
+ cfg: &config,
+ ctx: ctx,
+ credential: cred,
+ client: client,
+ }, nil
+}
+
+func (a *AzureBlobStorage) buildAzureBlobPath(p string) string {
+ p = util.PathJoinRelX(a.cfg.BasePath, p)
+ if p == "." || p == "/" {
+ p = "" // azure uses prefix, so path should be empty as relative path
+ }
+ return p
+}
+
+func (a *AzureBlobStorage) getObjectNameFromPath(path string) string {
+ s := strings.Split(path, "/")
+ return s[len(s)-1]
+}
+
+// Open opens a file
+func (a *AzureBlobStorage) Open(path string) (Object, error) {
+ blobClient, err := a.getBlobClient(path)
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+ res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+ return &azureBlobObject{
+ Context: a.ctx,
+ blobClient: blobClient,
+ Name: a.getObjectNameFromPath(path),
+ Size: *res.ContentLength,
+ ModTime: res.LastModified,
+ }, nil
+}
+
+// Save saves a file to azure blob storage
+func (a *AzureBlobStorage) Save(path string, r io.Reader, size int64) (int64, error) {
+ rd := util.NewCountingReader(r)
+ _, err := a.client.UploadStream(
+ a.ctx,
+ a.cfg.Container,
+ a.buildAzureBlobPath(path),
+ rd,
+ // TODO: support set block size and concurrency
+ &blockblob.UploadStreamOptions{},
+ )
+ if err != nil {
+ return 0, convertAzureBlobErr(err)
+ }
+ return int64(rd.Count()), nil
+}
+
+type azureBlobFileInfo struct {
+ name string
+ size int64
+ modTime time.Time
+}
+
+func (a azureBlobFileInfo) Name() string {
+ return path.Base(a.name)
+}
+
+func (a azureBlobFileInfo) Size() int64 {
+ return a.size
+}
+
+func (a azureBlobFileInfo) ModTime() time.Time {
+ return a.modTime
+}
+
+func (a azureBlobFileInfo) IsDir() bool {
+ return strings.HasSuffix(a.name, "/")
+}
+
+func (a azureBlobFileInfo) Mode() os.FileMode {
+ return os.ModePerm
+}
+
+func (a azureBlobFileInfo) Sys() any {
+ return nil
+}
+
+// Stat returns the stat information of the object
+func (a *AzureBlobStorage) Stat(path string) (os.FileInfo, error) {
+ blobClient, err := a.getBlobClient(path)
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+ res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+ s := strings.Split(path, "/")
+ return &azureBlobFileInfo{
+ s[len(s)-1],
+ *res.ContentLength,
+ *res.LastModified,
+ }, nil
+}
+
+// Delete deletes a file
+func (a *AzureBlobStorage) Delete(path string) error {
+ blobClient, err := a.getBlobClient(path)
+ if err != nil {
+ return convertAzureBlobErr(err)
+ }
+ _, err = blobClient.Delete(a.ctx, nil)
+ return convertAzureBlobErr(err)
+}
+
+// URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
+func (a *AzureBlobStorage) URL(path, name string) (*url.URL, error) {
+ blobClient, err := a.getBlobClient(path)
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+
+ startTime := time.Now()
+ u, err := blobClient.GetSASURL(sas.BlobPermissions{
+ Read: true,
+ }, time.Now().Add(5*time.Minute), &blob.GetSASURLOptions{
+ StartTime: &startTime,
+ })
+ if err != nil {
+ return nil, convertAzureBlobErr(err)
+ }
+
+ return url.Parse(u)
+}
+
+// IterateObjects iterates across the objects in the azure blob storage
+func (a *AzureBlobStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
+ dirName = a.buildAzureBlobPath(dirName)
+ if dirName != "" {
+ dirName += "/"
+ }
+ pager := a.client.NewListBlobsFlatPager(a.cfg.Container, &container.ListBlobsFlatOptions{
+ Prefix: &dirName,
+ })
+ for pager.More() {
+ resp, err := pager.NextPage(a.ctx)
+ if err != nil {
+ return convertAzureBlobErr(err)
+ }
+ for _, object := range resp.Segment.BlobItems {
+ blobClient, err := a.getBlobClient(*object.Name)
+ if err != nil {
+ return convertAzureBlobErr(err)
+ }
+ object := &azureBlobObject{
+ Context: a.ctx,
+ blobClient: blobClient,
+ Name: *object.Name,
+ Size: *object.Properties.ContentLength,
+ ModTime: object.Properties.LastModified,
+ }
+ if err := func(object *azureBlobObject, fn func(path string, obj Object) error) error {
+ defer object.Close()
+ return fn(strings.TrimPrefix(object.Name, a.cfg.BasePath), object)
+ }(object, fn); err != nil {
+ return convertAzureBlobErr(err)
+ }
+ }
+ }
+ return nil
+}
+
+// getBlobClient returns the blob client for the given path
+func (a *AzureBlobStorage) getBlobClient(path string) (*blob.Client, error) {
+ return a.client.ServiceClient().NewContainerClient(a.cfg.Container).NewBlobClient(a.buildAzureBlobPath(path)), nil
+}
+
+func init() {
+ RegisterStorageType(setting.AzureBlobStorageType, NewAzureBlobStorage)
+}
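To make the new backend concrete, here is a hedged usage sketch (not part of this commit) that drives it through the constructor and the ObjectStorage methods defined above. The Azurite endpoint and the account name/key are the development values from `azureblob_test.go`, the container and base path are illustrative, and errors are simply panicked to keep the sketch short:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
)

func main() {
	ctx := context.Background()
	st, err := storage.NewAzureBlobStorage(ctx, &setting.Storage{
		Type: setting.AzureBlobStorageType,
		AzureBlobConfig: setting.AzureBlobStorageConfig{
			Endpoint:    "http://devstoreaccount1.azurite.local:10000",
			AccountName: "devstoreaccount1",
			AccountKey:  "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
			Container:   "gitea",
			BasePath:    "example/",
		},
	})
	if err != nil {
		panic(err)
	}

	// Save streams the reader into a block blob under BasePath.
	content := "hello azure blob"
	if _, err := st.Save("a/1.txt", strings.NewReader(content), int64(len(content))); err != nil {
		panic(err)
	}

	// Open reads the object back and Stat reports its name and size.
	obj, err := st.Open("a/1.txt")
	if err != nil {
		panic(err)
	}
	defer obj.Close()
	fi, err := obj.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Name(), fi.Size())

	// URL returns a read-only SAS link that is valid for 5 minutes.
	u, err := st.URL("a/1.txt", "1.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())
}
```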
diff --git a/modules/storage/azureblob_test.go b/modules/storage/azureblob_test.go
new file mode 100644
index 0000000000..604870cb98
--- /dev/null
+++ b/modules/storage/azureblob_test.go
@@ -0,0 +1,56 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package storage
+
+import (
+ "os"
+ "testing"
+
+ "code.gitea.io/gitea/modules/setting"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAzureBlobStorageIterator(t *testing.T) {
+ if os.Getenv("CI") == "" {
+ t.Skip("azureBlobStorage not present outside of CI")
+ return
+ }
+ testStorageIterator(t, setting.AzureBlobStorageType, &setting.Storage{
+ AzureBlobConfig: setting.AzureBlobStorageConfig{
+ // https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#ip-style-url
+ Endpoint: "http://devstoreaccount1.azurite.local:10000",
+ // https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#well-known-storage-account-and-key
+ AccountName: "devstoreaccount1",
+ AccountKey: "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
+ Container: "test",
+ },
+ })
+}
+
+func TestAzureBlobStoragePath(t *testing.T) {
+ m := &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: ""}}
+ assert.Equal(t, "", m.buildAzureBlobPath("/"))
+ assert.Equal(t, "", m.buildAzureBlobPath("."))
+ assert.Equal(t, "a", m.buildAzureBlobPath("/a"))
+ assert.Equal(t, "a/b", m.buildAzureBlobPath("/a/b/"))
+
+ m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/"}}
+ assert.Equal(t, "", m.buildAzureBlobPath("/"))
+ assert.Equal(t, "", m.buildAzureBlobPath("."))
+ assert.Equal(t, "a", m.buildAzureBlobPath("/a"))
+ assert.Equal(t, "a/b", m.buildAzureBlobPath("/a/b/"))
+
+ m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/base"}}
+ assert.Equal(t, "base", m.buildAzureBlobPath("/"))
+ assert.Equal(t, "base", m.buildAzureBlobPath("."))
+ assert.Equal(t, "base/a", m.buildAzureBlobPath("/a"))
+ assert.Equal(t, "base/a/b", m.buildAzureBlobPath("/a/b/"))
+
+ m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/base/"}}
+ assert.Equal(t, "base", m.buildAzureBlobPath("/"))
+ assert.Equal(t, "base", m.buildAzureBlobPath("."))
+ assert.Equal(t, "base/a", m.buildAzureBlobPath("/a"))
+ assert.Equal(t, "base/a/b", m.buildAzureBlobPath("/a/b/"))
+}
diff --git a/modules/storage/minio_test.go b/modules/storage/minio_test.go
index ad11046dd6..6eb03c4a45 100644
--- a/modules/storage/minio_test.go
+++ b/modules/storage/minio_test.go
@@ -23,7 +23,7 @@ func TestMinioStorageIterator(t *testing.T) {
}
testStorageIterator(t, setting.MinioStorageType, &setting.Storage{
MinioConfig: setting.MinioStorageConfig{
- Endpoint: "127.0.0.1:9000",
+ Endpoint: "minio:9000",
AccessKeyID: "123456",
SecretAccessKey: "12345678",
Bucket: "gitea",
diff --git a/modules/storage/storage_test.go b/modules/storage/storage_test.go
index 5e3e9c7dba..7edde558f3 100644
--- a/modules/storage/storage_test.go
+++ b/modules/storage/storage_test.go
@@ -35,6 +35,7 @@ func testStorageIterator(t *testing.T, typStr Type, cfg *setting.Storage) {
"b": {"b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt"},
"": {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
"/": {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
+ ".": {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
"a/b/../../a": {"a/1.txt"},
}
for dir, expected := range expectedList {
diff --git a/modules/util/io.go b/modules/util/io.go
index 1559b019a0..eb200c9f9a 100644
--- a/modules/util/io.go
+++ b/modules/util/io.go
@@ -76,3 +76,24 @@ func IsEmptyReader(r io.Reader) (err error) {
}
}
}
+
+type CountingReader struct {
+ io.Reader
+ n int
+}
+
+var _ io.Reader = &CountingReader{}
+
+func (w *CountingReader) Count() int {
+ return w.n
+}
+
+func (w *CountingReader) Read(p []byte) (int, error) {
+ n, err := w.Reader.Read(p)
+ w.n += n
+ return n, err
+}
+
+func NewCountingReader(rd io.Reader) *CountingReader {
+ return &CountingReader{Reader: rd}
+}
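A short usage sketch (not part of this commit) of the new `CountingReader` helper, which `AzureBlobStorage.Save` above wraps around the incoming reader to report how many bytes were streamed:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"code.gitea.io/gitea/modules/util"
)

func main() {
	// Wrap any reader; Count() reports the bytes that have passed through Read.
	rd := util.NewCountingReader(strings.NewReader("hello"))
	if _, err := io.Copy(io.Discard, rd); err != nil {
		panic(err)
	}
	fmt.Println(rd.Count()) // 5
}
```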