author     wxiaoguang <wxiaoguang@gmail.com>  2023-03-29 00:02:13 +0800
committer  GitHub <noreply@github.com>        2023-03-29 00:02:13 +0800
commit     b73d1ac1eb7d5c985749dc721bbea7ebd14f9c83 (patch)
tree       313f5e06a34d5102e9166bd1c5d263d0ba76d385 /modules/lfs
parent     428d26d4a8afe1b2777a992723168117d0bf9699 (diff)
Make minio package support legacy MD5 checksum (#23768) (#23770)
Backport #23768 (no source code conflict, only some unrelated docs/test-ini conflicts).

Some storages, for example:

* https://developers.cloudflare.com/r2/api/s3/api/
* https://www.backblaze.com/b2/docs/s3_compatible_api.html

do not support the "x-amz-checksum-algorithm" header, but minio recently sends that header with CRC32C by default. So we have to tell minio to use the legacy MD5 checksum instead.
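The minio-side change itself is outside this diffstat, which is limited to modules/lfs. As a rough, hypothetical sketch only, assuming minio-go v7 and that its PutObjectOptions.SendContentMd5 flag is the relevant legacy-checksum switch (the endpoint, bucket, and object names below are made up), an upload that falls back to a Content-MD5 checksum could look like this:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical S3-compatible endpoint (e.g. an R2 or Backblaze B2 bucket)
	// that rejects the newer "x-amz-checksum-algorithm" header.
	client, err := minio.New("s3.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	body := strings.NewReader("hello lfs")

	// SendContentMd5 asks minio-go to compute and send a Content-MD5 header,
	// the legacy integrity check, instead of the CRC32C checksum headers that
	// some S3-compatible storages do not understand. (Assumed to be the switch
	// the commit title refers to; the actual wiring lives outside modules/lfs.)
	_, err = client.PutObject(context.Background(), "test-bucket", "test-object",
		body, body.Size(), minio.PutObjectOptions{
			ContentType:    "application/octet-stream",
			SendContentMd5: true,
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded with legacy MD5 checksum")
}
```

Whether Gitea exposes this through a dedicated storage setting is not visible from this page; the sketch only illustrates the kind of option the commit title refers to.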
Diffstat (limited to 'modules/lfs')
-rw-r--r--  modules/lfs/content_store.go  |  40
 1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/modules/lfs/content_store.go b/modules/lfs/content_store.go
index 94277a6b8e..2fbe55d813 100644
--- a/modules/lfs/content_store.go
+++ b/modules/lfs/content_store.go
@@ -59,15 +59,22 @@ func (s *ContentStore) Put(pointer Pointer, r io.Reader) error {
 		return err
 	}
 
-	// This shouldn't happen but it is sensible to test
-	if written != pointer.Size {
-		if err := s.Delete(p); err != nil {
-			log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, err)
+	// check again whether there is any error during the Save operation
+	// because some errors might be ignored by the Reader's caller
+	if wrappedRd.lastError != nil && !errors.Is(wrappedRd.lastError, io.EOF) {
+		err = wrappedRd.lastError
+	} else if written != pointer.Size {
+		err = ErrSizeMismatch
+	}
+
+	// if the upload failed, try to delete the file
+	if err != nil {
+		if errDel := s.Delete(p); errDel != nil {
+			log.Error("Cleaning the LFS OID[%s] failed: %v", pointer.Oid, errDel)
 		}
-		return ErrSizeMismatch
 	}
 
-	return nil
+	return err
 }
 
 // Exists returns true if the object exists in the content store.
@@ -108,6 +115,17 @@ type hashingReader struct {
 	expectedSize int64
 	hash         hash.Hash
 	expectedHash string
+	lastError    error
+}
+
+// recordError records the last error seen during the Save operation.
+// Some callers of the Reader don't respect the returned "err";
+// for example, MinIO's Put ignores the error if the written size equals the expected size.
+// So we must remember the error ourselves and check again later whether
+// ErrSizeMismatch or ErrHashMismatch occurred during the Save operation.
+func (r *hashingReader) recordError(err error) error {
+	r.lastError = err
+	return err
 }
 
 func (r *hashingReader) Read(b []byte) (int, error) {
@@ -117,22 +135,22 @@ func (r *hashingReader) Read(b []byte) (int, error) {
 		r.currentSize += int64(n)
 		wn, werr := r.hash.Write(b[:n])
 		if wn != n || werr != nil {
-			return n, werr
+			return n, r.recordError(werr)
 		}
 	}
 
-	if err != nil && err == io.EOF {
+	if errors.Is(err, io.EOF) || r.currentSize >= r.expectedSize {
 		if r.currentSize != r.expectedSize {
-			return n, ErrSizeMismatch
+			return n, r.recordError(ErrSizeMismatch)
 		}
 
 		shaStr := hex.EncodeToString(r.hash.Sum(nil))
 		if shaStr != r.expectedHash {
-			return n, ErrHashMismatch
+			return n, r.recordError(ErrHashMismatch)
 		}
 	}
 
-	return n, err
+	return n, r.recordError(err)
 }
 
 func newHashingReader(expectedSize int64, expectedHash string, reader io.Reader) *hashingReader {
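
To illustrate why the wrapped reader records its own error, here is a minimal, hypothetical sketch written as if it sat next to content_store.go in the lfs package (saveIgnoringLastError and putLikeTheCommit are invented names, not part of the commit). It imitates the behaviour the new comments attribute to MinIO's Put: the caller treats the upload as done once it has read the expected number of bytes and drops the error returned by that final Read, so only the later check of lastError can surface ErrSizeMismatch or ErrHashMismatch:

```go
package lfs

import (
	"errors"
	"io"
)

// saveIgnoringLastError is a hypothetical stand-in for a storage client such
// as MinIO's Put: it stops as soon as it has read the expected number of
// bytes and discards whatever error that final Read returned.
func saveIgnoringLastError(r io.Reader, expected int64) (int64, error) {
	buf := make([]byte, 32*1024)
	var written int64
	for written < expected {
		n, err := r.Read(buf)
		written += int64(n)
		if written >= expected {
			// The ErrSizeMismatch/ErrHashMismatch that this last Read may have
			// produced is dropped here; this is the situation recordError guards against.
			return written, nil
		}
		if err != nil {
			return written, err
		}
	}
	return written, nil
}

// putLikeTheCommit mirrors the post-Save check added above: after a caller
// like saveIgnoringLastError reports success, the Reader's own lastError is
// the only place the swallowed mismatch error still exists.
func putLikeTheCommit(pointer Pointer, r io.Reader) error {
	wrappedRd := newHashingReader(pointer.Size, pointer.Oid, r)
	written, err := saveIgnoringLastError(wrappedRd, pointer.Size)
	if err != nil {
		return err
	}
	if wrappedRd.lastError != nil && !errors.Is(wrappedRd.lastError, io.EOF) {
		return wrappedRd.lastError
	}
	if written != pointer.Size {
		return ErrSizeMismatch
	}
	return nil
}
```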