This allows streaming file chunks directly to S3 during upload.
Signed-off-by: Julius Härtl <jus@bitgrid.net>
'OCA\\DAV\\Traits\\PrincipalProxyTrait' => $baseDir . '/../lib/Traits/PrincipalProxyTrait.php',
'OCA\\DAV\\Upload\\AssemblyStream' => $baseDir . '/../lib/Upload/AssemblyStream.php',
'OCA\\DAV\\Upload\\ChunkingPlugin' => $baseDir . '/../lib/Upload/ChunkingPlugin.php',
+ 'OCA\\DAV\\Upload\\ChunkingV2Plugin' => $baseDir . '/../lib/Upload/ChunkingV2Plugin.php',
'OCA\\DAV\\Upload\\CleanupService' => $baseDir . '/../lib/Upload/CleanupService.php',
'OCA\\DAV\\Upload\\FutureFile' => $baseDir . '/../lib/Upload/FutureFile.php',
+ 'OCA\\DAV\\Upload\\PartFile' => $baseDir . '/../lib/Upload/PartFile.php',
'OCA\\DAV\\Upload\\RootCollection' => $baseDir . '/../lib/Upload/RootCollection.php',
'OCA\\DAV\\Upload\\UploadFile' => $baseDir . '/../lib/Upload/UploadFile.php',
'OCA\\DAV\\Upload\\UploadFolder' => $baseDir . '/../lib/Upload/UploadFolder.php',
'OCA\\DAV\\Traits\\PrincipalProxyTrait' => __DIR__ . '/..' . '/../lib/Traits/PrincipalProxyTrait.php',
'OCA\\DAV\\Upload\\AssemblyStream' => __DIR__ . '/..' . '/../lib/Upload/AssemblyStream.php',
'OCA\\DAV\\Upload\\ChunkingPlugin' => __DIR__ . '/..' . '/../lib/Upload/ChunkingPlugin.php',
+ 'OCA\\DAV\\Upload\\ChunkingV2Plugin' => __DIR__ . '/..' . '/../lib/Upload/ChunkingV2Plugin.php',
'OCA\\DAV\\Upload\\CleanupService' => __DIR__ . '/..' . '/../lib/Upload/CleanupService.php',
'OCA\\DAV\\Upload\\FutureFile' => __DIR__ . '/..' . '/../lib/Upload/FutureFile.php',
+ 'OCA\\DAV\\Upload\\PartFile' => __DIR__ . '/..' . '/../lib/Upload/PartFile.php',
'OCA\\DAV\\Upload\\RootCollection' => __DIR__ . '/..' . '/../lib/Upload/RootCollection.php',
'OCA\\DAV\\Upload\\UploadFile' => __DIR__ . '/..' . '/../lib/Upload/UploadFile.php',
'OCA\\DAV\\Upload\\UploadFolder' => __DIR__ . '/..' . '/../lib/Upload/UploadFolder.php',
use OCA\DAV\Connector\Sabre\Exception\FileLocked;
use OCA\DAV\Connector\Sabre\Exception\Forbidden;
use OCA\DAV\Connector\Sabre\Exception\InvalidPath;
+use OCA\DAV\Upload\FutureFile;
use OCP\Files\FileInfo;
use OCP\Files\Folder;
use OCP\Files\ForbiddenException;
return $this->info->getId();
}
+ public function getInternalPath(): string {
+ return $this->info->getInternalPath();
+ }
+
/**
* @param string $user
* @return int
use OCA\DAV\Provisioning\Apple\AppleProvisioningPlugin;
use OCA\DAV\SystemTag\SystemTagPlugin;
use OCA\DAV\Upload\ChunkingPlugin;
+use OCA\DAV\Upload\ChunkingV2Plugin;
use OCP\AppFramework\Http\Response;
use OCP\Diagnostics\IEventLogger;
use OCP\EventDispatcher\IEventDispatcher;
+use OCP\ICacheFactory;
use OCP\IRequest;
use OCP\Profiler\IProfiler;
use OCP\SabrePluginEvent;
$this->server->addPlugin(new CopyEtagHeaderPlugin());
$this->server->addPlugin(new RequestIdHeaderPlugin(\OC::$server->get(IRequest::class)));
+ $this->server->addPlugin(new ChunkingV2Plugin(\OCP\Server::get(ICacheFactory::class)));
$this->server->addPlugin(new ChunkingPlugin());
// allow setup of additional plugins
--- /dev/null
+<?php
+
+declare(strict_types=1);
+/*
+ * @copyright Copyright (c) 2021 Julius Härtl <jus@bitgrid.net>
+ *
+ * @author Julius Härtl <jus@bitgrid.net>
+ *
+ * @license GNU AGPL version 3 or any later version
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+namespace OCA\DAV\Upload;
+
+use Exception;
+use InvalidArgumentException;
+use OC\Files\Filesystem;
+use OC\Files\ObjectStore\ObjectStoreStorage;
+use OC\Files\View;
+use OC_Hook;
+use OCA\DAV\Connector\Sabre\Directory;
+use OCA\DAV\Connector\Sabre\File;
+use OCP\Files\IMimeTypeDetector;
+use OCP\Files\IRootFolder;
+use OCP\Files\ObjectStore\IObjectStoreMultiPartUpload;
+use OCP\Files\Storage\IChunkedFileWrite;
+use OCP\Files\StorageInvalidException;
+use OCP\ICache;
+use OCP\ICacheFactory;
+use OCP\Lock\ILockingProvider;
+use Sabre\DAV\Exception\BadRequest;
+use Sabre\DAV\Exception\InsufficientStorage;
+use Sabre\DAV\Exception\NotFound;
+use Sabre\DAV\Exception\PreconditionFailed;
+use Sabre\DAV\ICollection;
+use Sabre\DAV\INode;
+use Sabre\DAV\Server;
+use Sabre\DAV\ServerPlugin;
+use Sabre\HTTP\RequestInterface;
+use Sabre\HTTP\ResponseInterface;
+use Sabre\Uri;
+
+class ChunkingV2Plugin extends ServerPlugin {
+ /** @var Server */
+ private $server;
+ /** @var UploadFolder */
+ private $uploadFolder;
+ /** @var ICache */
+ private $cache;
+
+ private ?string $uploadId = null;
+ private ?string $uploadPath = null;
+
+ private const TEMP_TARGET = '.target';
+
+ public const CACHE_KEY = 'chunking-v2';
+ public const UPLOAD_TARGET_PATH = 'upload-target-path';
+ public const UPLOAD_TARGET_ID = 'upload-target-id';
+ public const UPLOAD_ID = 'upload-id';
+
+ private const DESTINATION_HEADER = 'Destination';
+
+ public function __construct(ICacheFactory $cacheFactory) {
+ $this->cache = $cacheFactory->createDistributed(self::CACHE_KEY);
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function initialize(Server $server) {
+ $server->on('afterMethod:MKCOL', [$this, 'afterMkcol']);
+ $server->on('beforeMethod:PUT', [$this, 'beforePut']);
+ $server->on('beforeMethod:DELETE', [$this, 'beforeDelete']);
+ $server->on('beforeMove', [$this, 'beforeMove'], 90);
+
+ $this->server = $server;
+ }
+
+ /**
+ * @param string $path
+ * @param bool $createIfNotExists
+ * @return FutureFile|UploadFile|ICollection|INode
+ */
+ private function getUploadFile(string $path, bool $createIfNotExists = false) {
+ try {
+ $actualFile = $this->server->tree->getNodeForPath($path);
+			// Only upload directly to the target file if it is on the same storage
+			// as the upload folder. There may be further potential to optimize here
+			// by also uploading to other storages directly, but that would require
+			// carefully picking the storage/path used in getStorage() as well.
+ if ($actualFile instanceof File && $this->uploadFolder->getStorage()->getId() === $actualFile->getNode()->getStorage()->getId()) {
+ return $actualFile;
+ }
+ } catch (NotFound $e) {
+			// If there is no target file yet, we upload to the upload folder first
+ }
+
+		// Use a file in the upload directory that will be copied or moved to the target afterwards
+ if ($createIfNotExists) {
+ $this->uploadFolder->createFile(self::TEMP_TARGET);
+ }
+
+ /** @var UploadFile $uploadFile */
+ $uploadFile = $this->uploadFolder->getChild(self::TEMP_TARGET);
+ return $uploadFile->getFile();
+ }
+
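+	/**
+	 * Handle the MKCOL request that creates the upload collection: if a Destination
+	 * header is present and the target storage supports chunked writes, start the
+	 * chunked write and remember its upload id and target path for later requests.
+	 */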
+ public function afterMkcol(RequestInterface $request, ResponseInterface $response): bool {
+ try {
+ $this->prepareUpload($request->getPath());
+ $this->checkPrerequisites(false);
+ } catch (BadRequest|StorageInvalidException|NotFound $e) {
+ return true;
+ }
+
+ $this->uploadPath = $this->server->calculateUri($this->server->httpRequest->getHeader(self::DESTINATION_HEADER));
+ $targetFile = $this->getUploadFile($this->uploadPath, true);
+ [$storage, $storagePath] = $this->getUploadStorage($this->uploadPath);
+
+ $this->uploadId = $storage->startChunkedWrite($storagePath);
+
+ $this->cache->set($this->uploadFolder->getName(), [
+ self::UPLOAD_ID => $this->uploadId,
+ self::UPLOAD_TARGET_PATH => $this->uploadPath,
+ self::UPLOAD_TARGET_ID => $targetFile->getId(),
+ ], 86400);
+
+ $response->setStatus(201);
+ return true;
+ }
+
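+	/**
+	 * Stream a numbered chunk (1-10000) directly to the target storage as one part
+	 * of the pending chunked write instead of persisting it in the upload folder.
+	 */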
+ public function beforePut(RequestInterface $request, ResponseInterface $response): bool {
+ try {
+ $this->prepareUpload(dirname($request->getPath()));
+ $this->checkPrerequisites();
+ } catch (StorageInvalidException|BadRequest|NotFound $e) {
+ return true;
+ }
+
+ [$storage, $storagePath] = $this->getUploadStorage($this->uploadPath);
+
+ $chunkName = basename($request->getPath());
+ $partId = is_numeric($chunkName) ? (int)$chunkName : -1;
+ if (!($partId >= 1 && $partId <= 10000)) {
+ throw new BadRequest('Invalid chunk name, must be numeric between 1 and 10000');
+ }
+
+ $uploadFile = $this->getUploadFile($this->uploadPath);
+ $tempTargetFile = null;
+
+ $additionalSize = (int)$request->getHeader('Content-Length');
+ if ($this->uploadFolder->childExists(self::TEMP_TARGET) && $this->uploadPath) {
+ /** @var UploadFile $tempTargetFile */
+ $tempTargetFile = $this->uploadFolder->getChild(self::TEMP_TARGET);
+ [$destinationDir, $destinationName] = Uri\split($this->uploadPath);
+ /** @var Directory $destinationParent */
+ $destinationParent = $this->server->tree->getNodeForPath($destinationDir);
+ $free = $storage->free_space($destinationParent->getInternalPath());
+ $newSize = $tempTargetFile->getSize() + $additionalSize;
+ if ($free >= 0 && ($tempTargetFile->getSize() > $free || $newSize > $free)) {
+ throw new InsufficientStorage("Insufficient space in $this->uploadPath");
+ }
+ }
+
+ $stream = $request->getBodyAsStream();
+ $storage->putChunkedWritePart($storagePath, $this->uploadId, (string)$partId, $stream, $additionalSize);
+
+ $storage->getCache()->update($uploadFile->getId(), ['size' => $uploadFile->getSize() + $additionalSize]);
+ if ($tempTargetFile) {
+ $storage->getPropagator()->propagateChange($tempTargetFile->getInternalPath(), time(), $additionalSize);
+ }
+
+ $response->setStatus(201);
+ return false;
+ }
+
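+	/**
+	 * On the final MOVE of the .file node, complete the chunked write on the target
+	 * storage, apply the X-OC-MTime/X-OC-CTime headers and emit the usual hooks.
+	 */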
+ public function beforeMove($sourcePath, $destination): bool {
+ try {
+ $this->prepareUpload(dirname($sourcePath));
+ $this->checkPrerequisites();
+ } catch (StorageInvalidException|BadRequest|NotFound|PreconditionFailed $e) {
+ return true;
+ }
+ [$storage, $storagePath] = $this->getUploadStorage($this->uploadPath);
+
+ $targetFile = $this->getUploadFile($this->uploadPath);
+
+ [$destinationDir, $destinationName] = Uri\split($destination);
+ /** @var Directory $destinationParent */
+ $destinationParent = $this->server->tree->getNodeForPath($destinationDir);
+ $destinationExists = $destinationParent->childExists($destinationName);
+
+ // allow sync clients to send the modification and creation time along in a header
+ $updateFileInfo = [];
+ if ($this->server->httpRequest->getHeader('X-OC-MTime') !== null) {
+ $updateFileInfo['mtime'] = $this->sanitizeMtime($this->server->httpRequest->getHeader('X-OC-MTime'));
+ $this->server->httpResponse->setHeader('X-OC-MTime', 'accepted');
+ }
+ if ($this->server->httpRequest->getHeader('X-OC-CTime') !== null) {
+ $updateFileInfo['creation_time'] = $this->sanitizeMtime($this->server->httpRequest->getHeader('X-OC-CTime'));
+ $this->server->httpResponse->setHeader('X-OC-CTime', 'accepted');
+ }
+ $updateFileInfo['mimetype'] = \OCP\Server::get(IMimeTypeDetector::class)->detectPath($destinationName);
+
+ if ($storage->instanceOfStorage(ObjectStoreStorage::class) && $storage->getObjectStore() instanceof IObjectStoreMultiPartUpload) {
+ /** @var ObjectStoreStorage $storage */
+ /** @var IObjectStoreMultiPartUpload $objectStore */
+ $objectStore = $storage->getObjectStore();
+ $parts = $objectStore->getMultipartUploads($storage->getURN($targetFile->getId()), $this->uploadId);
+ $size = 0;
+ foreach ($parts as $part) {
+ $size += $part['Size'];
+ }
+ $free = $storage->free_space($destinationParent->getInternalPath());
+ if ($free >= 0 && ($size > $free)) {
+ throw new InsufficientStorage("Insufficient space in $this->uploadPath");
+ }
+ }
+
+ $destinationInView = $destinationParent->getFileInfo()->getPath() . '/' . $destinationName;
+ $this->completeChunkedWrite($destinationInView);
+
+ $rootView = new View();
+ $rootView->putFileInfo($destinationInView, $updateFileInfo);
+
+ $sourceNode = $this->server->tree->getNodeForPath($sourcePath);
+ if ($sourceNode instanceof FutureFile) {
+ $this->uploadFolder->delete();
+ }
+
+ $this->server->emit('afterMove', [$sourcePath, $destination]);
+ $this->server->emit('afterUnbind', [$sourcePath]);
+ $this->server->emit('afterBind', [$destination]);
+
+ $response = $this->server->httpResponse;
+ $response->setHeader('Content-Type', 'application/xml; charset=utf-8');
+ $response->setHeader('Content-Length', '0');
+ $response->setStatus($destinationExists ? 204 : 201);
+ return false;
+ }
+
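+	/**
+	 * Cancel the pending chunked write when the upload folder is deleted.
+	 */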
+ public function beforeDelete(RequestInterface $request, ResponseInterface $response) {
+ try {
+ $this->prepareUpload($request->getPath());
+ if (!$this->uploadFolder instanceof UploadFolder) {
+ return true;
+ }
+
+ [$storage, $storagePath] = $this->getUploadStorage($this->uploadPath);
+ $storage->cancelChunkedWrite($storagePath, $this->uploadId);
+ return true;
+ } catch (NotFound $e) {
+ return true;
+ }
+ }
+
+ /**
+ * @throws BadRequest
+ * @throws PreconditionFailed
+ * @throws StorageInvalidException
+ */
+ private function checkPrerequisites(bool $checkUploadMetadata = true): void {
+ if (!$this->uploadFolder instanceof UploadFolder || empty($this->server->httpRequest->getHeader(self::DESTINATION_HEADER))) {
+ throw new BadRequest('Skipping chunked file writing as the destination header was not passed');
+ }
+ if (!$this->uploadFolder->getStorage()->instanceOfStorage(IChunkedFileWrite::class)) {
+ throw new StorageInvalidException('Storage does not support chunked file writing');
+ }
+
+ if ($checkUploadMetadata) {
+ if ($this->uploadId === null || $this->uploadPath === null) {
+ throw new PreconditionFailed('Missing metadata for chunked upload');
+ }
+ }
+ }
+
+ /**
+ * @return array [IStorage, string]
+ */
+ private function getUploadStorage(string $targetPath): array {
+ $storage = $this->uploadFolder->getStorage();
+ $targetFile = $this->getUploadFile($targetPath);
+ return [$storage, $targetFile->getInternalPath()];
+ }
+
+ protected function sanitizeMtime(string $mtimeFromRequest): int {
+ if (!is_numeric($mtimeFromRequest)) {
+ throw new InvalidArgumentException('X-OC-MTime header must be an integer (unix timestamp).');
+ }
+
+ return (int)$mtimeFromRequest;
+ }
+
+ /**
+ * @throws NotFound
+ */
+ public function prepareUpload($path): void {
+ $this->uploadFolder = $this->server->tree->getNodeForPath($path);
+ $uploadMetadata = $this->cache->get($this->uploadFolder->getName());
+ $this->uploadId = $uploadMetadata[self::UPLOAD_ID] ?? null;
+ $this->uploadPath = $uploadMetadata[self::UPLOAD_TARGET_PATH] ?? null;
+ }
+
+ private function completeChunkedWrite(string $targetAbsolutePath): void {
+ $uploadFile = $this->getUploadFile($this->uploadPath)->getNode();
+ [$storage, $storagePath] = $this->getUploadStorage($this->uploadPath);
+
+ $rootFolder = \OCP\Server::get(IRootFolder::class);
+ $exists = $rootFolder->nodeExists($targetAbsolutePath);
+
+ $uploadFile->lock(ILockingProvider::LOCK_SHARED);
+ $this->emitPreHooks($targetAbsolutePath, $exists);
+ try {
+ $uploadFile->changeLock(ILockingProvider::LOCK_EXCLUSIVE);
+ $storage->completeChunkedWrite($storagePath, $this->uploadId);
+ $uploadFile->changeLock(ILockingProvider::LOCK_SHARED);
+ } catch (Exception $e) {
+ $uploadFile->unlock(ILockingProvider::LOCK_EXCLUSIVE);
+ throw $e;
+ }
+
+		// If the file was not uploaded to the user storage directly, we need to copy/move it to the target
+ try {
+ $uploadFileAbsolutePath = Filesystem::getRoot() . $uploadFile->getPath();
+ if ($uploadFileAbsolutePath !== $targetAbsolutePath) {
+ $uploadFile = $rootFolder->get($uploadFile->getFileInfo()->getPath());
+ if ($exists) {
+ $uploadFile->copy($targetAbsolutePath);
+ } else {
+ $uploadFile->move($targetAbsolutePath);
+ }
+ }
+ $this->emitPostHooks($targetAbsolutePath, $exists);
+ } catch (Exception $e) {
+ $uploadFile->unlock(ILockingProvider::LOCK_SHARED);
+ throw $e;
+ }
+ }
+
+ private function emitPreHooks(string $target, bool $exists): void {
+ $hookPath = $this->getHookPath($target);
+ if (!$exists) {
+ OC_Hook::emit(Filesystem::CLASSNAME, Filesystem::signal_create, [
+ Filesystem::signal_param_path => $hookPath,
+ ]);
+ } else {
+ OC_Hook::emit(Filesystem::CLASSNAME, Filesystem::signal_update, [
+ Filesystem::signal_param_path => $hookPath,
+ ]);
+ }
+ OC_Hook::emit(Filesystem::CLASSNAME, Filesystem::signal_write, [
+ Filesystem::signal_param_path => $hookPath,
+ ]);
+ }
+
+ private function emitPostHooks(string $target, bool $exists): void {
+ $hookPath = $this->getHookPath($target);
+ if (!$exists) {
+ OC_Hook::emit(Filesystem::CLASSNAME, Filesystem::signal_post_create, [
+ Filesystem::signal_param_path => $hookPath,
+ ]);
+ } else {
+ OC_Hook::emit(Filesystem::CLASSNAME, Filesystem::signal_post_update, [
+ Filesystem::signal_param_path => $hookPath,
+ ]);
+ }
+ OC_Hook::emit(Filesystem::CLASSNAME, Filesystem::signal_post_write, [
+ Filesystem::signal_param_path => $hookPath,
+ ]);
+ }
+
+ private function getHookPath(string $path): ?string {
+ if (!Filesystem::getView()) {
+ return $path;
+ }
+ return Filesystem::getView()->getRelativePath($path);
+ }
+}
* @package OCA\DAV\Upload
*/
class FutureFile implements \Sabre\DAV\IFile {
-
/** @var Directory */
private $root;
/** @var string */
return AssemblyStream::wrap($nodes);
}
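+
+	/**
+	 * Internal storage path of the assembled ".file" target node.
+	 */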
+ public function getPath() {
+ return $this->root->getFileInfo()->getInternalPath() . '/.file';
+ }
+
/**
* @inheritdoc
*/
--- /dev/null
+<?php
+/**
+ * @copyright Copyright (c) 2016, ownCloud, Inc.
+ *
+ * @author Christoph Wurst <christoph@winzerhof-wurst.at>
+ * @author Lukas Reschke <lukas@statuscode.ch>
+ * @author Thomas Müller <thomas.mueller@tmit.eu>
+ *
+ * @license AGPL-3.0
+ *
+ * This code is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License, version 3,
+ * along with this program. If not, see <http://www.gnu.org/licenses/>
+ *
+ */
+namespace OCA\DAV\Upload;
+
+use OCA\DAV\Connector\Sabre\Directory;
+use Sabre\DAV\Exception\Forbidden;
+use Sabre\DAV\IFile;
+
+/**
+ * This class represents an upload part that is not present on the storage itself
+ * but is handled directly by an external storage service such as S3 via multipart upload.
+ */
+class PartFile implements IFile {
+ /** @var Directory */
+ private $root;
+ /** @var array */
+ private $partInfo;
+
+ public function __construct(Directory $root, array $partInfo) {
+ $this->root = $root;
+ $this->partInfo = $partInfo;
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function put($data) {
+ throw new Forbidden('Permission denied to put into this file');
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function get() {
+ throw new Forbidden('Permission denied to get this file');
+ }
+
+ public function getPath() {
+ return $this->root->getFileInfo()->getInternalPath() . '/' . $this->partInfo['PartNumber'];
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function getContentType() {
+ return 'application/octet-stream';
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function getETag() {
+ return $this->partInfo['ETag'];
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function getSize() {
+ return $this->partInfo['Size'];
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function delete() {
+ $this->root->delete();
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function getName() {
+ return $this->partInfo['PartNumber'];
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function setName($name) {
+ throw new Forbidden('Permission denied to rename this file');
+ }
+
+ /**
+ * @inheritdoc
+ */
+ public function getLastModified() {
+ return $this->partInfo['LastModified'];
+ }
+}
return $this->file->get();
}
+ public function getId() {
+ return $this->file->getId();
+ }
+
public function getContentType() {
return $this->file->getContentType();
}
public function getLastModified() {
return $this->file->getLastModified();
}
+
+ public function getInternalPath(): string {
+ return $this->file->getInternalPath();
+ }
+
+ public function getFile(): File {
+ return $this->file;
+ }
+
+ public function getNode() {
+ return $this->file->getNode();
+ }
}
*/
namespace OCA\DAV\Upload;
+use OC\Files\ObjectStore\ObjectStoreStorage;
use OCA\DAV\Connector\Sabre\Directory;
+use OCP\Files\ObjectStore\IObjectStoreMultiPartUpload;
+use OCP\Files\Storage\IStorage;
use Sabre\DAV\Exception\Forbidden;
use Sabre\DAV\ICollection;
class UploadFolder implements ICollection {
-
/** @var Directory */
private $node;
/** @var CleanupService */
private $cleanupService;
+ /** @var IStorage */
+ private $storage;
- public function __construct(Directory $node, CleanupService $cleanupService) {
+ public function __construct(Directory $node, CleanupService $cleanupService, IStorage $storage) {
$this->node = $node;
$this->cleanupService = $cleanupService;
+ $this->storage = $storage;
}
public function createFile($name, $data = null) {
$children[] = new UploadFile($child);
}
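+		// Parts of a pending object store multipart upload are not stored in the
+		// upload folder itself, so expose them as additional children here.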
+ if ($this->storage->instanceOfStorage(ObjectStoreStorage::class)) {
+ /** @var ObjectStoreStorage $storage */
+ $objectStore = $this->storage->getObjectStore();
+ if ($objectStore instanceof IObjectStoreMultiPartUpload) {
+ $cache = \OC::$server->getMemCacheFactory()->createDistributed(ChunkingV2Plugin::CACHE_KEY);
+ $uploadSession = $cache->get($this->getName());
+ if ($uploadSession) {
+ $uploadId = $uploadSession[ChunkingV2Plugin::UPLOAD_ID];
+ $id = $uploadSession[ChunkingV2Plugin::UPLOAD_TARGET_ID];
+ $parts = $objectStore->getMultipartUploads($this->storage->getURN($id), $uploadId);
+ foreach ($parts as $part) {
+ $children[] = new PartFile($this->node, $part);
+ }
+ }
+ }
+ }
+
return $children;
}
public function getLastModified() {
return $this->node->getLastModified();
}
+
+ public function getStorage() {
+ return $this->storage;
+ }
}
use Sabre\DAV\ICollection;
class UploadHome implements ICollection {
-
/** @var array */
private $principalInfo;
/** @var CleanupService */
}
public function getChild($name): UploadFolder {
- return new UploadFolder($this->impl()->getChild($name), $this->cleanupService);
+ return new UploadFolder($this->impl()->getChild($name), $this->cleanupService, $this->getStorage());
}
public function getChildren(): array {
return array_map(function ($node) {
- return new UploadFolder($node, $this->cleanupService);
+ return new UploadFolder($node, $this->cleanupService, $this->getStorage());
}, $this->impl()->getChildren());
}
* @return Directory
*/
private function impl() {
+ $view = $this->getView();
+ $rootInfo = $view->getFileInfo('');
+ return new Directory($view, $rootInfo);
+ }
+
+ private function getView() {
$rootView = new View();
$user = \OC::$server->getUserSession()->getUser();
Filesystem::initMountPoints($user->getUID());
if (!$rootView->file_exists('/' . $user->getUID() . '/uploads')) {
$rootView->mkdir('/' . $user->getUID() . '/uploads');
}
- $view = new View('/' . $user->getUID() . '/uploads');
- $rootInfo = $view->getFileInfo('');
- return new Directory($view, $rootInfo);
+ return new View('/' . $user->getUID() . '/uploads');
+ }
+
+ private function getStorage() {
+ $view = $this->getView();
+ $storage = $view->getFileInfo('')->getStorage();
+ return $storage;
}
}
&& this.getFile().size > this.uploader.fileUploadParam.maxChunkSize
) {
data.isChunked = true;
+ var headers = {
+ Destination: this.uploader.davClient._buildUrl(this.getTargetDestination())
+ };
+
chunkFolderPromise = this.uploader.davClient.createDirectory(
- 'uploads/' + OC.getCurrentUser().uid + '/' + this.getId()
+ 'uploads/' + OC.getCurrentUser().uid + '/' + this.getId(), headers
);
// TODO: if fails, it means same id already existed, need to retry
} else {
}
if (size) {
headers['OC-Total-Length'] = size;
-
}
+ headers['Destination'] = this.uploader.davClient._buildUrl(this.getTargetDestination());
return this.uploader.davClient.move(
'uploads/' + uid + '/' + this.getId() + '/.file',
- 'files/' + uid + '/' + OC.joinPaths(this.getFullPath(), this.getFileName()),
+ this.getTargetDestination(),
true,
headers
);
},
+ getTargetDestination: function() {
+ var uid = OC.getCurrentUser().uid;
+ return 'files/' + uid + '/' + OC.joinPaths(this.getFullPath(), this.getFileName());
+ },
+
_deleteChunkFolder: function() {
// delete transfer directory for this upload
this.uploader.davClient.remove(
}
var range = data.contentRange.split(' ')[1];
var chunkId = range.split('/')[0].split('-')[0];
+			// For chunking v2, use a numeric chunk id (the part number) and set the Destination header on all requests
+ chunkId = Math.ceil((data.chunkSize+Number(chunkId)) / upload.uploader.fileUploadParam.maxChunkSize);
+ data.headers['Destination'] = self.davClient._buildUrl(upload.getTargetDestination());
+
data.url = OC.getRootPath() +
'/remote.php/dav/uploads' +
'/' + OC.getCurrentUser().uid +
promise = dfd.promise(),
jqXHR,
upload;
+
+ // Dynamically adjust the chunk size for Chunking V2 to fit into the 10000 chunk limit
+		if (file.size / mcs > 10000) {
+			mcs = Math.ceil(file.size / 10000);
+		}
+
if (!(this._isXHRUpload(options) && slice && (ub || mcs < fs)) ||
options.data) {
return false;
$options['auth'] = [$this->currentUser, $this->regularUser];
}
$options['headers'] = [
- 'OCS_APIREQUEST' => 'true'
+ 'OCS-APIRequest' => 'true'
];
if ($body instanceof TableNode) {
$fd = $body->getRowsHash();
* @param string $user
*/
public function loggingInUsingWebAs($user) {
- $loginUrl = substr($this->baseUrl, 0, -5) . '/login';
+ $loginUrl = substr($this->baseUrl, 0, -5) . '/index.php/login';
// Request a new session and extract CSRF token
$client = new Client();
$response = $client->get(
/** @var int */
private $storedFileID = null;
+ private string $s3MultipartDestination;
+ private string $uploadId;
+
/**
* @Given /^using dav path "([^"]*)"$/
*/
* @Given user :user creates a new chunking upload with id :id
*/
public function userCreatesANewChunkingUploadWithId($user, $id) {
+ $this->parts = [];
$destination = '/uploads/' . $user . '/' . $id;
$this->makeDavRequest($user, 'MKCOL', $destination, [], null, "uploads");
}
}
}
+
+ /**
+ * @Given user :user creates a new chunking v2 upload with id :id and destination :targetDestination
+ */
+ public function userCreatesANewChunkingv2UploadWithIdAndDestination($user, $id, $targetDestination) {
+ $this->s3MultipartDestination = $this->getTargetDestination($user, $targetDestination);
+ $this->newUploadId();
+ $destination = '/uploads/' . $user . '/' . $this->getUploadId($id);
+ $this->response = $this->makeDavRequest($user, 'MKCOL', $destination, [
+ 'Destination' => $this->s3MultipartDestination,
+ ], null, "uploads");
+ }
+
+ /**
+ * @Given user :user uploads new chunk v2 file :num to id :id
+ */
+ public function userUploadsNewChunkv2FileToIdAndDestination($user, $num, $id) {
+ $data = \GuzzleHttp\Psr7\Utils::streamFor(fopen('/tmp/part-upload-' . $num, 'r'));
+ $destination = '/uploads/' . $user . '/' . $this->getUploadId($id) . '/' . $num;
+ $this->response = $this->makeDavRequest($user, 'PUT', $destination, [
+ 'Destination' => $this->s3MultipartDestination
+ ], $data, "uploads");
+ }
+
+ /**
+ * @Given user :user moves new chunk v2 file with id :id
+ */
+ public function userMovesNewChunkv2FileWithIdToMychunkedfileAndDestination($user, $id) {
+ $source = '/uploads/' . $user . '/' . $this->getUploadId($id) . '/.file';
+ try {
+ $this->response = $this->makeDavRequest($user, 'MOVE', $source, [
+ 'Destination' => $this->s3MultipartDestination,
+ ], null, "uploads");
+ } catch (\GuzzleHttp\Exception\ServerException $e) {
+ // 5xx responses cause a server exception
+ $this->response = $e->getResponse();
+ } catch (\GuzzleHttp\Exception\ClientException $e) {
+ // 4xx responses cause a client exception
+ $this->response = $e->getResponse();
+ }
+ }
+
+ private function getTargetDestination(string $user, string $destination): string {
+ return substr($this->baseUrl, 0, -4) . $this->getDavFilesPath($user) . $destination;
+ }
+
+ private function getUploadId(string $id): string {
+ return $id . '-' . $this->uploadId;
+ }
+
+ private function newUploadId() {
+ $this->uploadId = (string)time();
+ }
+
/**
* @Given /^Downloading file "([^"]*)" as "([^"]*)"$/
*/
$currentFileID = $this->getFileIdForPath($user, $path);
Assert::assertEquals($currentFileID, $this->storedFileID);
}
+
+ /**
+ * @Given /^user "([^"]*)" creates a file locally with "([^"]*)" x 5 MB chunks$/
+ */
+	public function userCreatesAFileLocallyWithChunks($user, $chunks) {
+		$this->parts = [];
+		for ($i = 1; $i <= (int)$chunks; $i++) {
+ $randomletter = substr(str_shuffle("abcdefghijklmnopqrstuvwxyz"), 0, 1);
+ file_put_contents('/tmp/part-upload-' . $i, str_repeat($randomletter, 5 * 1024 * 1024));
+ $this->parts[] = '/tmp/part-upload-' . $i;
+ }
+ }
+
+ /**
+ * @Given user :user creates the chunk :id with a size of :size MB
+ */
+ public function userCreatesAChunk($user, $id, $size) {
+ $randomletter = substr(str_shuffle("abcdefghijklmnopqrstuvwxyz"), 0, 1);
+ file_put_contents('/tmp/part-upload-' . $id, str_repeat($randomletter, (int)$size * 1024 * 1024));
+ $this->parts[] = '/tmp/part-upload-' . $id;
+ }
+
+ /**
+ * @Then /^Downloaded content should be the created file$/
+ */
+ public function downloadedContentShouldBeTheCreatedFile() {
+ $content = '';
+ sort($this->parts);
+ foreach ($this->parts as $part) {
+ $content .= file_get_contents($part);
+ }
+ Assert::assertEquals($content, (string)$this->response->getBody());
+ }
+
+ /**
+ * @Then /^the S3 multipart upload was successful with status "([^"]*)"$/
+ */
+	public function theS3MultipartUploadWasSuccessful($status) {
+ Assert::assertEquals((int)$status, $this->response->getStatusCode());
+ }
}
And As an "user1"
And user "user1" created a folder "/testquota"
And as "user1" creating a share with
- | path | testquota |
- | shareType | 0 |
- | permissions | 31 |
- | shareWith | user0 |
+ | path | testquota |
+ | shareType | 0 |
+ | permissions | 31 |
+ | shareWith | user0 |
And user "user0" accepts last share
And As an "user0"
When User "user0" uploads file "data/textfile.txt" to "/testquota/asdf.txt"
And As an "user1"
And user "user1" created a folder "/testshare "
Then the HTTP status code should be "400"
+
+ @s3-multipart
+ Scenario: Upload chunked file asc with new chunking v2
+ Given using new dav path
+ And user "user0" exists
+ And user "user0" creates a file locally with "3" x 5 MB chunks
+ And user "user0" creates a new chunking v2 upload with id "chunking-42" and destination "/myChunkedFile1.txt"
+ And user "user0" uploads new chunk v2 file "1" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "2" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "3" to id "chunking-42"
+ And user "user0" moves new chunk v2 file with id "chunking-42"
+ Then the S3 multipart upload was successful with status "201"
+ When As an "user0"
+ And Downloading file "/myChunkedFile1.txt"
+ Then Downloaded content should be the created file
+
+ @s3-multipart
+ Scenario: Upload chunked file desc with new chunking v2
+ Given using new dav path
+ And user "user0" exists
+ And user "user0" creates a file locally with "3" x 5 MB chunks
+ And user "user0" creates a new chunking v2 upload with id "chunking-42" and destination "/myChunkedFile.txt"
+ And user "user0" uploads new chunk v2 file "3" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "2" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "1" to id "chunking-42"
+ And user "user0" moves new chunk v2 file with id "chunking-42"
+ Then the S3 multipart upload was successful with status "201"
+ When As an "user0"
+ And Downloading file "/myChunkedFile.txt"
+ Then Downloaded content should be the created file
+
+ @s3-multipart
+ Scenario: Upload chunked file with random chunk sizes
+ Given using new dav path
+ And user "user0" exists
+ And user "user0" creates a new chunking v2 upload with id "chunking-random" and destination "/myChunkedFile.txt"
+ And user user0 creates the chunk 1 with a size of 5 MB
+ And user user0 creates the chunk 2 with a size of 7 MB
+ And user user0 creates the chunk 3 with a size of 9 MB
+ And user user0 creates the chunk 4 with a size of 1 MB
+ And user "user0" uploads new chunk v2 file "1" to id "chunking-random"
+ And user "user0" uploads new chunk v2 file "3" to id "chunking-random"
+ And user "user0" uploads new chunk v2 file "2" to id "chunking-random"
+ And user "user0" uploads new chunk v2 file "4" to id "chunking-random"
+ And user "user0" moves new chunk v2 file with id "chunking-random"
+ Then the S3 multipart upload was successful with status "201"
+ When As an "user0"
+ And Downloading file "/myChunkedFile.txt"
+ Then Downloaded content should be the created file
+
+ @s3-multipart
+ Scenario: Upload chunked file with too low chunk sizes
+ Given using new dav path
+ And user "user0" exists
+ And user "user0" creates a new chunking v2 upload with id "chunking-random" and destination "/myChunkedFile.txt"
+ And user user0 creates the chunk 1 with a size of 5 MB
+ And user user0 creates the chunk 2 with a size of 2 MB
+ And user user0 creates the chunk 3 with a size of 5 MB
+ And user user0 creates the chunk 4 with a size of 1 MB
+ And user "user0" uploads new chunk v2 file "1" to id "chunking-random"
+ And user "user0" uploads new chunk v2 file "3" to id "chunking-random"
+ And user "user0" uploads new chunk v2 file "2" to id "chunking-random"
+ And user "user0" uploads new chunk v2 file "4" to id "chunking-random"
+ And user "user0" moves new chunk v2 file with id "chunking-random"
+ Then the HTTP status code should be "500"
+
+ @s3-multipart
+ Scenario: Upload chunked file with special characters with new chunking v2
+ Given using new dav path
+ And user "user0" exists
+ And user "user0" creates a file locally with "3" x 5 MB chunks
+ And user "user0" creates a new chunking v2 upload with id "chunking-42" and destination "/äöü.txt"
+ And user "user0" uploads new chunk v2 file "1" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "2" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "3" to id "chunking-42"
+ And user "user0" moves new chunk v2 file with id "chunking-42"
+ Then the S3 multipart upload was successful with status "201"
+ When As an "user0"
+ And Downloading file "/äöü.txt"
+ Then Downloaded content should be the created file
+
+ @s3-multipart
+ Scenario: Upload chunked file with special characters in path with new chunking v2
+ Given using new dav path
+ And user "user0" exists
+ And User "user0" created a folder "üäöé"
+ And user "user0" creates a file locally with "3" x 5 MB chunks
+ And user "user0" creates a new chunking v2 upload with id "chunking-42" and destination "/üäöé/äöü.txt"
+ And user "user0" uploads new chunk v2 file "1" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "2" to id "chunking-42"
+ And user "user0" uploads new chunk v2 file "3" to id "chunking-42"
+ And user "user0" moves new chunk v2 file with id "chunking-42"
+ Then the S3 multipart upload was successful with status "201"
+ When As an "user0"
+ And Downloading file "/üäöé/äöü.txt"
+ Then Downloaded content should be the created file
return promise
},
- _simpleCall: function(method, path) {
+ _simpleCall: function(method, path, headers) {
if (!path) {
throw 'Missing argument "path"'
}
this._client.request(
method,
- this._buildUrl(path)
+ this._buildUrl(path),
+ headers ? headers : {}
).then(
function(result) {
if (self._isSuccessStatus(result.status)) {
*
* @returns {Promise}
*/
- createDirectory: function(path) {
- return this._simpleCall('MKCOL', path)
+ createDirectory: function(path, headers) {
+ return this._simpleCall('MKCOL', path, headers)
},
/**
'OCP\\Files\\Notify\\INotifyHandler' => $baseDir . '/lib/public/Files/Notify/INotifyHandler.php',
'OCP\\Files\\Notify\\IRenameChange' => $baseDir . '/lib/public/Files/Notify/IRenameChange.php',
'OCP\\Files\\ObjectStore\\IObjectStore' => $baseDir . '/lib/public/Files/ObjectStore/IObjectStore.php',
+ 'OCP\\Files\\ObjectStore\\IObjectStoreMultiPartUpload' => $baseDir . '/lib/public/Files/ObjectStore/IObjectStoreMultiPartUpload.php',
'OCP\\Files\\ReservedWordException' => $baseDir . '/lib/public/Files/ReservedWordException.php',
'OCP\\Files\\Search\\ISearchBinaryOperator' => $baseDir . '/lib/public/Files/Search/ISearchBinaryOperator.php',
'OCP\\Files\\Search\\ISearchComparison' => $baseDir . '/lib/public/Files/Search/ISearchComparison.php',
'OCP\\Files\\StorageInvalidException' => $baseDir . '/lib/public/Files/StorageInvalidException.php',
'OCP\\Files\\StorageNotAvailableException' => $baseDir . '/lib/public/Files/StorageNotAvailableException.php',
'OCP\\Files\\StorageTimeoutException' => $baseDir . '/lib/public/Files/StorageTimeoutException.php',
+ 'OCP\\Files\\Storage\\IChunkedFileWrite' => $baseDir . '/lib/public/Files/Storage/IChunkedFileWrite.php',
'OCP\\Files\\Storage\\IDisableEncryptionStorage' => $baseDir . '/lib/public/Files/Storage/IDisableEncryptionStorage.php',
'OCP\\Files\\Storage\\ILockingStorage' => $baseDir . '/lib/public/Files/Storage/ILockingStorage.php',
'OCP\\Files\\Storage\\INotifyStorage' => $baseDir . '/lib/public/Files/Storage/INotifyStorage.php',
'OCP\\Files\\Notify\\INotifyHandler' => __DIR__ . '/../../..' . '/lib/public/Files/Notify/INotifyHandler.php',
'OCP\\Files\\Notify\\IRenameChange' => __DIR__ . '/../../..' . '/lib/public/Files/Notify/IRenameChange.php',
'OCP\\Files\\ObjectStore\\IObjectStore' => __DIR__ . '/../../..' . '/lib/public/Files/ObjectStore/IObjectStore.php',
+ 'OCP\\Files\\ObjectStore\\IObjectStoreMultiPartUpload' => __DIR__ . '/../../..' . '/lib/public/Files/ObjectStore/IObjectStoreMultiPartUpload.php',
'OCP\\Files\\ReservedWordException' => __DIR__ . '/../../..' . '/lib/public/Files/ReservedWordException.php',
'OCP\\Files\\Search\\ISearchBinaryOperator' => __DIR__ . '/../../..' . '/lib/public/Files/Search/ISearchBinaryOperator.php',
'OCP\\Files\\Search\\ISearchComparison' => __DIR__ . '/../../..' . '/lib/public/Files/Search/ISearchComparison.php',
'OCP\\Files\\StorageInvalidException' => __DIR__ . '/../../..' . '/lib/public/Files/StorageInvalidException.php',
'OCP\\Files\\StorageNotAvailableException' => __DIR__ . '/../../..' . '/lib/public/Files/StorageNotAvailableException.php',
'OCP\\Files\\StorageTimeoutException' => __DIR__ . '/../../..' . '/lib/public/Files/StorageTimeoutException.php',
+ 'OCP\\Files\\Storage\\IChunkedFileWrite' => __DIR__ . '/../../..' . '/lib/public/Files/Storage/IChunkedFileWrite.php',
'OCP\\Files\\Storage\\IDisableEncryptionStorage' => __DIR__ . '/../../..' . '/lib/public/Files/Storage/IDisableEncryptionStorage.php',
'OCP\\Files\\Storage\\ILockingStorage' => __DIR__ . '/../../..' . '/lib/public/Files/Storage/ILockingStorage.php',
'OCP\\Files\\Storage\\INotifyStorage' => __DIR__ . '/../../..' . '/lib/public/Files/Storage/INotifyStorage.php',
*/
namespace OC\Files\ObjectStore;
+use Aws\S3\Exception\S3Exception;
+use Aws\S3\Exception\S3MultipartUploadException;
use Icewind\Streams\CallbackWrapper;
use Icewind\Streams\CountWrapper;
use Icewind\Streams\IteratorDirectory;
use OC\Files\Storage\PolyFill\CopyDirectory;
use OCP\Files\Cache\ICacheEntry;
use OCP\Files\FileInfo;
+use OCP\Files\GenericFileException;
use OCP\Files\NotFoundException;
use OCP\Files\ObjectStore\IObjectStore;
+use OCP\Files\ObjectStore\IObjectStoreMultiPartUpload;
+use OCP\Files\Storage\IChunkedFileWrite;
use OCP\Files\Storage\IStorage;
-class ObjectStoreStorage extends \OC\Files\Storage\Common {
+class ObjectStoreStorage extends \OC\Files\Storage\Common implements IChunkedFileWrite {
use CopyDirectory;
/**
public function mkdir($path) {
$path = $this->normalizePath($path);
-
if ($this->file_exists($path)) {
return false;
}
throw $e;
}
}
+
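+	/**
+	 * Start a multipart upload for the target path and return its upload id as
+	 * write token. The target file must already have a cache entry so that its
+	 * URN can be resolved.
+	 *
+	 * @throws GenericFileException if the object store does not support multipart upload
+	 */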
+ public function startChunkedWrite(string $targetPath): string {
+ if (!$this->objectStore instanceof IObjectStoreMultiPartUpload) {
+ throw new GenericFileException('Object store does not support multipart upload');
+ }
+ $cacheEntry = $this->getCache()->get($targetPath);
+ $urn = $this->getURN($cacheEntry->getId());
+ return $this->objectStore->initiateMultipartUpload($urn);
+ }
+
+	/**
+	 * @throws GenericFileException
+	 */
+ public function putChunkedWritePart(string $targetPath, string $writeToken, string $chunkId, $data, $size = null): ?array {
+ if (!$this->objectStore instanceof IObjectStoreMultiPartUpload) {
+ throw new GenericFileException('Object store does not support multipart upload');
+ }
+ $cacheEntry = $this->getCache()->get($targetPath);
+ $urn = $this->getURN($cacheEntry->getId());
+
+ $result = $this->objectStore->uploadMultipartPart($urn, $writeToken, (int)$chunkId, $data, $size);
+
+		return [
+			'PartNumber' => $chunkId,
+			'ETag' => trim($result->get('ETag'), '"')
+		];
+ }
+
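+	/**
+	 * Complete the multipart upload by handing the collected parts to the object
+	 * store and update size, mtime and mimetype in the file cache. The upload is
+	 * aborted if the object store fails to assemble it.
+	 *
+	 * @throws GenericFileException
+	 */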
+ public function completeChunkedWrite(string $targetPath, string $writeToken): int {
+ if (!$this->objectStore instanceof IObjectStoreMultiPartUpload) {
+ throw new GenericFileException('Object store does not support multipart upload');
+ }
+ $cacheEntry = $this->getCache()->get($targetPath);
+ $urn = $this->getURN($cacheEntry->getId());
+ $parts = $this->objectStore->getMultipartUploads($urn, $writeToken);
+ $sortedParts = array_values($parts);
+ sort($sortedParts);
+ try {
+ $size = $this->objectStore->completeMultipartUpload($urn, $writeToken, $sortedParts);
+ $stat = $this->stat($targetPath);
+ $mtime = time();
+ if (is_array($stat)) {
+ $stat['size'] = $size;
+ $stat['mtime'] = $mtime;
+ $stat['mimetype'] = $this->getMimeType($targetPath);
+ $this->getCache()->update($stat['fileid'], $stat);
+ }
+ } catch (S3MultipartUploadException | S3Exception $e) {
+ $this->objectStore->abortMultipartUpload($urn, $writeToken);
+ $this->logger->logException($e, [
+ 'app' => 'objectstore',
+				'message' => 'Could not complete multipart upload ' . $urn . ' with uploadId ' . $writeToken
+ ]);
+ throw new GenericFileException('Could not write chunked file');
+ }
+ return $size;
+ }
+
+ public function cancelChunkedWrite(string $targetPath, string $writeToken): void {
+ if (!$this->objectStore instanceof IObjectStoreMultiPartUpload) {
+ throw new GenericFileException('Object store does not support multipart upload');
+ }
+ $cacheEntry = $this->getCache()->get($targetPath);
+ $urn = $this->getURN($cacheEntry->getId());
+ $this->objectStore->abortMultipartUpload($urn, $writeToken);
+ }
}
*/
namespace OC\Files\ObjectStore;
+use Aws\Result;
+use Exception;
use OCP\Files\ObjectStore\IObjectStore;
+use OCP\Files\ObjectStore\IObjectStoreMultiPartUpload;
-class S3 implements IObjectStore {
+class S3 implements IObjectStore, IObjectStoreMultiPartUpload {
use S3ConnectionTrait;
use S3ObjectTrait;
public function getStorageId() {
return $this->id;
}
+
+ public function initiateMultipartUpload(string $urn): string {
+ $upload = $this->getConnection()->createMultipartUpload([
+ 'Bucket' => $this->bucket,
+ 'Key' => $urn,
+ ]);
+ $uploadId = $upload->get('UploadId');
+ if ($uploadId === null) {
+ throw new Exception('No upload id returned');
+ }
+ return (string)$uploadId;
+ }
+
+ public function uploadMultipartPart(string $urn, string $uploadId, int $partId, $stream, $size): Result {
+ return $this->getConnection()->uploadPart([
+ 'Body' => $stream,
+ 'Bucket' => $this->bucket,
+ 'Key' => $urn,
+ 'ContentLength' => $size,
+ 'PartNumber' => $partId,
+ 'UploadId' => $uploadId,
+ ]);
+ }
+
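+	/**
+	 * List the parts uploaded so far for the given upload id. S3 limits a
+	 * multipart upload to 10000 parts, so a single page with MaxParts=10000
+	 * covers all of them.
+	 */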
+ public function getMultipartUploads(string $urn, string $uploadId): array {
+ $parts = $this->getConnection()->listParts([
+ 'Bucket' => $this->bucket,
+ 'Key' => $urn,
+ 'UploadId' => $uploadId,
+ 'MaxParts' => 10000
+ ]);
+ return $parts->get('Parts') ?? [];
+ }
+
+ public function completeMultipartUpload(string $urn, string $uploadId, array $result): int {
+ $this->getConnection()->completeMultipartUpload([
+ 'Bucket' => $this->bucket,
+ 'Key' => $urn,
+ 'UploadId' => $uploadId,
+ 'MultipartUpload' => ['Parts' => $result],
+ ]);
+ $stat = $this->getConnection()->headObject([
+ 'Bucket' => $this->bucket,
+ 'Key' => $urn,
+ ]);
+ return (int)$stat->get('ContentLength');
+ }
+
+ public function abortMultipartUpload($urn, $uploadId): void {
+ $this->getConnection()->abortMultipartUpload([
+ 'Bucket' => $this->bucket,
+ 'Key' => $urn,
+ 'UploadId' => $uploadId
+ ]);
+ }
}
--- /dev/null
+<?php
+/*
+ * @copyright Copyright (c) 2021 Julius Härtl <jus@bitgrid.net>
+ *
+ * @author Julius Härtl <jus@bitgrid.net>
+ *
+ * @license GNU AGPL version 3 or any later version
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+declare(strict_types=1);
+
+
+namespace OCP\Files\ObjectStore;
+
+use Aws\Result;
+
+/**
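+ * Object stores implementing this interface support an S3-style multipart
+ * upload: an upload is initiated, parts are uploaded and listed individually,
+ * and the upload is finally completed or aborted.
+ *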
+ * @since 26.0.0
+ */
+interface IObjectStoreMultiPartUpload {
+ /**
+ * @since 26.0.0
+ */
+ public function initiateMultipartUpload(string $urn): string;
+
+ /**
+ * @since 26.0.0
+ */
+ public function uploadMultipartPart(string $urn, string $uploadId, int $partId, $stream, $size): Result;
+
+ /**
+ * @since 26.0.0
+ */
+ public function completeMultipartUpload(string $urn, string $uploadId, array $result): int;
+
+ /**
+ * @since 26.0.0
+ */
+ public function abortMultipartUpload(string $urn, string $uploadId): void;
+
+ /**
+ * @since 26.0.0
+ */
+ public function getMultipartUploads(string $urn, string $uploadId): array;
+}
--- /dev/null
+<?php
+/*
+ * @copyright Copyright (c) 2021 Julius Härtl <jus@bitgrid.net>
+ *
+ * @author Julius Härtl <jus@bitgrid.net>
+ *
+ * @license GNU AGPL version 3 or any later version
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+declare(strict_types=1);
+
+
+namespace OCP\Files\Storage;
+
+use OCP\Files\GenericFileException;
+
+/**
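+ * Storage backends implementing this interface can write a file from separately
+ * uploaded chunks.
+ *
+ * A typical write cycle looks roughly like this (sketch):
+ *   $token = $storage->startChunkedWrite($path);
+ *   $storage->putChunkedWritePart($path, $token, '1', $stream1, $size1);
+ *   $storage->putChunkedWritePart($path, $token, '2', $stream2, $size2);
+ *   $storage->completeChunkedWrite($path, $token);
+ * A pending write can be aborted with cancelChunkedWrite($path, $token).
+ *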
+ * @since 26.0.0
+ */
+interface IChunkedFileWrite extends IStorage {
+ /**
+ * @param string $targetPath Relative target path in the storage
+ * @return string writeToken to be used with the other methods to uniquely identify the file write operation
+ * @throws GenericFileException
+ * @since 26.0.0
+ */
+ public function startChunkedWrite(string $targetPath): string;
+
+ /**
+ * @param string $targetPath
+ * @param string $writeToken
+ * @param string $chunkId
+ * @param resource $data
+ * @param int|null $size
+ * @throws GenericFileException
+ * @since 26.0.0
+ */
+ public function putChunkedWritePart(string $targetPath, string $writeToken, string $chunkId, $data, int $size = null): ?array;
+
+ /**
+ * @param string $targetPath
+ * @param string $writeToken
+ * @return int
+ * @throws GenericFileException
+ * @since 26.0.0
+ */
+ public function completeChunkedWrite(string $targetPath, string $writeToken): int;
+
+ /**
+ * @param string $targetPath
+ * @param string $writeToken
+ * @throws GenericFileException
+ * @since 26.0.0
+ */
+ public function cancelChunkedWrite(string $targetPath, string $writeToken): void;
+}