</properties>
<dependencies>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-policies</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.archiva.redback.components.registry</groupId>
<artifactId>spring-registry-api</artifactId>
import org.apache.archiva.repository.ContentNotFoundException;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.events.RepositoryListener;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
log.error( "Error during metadata retrieval {}: {}", metaBaseId, e.getMessage( ) );
}
}
- Path artifactFile = repository.toFile( reference );
+ StorageAsset artifactFile = repository.toFile( reference );
for ( RepositoryListener listener : listeners )
{
listener.deleteArtifact( metadataRepository, repository.getId( ), reference.getGroupId( ),
reference.getArtifactId( ), reference.getVersion( ),
- artifactFile.getFileName( ).toString( ) );
+ artifactFile.getName( ));
}
try
{
- Files.delete( artifactFile );
- log.debug( "File deleted: {}", artifactFile.toAbsolutePath( ) );
+ artifactFile.getStorage().removeAsset(artifactFile);
+ log.debug( "File deleted: {}", artifactFile );
}
catch ( IOException e )
{
- log.error( "Could not delete file {}: {}", artifactFile.toAbsolutePath( ), e.getMessage( ), e );
+ log.error( "Could not delete file {}: {}", artifactFile.toString(), e.getMessage( ), e );
continue;
}
try
}
}
- private void deleteSilently( Path path )
+ private void deleteSilently( StorageAsset path )
{
try
{
- Files.deleteIfExists( path );
+ path.getStorage().removeAsset(path);
triggerAuditEvent( repository.getRepository( ).getId( ), path.toString( ), AuditEvent.PURGE_FILE );
}
catch ( IOException e )
*
* @param artifactFile the artifact file whose support files (e.g. checksums) are purged alongside it.
*/
- private void purgeSupportFiles( Path artifactFile )
+ private void purgeSupportFiles( StorageAsset artifactFile )
{
- Path parentDir = artifactFile.getParent( );
+ StorageAsset parentDir = artifactFile.getParent( );
- if ( !Files.exists( parentDir ) )
+ if ( !parentDir.exists() )
{
return;
}
- final String artifactName = artifactFile.getFileName( ).toString( );
+ final String artifactName = artifactFile.getName( );
try
{
- Files.find( parentDir, 3,
- ( path, basicFileAttributes ) -> path.getFileName( ).toString( ).startsWith( artifactName )
- && Files.isRegularFile( path ) ).forEach( this::deleteSilently );
+
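+ // Mirrors the removed Files.find( parentDir, 3, ... ) call: the boolean flag and the
+ // trailing '3' are assumed to select traversal order and the maximum recursion depth.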
+ StorageUtil.recurse(parentDir, a -> {
+ if (!a.isContainer() && a.getName().startsWith(artifactName)) deleteSilently(a);
+ }, true, 3 );
}
catch ( IOException e )
{
import org.apache.archiva.repository.LayoutException;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.events.RepositoryListener;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.time.DateUtils;
import java.io.IOException;
artifactFile.toAbsolutePath( ).toString() );
newArtifactReference.setVersion( version );
- Path newArtifactFile = repository.toFile( newArtifactReference );
+ StorageAsset newArtifactFile = repository.toFile( newArtifactReference );
// Is this a generic snapshot "1.0-SNAPSHOT" ?
if ( VersionUtil.isGenericSnapshot( newArtifactReference.getVersion( ) ) )
{
- if ( Files.getLastModifiedTime( newArtifactFile ).toMillis() < olderThanThisDate.getTimeInMillis( ) )
+ if ( newArtifactFile.getModificationTime().toEpochMilli() < olderThanThisDate.getTimeInMillis( ) )
{
artifactsToDelete.addAll( repository.getRelatedArtifacts( newArtifactReference ) );
}
}
purge( artifactsToDelete );
}
- catch ( ContentNotFoundException | IOException e )
+ catch ( ContentNotFoundException e )
{
throw new RepositoryPurgeException( e.getMessage( ), e );
}
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-common</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-checksum</artifactId>
</exclusion>
</exclusions>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-fs</artifactId>
+ <scope>test</scope>
+ </dependency>
<dependency>
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-test-utils</artifactId>
*/
import org.apache.archiva.common.utils.VersionUtil;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Calendar;
-import java.util.Date;
import java.util.List;
import java.util.Properties;
*/
import org.apache.archiva.policies.urlcache.UrlFailureCache;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.archiva.checksum.ChecksumAlgorithm;
import org.apache.archiva.checksum.ChecksummedFile;
import org.apache.archiva.checksum.UpdateStatus;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* under the License.
*/
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import java.util.Map;
import java.util.Properties;
* under the License.
*/
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import java.util.Properties;
* under the License.
*/
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
* under the License.
*/
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.springframework.stereotype.Service;
-import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
*/
import junit.framework.TestCase;
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.policies.urlcache.UrlFailureCache;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
import org.junit.Test;
import org.junit.runner.RunWith;
import javax.inject.Inject;
import javax.inject.Named;
+import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;
@Inject
private UrlFailureCache urlFailureCache;
+ private FilesystemStorage filesystemStorage;
+
@Inject
@Named( value = "preDownloadPolicy#cache-failures" )
DownloadPolicy downloadPolicy;
return downloadPolicy;
}
- private Path getFile()
- {
- return Paths.get( "target/cache-failures/" + getName() + ".txt" );
+ private StorageAsset getFile() throws IOException {
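+ // Create the backing FilesystemStorage lazily; assets are then addressed relative to target/cache-failures.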
+ if (filesystemStorage==null) {
+ filesystemStorage = new FilesystemStorage(Paths.get("target/cache-failures"), new DefaultFileLockManager());
+ }
+ return filesystemStorage.getAsset( getName() + ".txt" );
}
private Properties createRequest()
throws Exception
{
DownloadPolicy policy = lookupPolicy();
- Path localFile = getFile();
+ StorageAsset localFile = getFile();
Properties request = createRequest();
request.setProperty( "url", "http://a.bad.hostname.maven.org/path/to/resource.txt" );
{
DownloadPolicy policy = lookupPolicy();
- Path localFile = getFile();
+ StorageAsset localFile = getFile();
Properties request = createRequest();
// make unique name
String url = "http://a.bad.hostname.maven.org/path/to/resource"+ System.currentTimeMillis() +".txt";
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
import org.apache.commons.io.FileUtils;
import org.junit.Rule;
import javax.inject.Named;
import java.io.BufferedReader;
import java.io.FileReader;
+import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
private static final String BAD = "bad";
+ private static FilesystemStorage filesystemStorage;
+
@Inject
@Named( value = "postDownloadPolicy#checksum" )
PostDownloadPolicy downloadPolicy;
throws Exception
{
PostDownloadPolicy policy = lookupPolicy();
- Path localFile = createTestableFiles( null, null );
+ StorageAsset localFile = createTestableFiles( null, null );
Properties request = createRequest();
policy.applyPolicy( ChecksumPolicy.IGNORE, request, localFile );
throws Exception
{
PostDownloadPolicy policy = lookupPolicy();
- Path localFile = createTestableFiles( md5State, sha1State );
+ StorageAsset localFile = createTestableFiles( md5State, sha1State );
Properties request = createRequest();
boolean actualResult;
actualResult = false;
String msg = createMessage( ChecksumPolicy.FAIL, md5State, sha1State );
- assertFalse( msg + " local file should not exist:", Files.exists(localFile) );
- Path md5File = localFile.toAbsolutePath().resolveSibling( localFile.getFileName() + ".sha1" );
- Path sha1File = localFile.toAbsolutePath().resolveSibling( localFile.getFileName() + ".md5" );
+ assertFalse( msg + " local file should not exist:", localFile.exists() );
+ Path md5File = localFile.getFilePath().toAbsolutePath().resolveSibling( localFile.getName() + ".md5" );
+ Path sha1File = localFile.getFilePath().toAbsolutePath().resolveSibling( localFile.getName() + ".sha1" );
assertFalse( msg + " local md5 file should not exist:", Files.exists(md5File) );
assertFalse( msg + " local sha1 file should not exist:", Files.exists(sha1File) );
}
throws Exception
{
PostDownloadPolicy policy = lookupPolicy();
- Path localFile = createTestableFiles( md5State, sha1State );
+ StorageAsset localFile = createTestableFiles( md5State, sha1State );
Properties request = createRequest();
boolean actualResult;
assertEquals( createMessage( ChecksumPolicy.FIX, md5State, sha1State ), expectedResult, actualResult );
// End result should be legitimate SHA1 and MD5 files.
- Path md5File = localFile.toAbsolutePath().resolveSibling( localFile.getFileName() + ".md5" );
- Path sha1File = localFile.toAbsolutePath().resolveSibling( localFile.getFileName() + ".sha1" );
+ Path md5File = localFile.getFilePath().toAbsolutePath().resolveSibling( localFile.getName() + ".md5" );
+ Path sha1File = localFile.getFilePath().toAbsolutePath().resolveSibling( localFile.getName() + ".sha1" );
assertTrue( "ChecksumPolicy.apply(FIX) md5 should exist.", Files.exists(md5File) && Files.isRegularFile(md5File) );
assertTrue( "ChecksumPolicy.apply(FIX) sha1 should exist.", Files.exists(sha1File) && Files.isRegularFile(sha1File) );
return request;
}
- private Path createTestableFiles( String md5State, String sha1State )
+ private StorageAsset createTestableFiles(String md5State, String sha1State )
throws Exception
{
- Path sourceDir = getTestFile( "src/test/resources/checksums/" );
- Path destDir = getTestFile( "target/checksum-tests/" + name.getMethodName() + "/" );
+ FilesystemStorage fs = new FilesystemStorage(Paths.get("target/checksum-tests"), new DefaultFileLockManager());
+ StorageAsset sourceDir = getTestFile( "src/test/resources/checksums/" );
+ StorageAsset destDir = getTestFile( "target/checksum-tests/" + name.getMethodName() + "/" );
- FileUtils.copyFileToDirectory( sourceDir.resolve("artifact.jar" ).toFile(), destDir.toFile() );
+ FileUtils.copyFileToDirectory( sourceDir.getFilePath().resolve("artifact.jar" ).toFile(), destDir.getFilePath().toFile() );
if ( md5State != null )
{
- Path md5File = sourceDir.resolve("artifact.jar.md5-" + md5State );
+ Path md5File = sourceDir.getFilePath().resolve("artifact.jar.md5-" + md5State );
assertTrue( "Testable file exists: " + md5File.getFileName() + ":", Files.exists(md5File) && Files.isRegularFile(md5File) );
- Path destFile = destDir.resolve("artifact.jar.md5" );
+ Path destFile = destDir.getFilePath().resolve("artifact.jar.md5" );
FileUtils.copyFile( md5File.toFile(), destFile.toFile() );
}
if ( sha1State != null )
{
- Path sha1File = sourceDir.resolve("artifact.jar.sha1-" + sha1State );
+ Path sha1File = sourceDir.getFilePath().resolve("artifact.jar.sha1-" + sha1State );
assertTrue( "Testable file exists: " + sha1File.getFileName() + ":", Files.exists(sha1File) && Files.isRegularFile(sha1File) );
- Path destFile = destDir.resolve("artifact.jar.sha1" );
+ Path destFile = destDir.getFilePath().resolve("artifact.jar.sha1" );
FileUtils.copyFile( sha1File.toFile(), destFile.toFile() );
}
- Path localFile = destDir.resolve("artifact.jar" );
- return localFile;
+
+ StorageAsset localAsset = fs.getAsset( name.getMethodName() + "/artifact.jar" );
+ return localAsset;
}
- public static Path getTestFile( String path )
- {
- return Paths.get( org.apache.archiva.common.utils.FileUtils.getBasedir(), path );
+ public static StorageAsset getTestFile( String path ) throws IOException {
+ if (filesystemStorage==null) {
+ filesystemStorage = new FilesystemStorage(Paths.get(org.apache.archiva.common.utils.FileUtils.getBasedir()), new DefaultFileLockManager());
+ }
+ return filesystemStorage.getAsset( path );
}
}
*/
import junit.framework.TestCase;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
import org.junit.Before;
import org.junit.Test;
request.setProperty( "version", "2.0" );
}
- Path targetDir = ChecksumPolicyTest.getTestFile( "target/test-policy/" );
- Path localFile = targetDir.resolve( path );
+ StorageAsset targetDir = ChecksumPolicyTest.getTestFile( "target/test-policy/" );
+ StorageAsset localFile = targetDir.resolve( path );
- Files.deleteIfExists( localFile );
+ Files.deleteIfExists( localFile.getFilePath() );
if ( createLocalFile )
{
- Files.createDirectories( localFile.getParent());
- org.apache.archiva.common.utils.FileUtils.writeStringToFile( localFile, FILE_ENCODING, "random-junk" );
- Files.setLastModifiedTime( localFile,
- FileTime.fromMillis(Files.getLastModifiedTime(localFile).toMillis() - generatedLocalFileUpdateDelta));
+ Files.createDirectories( localFile.getParent().getFilePath());
+ org.apache.archiva.common.utils.FileUtils.writeStringToFile( localFile.getFilePath(), FILE_ENCODING, "random-junk" );
+ Files.setLastModifiedTime( localFile.getFilePath(),
+ FileTime.fromMillis(Files.getLastModifiedTime(localFile.getFilePath()).toMillis() - generatedLocalFileUpdateDelta));
}
policy.applyPolicy( setting, request, localFile );
*/
import junit.framework.TestCase;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
import org.junit.Before;
import org.junit.Test;
@Inject @Named(value="preDownloadPolicy#snapshots")
PreDownloadPolicy policy;
+ private FilesystemStorage filesystemStorage;
+
private PreDownloadPolicy lookupPolicy()
throws Exception
{
request.setProperty( "version", "2.0" );
}
- Path targetDir = ChecksumPolicyTest.getTestFile( "target/test-policy/" );
- Path localFile = targetDir.resolve( path );
+ StorageAsset targetDir = ChecksumPolicyTest.getTestFile( "target/test-policy/" );
+ StorageAsset localFile = targetDir.resolve( path );
- Files.deleteIfExists( localFile );
+ Files.deleteIfExists( localFile.getFilePath() );
if ( createLocalFile )
{
- Files.createDirectories( localFile.getParent());
- org.apache.archiva.common.utils.FileUtils.writeStringToFile( localFile, FILE_ENCODING, "random-junk" );
- Files.setLastModifiedTime( localFile,
- FileTime.fromMillis( Files.getLastModifiedTime( localFile ).toMillis() - generatedLocalFileUpdateDelta ));
+ Files.createDirectories( localFile.getParent().getFilePath() );
+ org.apache.archiva.common.utils.FileUtils.writeStringToFile( localFile.getFilePath(), FILE_ENCODING, "random-junk" );
+ Files.setLastModifiedTime( localFile.getFilePath(),
+ FileTime.fromMillis( Files.getLastModifiedTime( localFile.getFilePath() ).toMillis() - generatedLocalFileUpdateDelta ));
}
policy.applyPolicy( setting, request, localFile );
*/
-import org.apache.archiva.repository.content.StorageAsset;
-
-import java.nio.file.Path;
+import org.apache.archiva.repository.storage.StorageAsset;
/**
* A result from a proxy fetch operation.
import org.apache.archiva.policies.ProxyDownloadException;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.RepositoryType;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import java.util.List;
import java.util.Map;
import org.apache.archiva.checksum.ChecksumAlgorithm;
import org.apache.archiva.checksum.ChecksumUtil;
import org.apache.archiva.proxy.model.ProxyConnectorRuleType;
-import org.apache.archiva.common.filelock.FileLockException;
import org.apache.archiva.common.filelock.FileLockManager;
-import org.apache.archiva.common.filelock.FileLockTimeoutException;
-import org.apache.archiva.common.filelock.Lock;
import org.apache.archiva.configuration.*;
import org.apache.archiva.model.ArtifactReference;
import org.apache.archiva.model.Keys;
import org.apache.archiva.redback.components.registry.RegistryListener;
import org.apache.archiva.redback.components.taskqueue.TaskQueueException;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.FilesystemStorage;
-import org.apache.archiva.repository.content.StorageAsset;
-import org.apache.archiva.repository.content.StorageUtil;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.archiva.repository.metadata.MetadataTools;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
import org.apache.archiva.scheduler.ArchivaTaskScheduler;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.archiva.admin.model.AuditInformation;
import org.apache.archiva.admin.model.RepositoryAdminException;
import org.apache.archiva.admin.model.beans.RepositoryGroup;
+import org.apache.archiva.repository.storage.StorageAsset;
-import java.nio.file.Path;
import java.util.List;
import java.util.Map;
Map<String, List<String>> getRepositoryToGroupMap()
throws RepositoryAdminException;
- Path getMergedIndexDirectory(String repositoryGroupId );
+ StorageAsset getMergedIndexDirectory(String repositoryGroupId );
}
import org.apache.archiva.configuration.RepositoryGroupConfiguration;
import org.apache.archiva.metadata.model.facets.AuditEvent;
import org.apache.archiva.indexer.merger.MergedRemoteIndexesScheduler;
-import org.apache.archiva.repository.EditableRepository;
import org.apache.archiva.repository.EditableRepositoryGroup;
import org.apache.archiva.repository.RepositoryException;
import org.apache.archiva.repository.RepositoryRegistry;
import org.apache.archiva.repository.features.IndexCreationFeature;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Override
- public Path getMergedIndexDirectory( String repositoryGroupId )
+ public StorageAsset getMergedIndexDirectory(String repositoryGroupId )
{
- return groupsDirectory.resolve( repositoryGroupId );
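+ // Derive the merged index location from the group's IndexCreationFeature instead of a fixed groups directory.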
+ org.apache.archiva.repository.RepositoryGroup group = repositoryRegistry.getRepositoryGroup(repositoryGroupId);
+ if (group!=null) {
+ return group.getFeature(IndexCreationFeature.class).get().getLocalIndexPath();
+ } else {
+ return null;
+ }
}
@Override
* under the License.
*/
-import org.apache.archiva.admin.model.RepositoryAdminException;
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.FileUtils;
import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.configuration.ArchivaConfiguration;
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryType;
import org.apache.archiva.repository.UnsupportedRepositoryTypeException;
-import org.apache.archiva.repository.content.FilesystemAsset;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.index.ArtifactContext;
import org.apache.maven.index.ArtifactContextProducer;
private Path getIndexPath( ArchivaIndexingContext ctx )
{
- return PathUtil.getPathFromUri( ctx.getPath( ) );
+ return ctx.getPath( ).getFilePath();
}
@FunctionalInterface
@Override
public void addArtifactsToIndex( final ArchivaIndexingContext context, final Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.addArtifactsToIndex(artifacts, indexingContext);
} catch (IOException e) {
@Override
public void removeArtifactsFromIndex( ArchivaIndexingContext context, Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.deleteArtifactsFromIndex(artifacts, indexingContext);
} catch (IOException e) {
log.warn("Index close failed");
}
try {
- FileUtils.deleteDirectory(Paths.get(context.getPath()));
+ StorageUtil.deleteRecursively(context.getPath());
} catch (IOException e) {
throw new IndexUpdateFailedException("Could not delete index files");
}
URI indexDir = icf.getIndexPath();
String indexPath = indexDir.getPath();
Path indexDirectory = null;
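+ // The maven indexer works on filesystem paths, so the repository's storage is assumed to be a FilesystemStorage here.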
+ FilesystemStorage filesystemStorage = (FilesystemStorage) repo.getAsset("").getStorage();
if ( ! StringUtils.isEmpty(indexDir.toString( ) ) )
{
if ( indexDirectory.isAbsolute( ) )
{
indexPath = indexDirectory.getFileName().toString();
+ filesystemStorage = new FilesystemStorage(indexDirectory.getParent(), new DefaultFileLockManager());
}
else
{
{
Files.createDirectories( indexDirectory );
}
- return new FilesystemAsset( indexPath, indexDirectory);
+ return new FilesystemAsset( filesystemStorage, indexPath, indexDirectory);
}
private IndexingContext createRemoteContext(RemoteRepository remoteRepository ) throws IOException
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.indexer.ArchivaIndexingContext;
import org.apache.archiva.repository.Repository;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.index.context.IndexingContext;
import java.io.IOException;
private IndexingContext delegate;
private Repository repository;
+ private FilesystemStorage filesystemStorage;
MavenIndexContextMock(Repository repository, IndexingContext delegate) {
this.delegate = delegate;
this.repository = repository;
+ try {
+ this.filesystemStorage = new FilesystemStorage(delegate.getIndexDirectoryFile().toPath(), new DefaultFileLockManager());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
}
}
@Override
- public URI getPath() {
- return delegate.getIndexDirectoryFile().toURI();
+ public StorageAsset getPath() {
+ return new FilesystemAsset(filesystemStorage, "", delegate.getIndexDirectoryFile().toPath());
}
@Override
import org.apache.archiva.indexer.merger.MergedRemoteIndexesScheduler;
import org.apache.archiva.repository.RepositoryGroup;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.springframework.stereotype.Service;
-import java.nio.file.Path;
-
/**
* @author Olivier Lamy
*/
{
@Override
- public void schedule( RepositoryGroup repositoryGroup, Path directory )
+ public void schedule(RepositoryGroup repositoryGroup, StorageAsset directory )
{
// no op
}
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-common</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ </dependency>
<dependency>
<groupId>commons-lang</groupId>
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryType;
-import org.apache.archiva.repository.content.StorageAsset;
import java.net.URI;
-import java.nio.file.Path;
import java.util.Collection;
import java.util.List;
*/
import org.apache.archiva.repository.Repository;
+import org.apache.archiva.repository.storage.StorageAsset;
import java.io.IOException;
import java.net.URI;
* The path where the index is stored.
* @return
*/
- URI getPath();
+ StorageAsset getPath();
/**
* Returns true, if the index has no entries or is not initialized.
* under the License.
*/
+import org.apache.archiva.repository.storage.StorageAsset;
+
import java.nio.file.Path;
import java.util.Collection;
private int mergedIndexTtl;
- private Path mergedIndexDirectory;
+ private StorageAsset mergedIndexDirectory;
private boolean temporary;
this.mergedIndexTtl = mergedIndexTtl;
}
- public Path getMergedIndexDirectory()
+ public StorageAsset getMergedIndexDirectory()
{
return mergedIndexDirectory;
}
- public void setMergedIndexDirectory( Path mergedIndexDirectory )
+ public void setMergedIndexDirectory( StorageAsset mergedIndexDirectory )
{
this.mergedIndexDirectory = mergedIndexDirectory;
}
- public IndexMergerRequest mergedIndexDirectory( Path mergedIndexDirectory )
+ public IndexMergerRequest mergedIndexDirectory( StorageAsset mergedIndexDirectory )
{
this.mergedIndexDirectory = mergedIndexDirectory;
return this;
import org.apache.archiva.repository.RepositoryGroup;
-
-import java.nio.file.Path;
+import org.apache.archiva.repository.storage.StorageAsset;
/**
* @author Olivier Lamy
* will check if this repository group needs to schedule a cron to download/merge
* remote indexes
* @param repositoryGroup
+ * @param directory the directory where the merged index is stored
*/
- void schedule(RepositoryGroup repositoryGroup, Path directory );
+ void schedule(RepositoryGroup repositoryGroup, StorageAsset directory );
void unschedule( RepositoryGroup repositoryGroup );
* under the License.
*/
+import org.apache.archiva.repository.storage.StorageAsset;
+
import java.io.Serializable;
import java.nio.file.Path;
import java.util.Date;
{
private long creationTime = new Date().getTime();
- private Path directory;
+ private StorageAsset directory;
private String indexId;
private int mergedIndexTtl;
- public TemporaryGroupIndex(Path directory, String indexId, String groupId, int mergedIndexTtl)
+ public TemporaryGroupIndex(StorageAsset directory, String indexId, String groupId, int mergedIndexTtl)
{
this.directory = directory;
this.indexId = indexId;
return this;
}
- public Path getDirectory()
+ public StorageAsset getDirectory()
{
return directory;
}
- public TemporaryGroupIndex setDirectory( Path directory )
+ public TemporaryGroupIndex setDirectory( StorageAsset directory )
{
this.directory = directory;
return this;
*/
-import org.apache.archiva.repository.content.RepositoryStorage;
-
import java.util.Set;
/**
import org.apache.archiva.model.ArtifactReference;
import org.apache.archiva.model.ProjectReference;
import org.apache.archiva.model.VersionedReference;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import java.util.Set;
*/
import org.apache.archiva.indexer.ArchivaIndexingContext;
-import org.apache.archiva.repository.content.RepositoryStorage;
+import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.archiva.repository.features.RepositoryFeature;
import java.net.URI;
* under the License.
*/
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.RepositoryStorage;
import java.util.List;
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.nio.file.CopyOption;
-import java.util.function.Consumer;
-
-/**
- * Repository storage gives access to the files and directories on the storage.
- * The storage may be on a filesystem but can be any other storage system.
- *
- * This API is low level repository access. If you use this API you must
- * either have knowledge about the specific repository layout or use the structure
- * as it is, e.g. for browsing.
- *
- * It is the decision of the implementation, if this API provides access to all elements, or
- * just a selected view.
- *
- * Checking access is not part of this API.
- */
-public interface RepositoryStorage {
- /**
- * Returns information about a specific storage asset.
- * @param path
- * @return
- */
- StorageAsset getAsset(String path);
-
- /**
- * Consumes the data and sets a lock for the file during the operation.
- *
- * @param asset The asset from which the data is consumed.
- * @param consumerFunction The consumer that reads the data
- * @param readLock If true, a read lock is acquired on the asset.
- * @throws IOException
- */
- void consumeData(StorageAsset asset, Consumer<InputStream> consumerFunction, boolean readLock) throws IOException;
-
- /**
- * Consumes the data and sets a lock for the file during the operation.
- *
- * @param asset The asset from which the data is consumed.
- * @param consumerFunction The consumer that reads the data
- * @param readLock If true, a read lock is acquired on the asset.
- * @throws IOException
- */
- void consumeDataFromChannel( StorageAsset asset, Consumer<ReadableByteChannel> consumerFunction, boolean readLock) throws IOException;
-
- /**
- * Writes data to the asset using a write lock.
- *
- * @param asset The asset to which the data is written.
- * @param consumerFunction The function that provides the data.
- * @param writeLock If true, a write lock is acquired on the destination.
- */
- void writeData( StorageAsset asset, Consumer<OutputStream> consumerFunction, boolean writeLock) throws IOException;;
-
- /**
- * Writes data and sets a lock during the operation.
- *
- * @param asset The asset to which the data is written.
- * @param consumerFunction The function that provides the data.
- * @param writeLock If true, a write lock is acquired on the destination.
- * @throws IOException
- */
- void writeDataToChannel( StorageAsset asset, Consumer<WritableByteChannel> consumerFunction, boolean writeLock) throws IOException;
-
- /**
- * Adds a new asset to the underlying storage.
- * @param path The path to the asset.
- * @param container True, if the asset should be a container, false, if it is a file.
- * @return
- */
- StorageAsset addAsset(String path, boolean container);
-
- /**
- * Removes the given asset from the storage.
- *
- * @param asset
- * @throws IOException
- */
- void removeAsset(StorageAsset asset) throws IOException;
-
- /**
- * Moves the asset to the given location and returns the asset object for the destination.
- *
- * @param origin The original asset
- * @param destination The destination path pointing to the new asset.
- * @param copyOptions The copy options.
- * @return The asset representation of the moved object.
- */
- StorageAsset moveAsset(StorageAsset origin, String destination, CopyOption... copyOptions) throws IOException;
-
- /**
- * Moves the asset to the new path.
- *
- * @param origin The original asset
- * @param destination The destination asset.
- * @param copyOptions The copy options (e.g. {@link java.nio.file.StandardCopyOption#REPLACE_EXISTING}
- * @throws IOException If it was not possible to copy the asset.
- */
- void moveAsset(StorageAsset origin, StorageAsset destination, CopyOption... copyOptions) throws IOException;
-
- /**
- * Copies the given asset to the new destination.
- *
- * @param origin The original asset
- * @param destination The path to the new asset
- * @param copyOptions The copy options, e.g. (e.g. {@link java.nio.file.StandardCopyOption#REPLACE_EXISTING}
- * @return The asset representation of the copied object
- * @throws IOException If it was not possible to copy the asset
- */
- StorageAsset copyAsset(StorageAsset origin, String destination, CopyOption... copyOptions) throws IOException;
-
- /**
- * Copies the given asset to the new destination.
- *
- * @param origin The original asset
- * @param destination The path to the new asset
- * @param copyOptions The copy options, e.g. (e.g. {@link java.nio.file.StandardCopyOption#REPLACE_EXISTING}
- * @throws IOException If it was not possible to copy the asset
- */
- void copyAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions) throws IOException;
-
-
-}
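For orientation, a minimal sketch of the relocated API; this is a sketch under the assumption that the interface keeps the signatures shown above after its move to org.apache.archiva.repository.storage, and that FilesystemStorage (its file-backed implementation) creates missing assets on write:

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.archiva.repository.storage.StorageAsset;

import java.io.IOException;
import java.io.PrintWriter;
import java.nio.file.Paths;

public class StorageApiSketch {
    public static void main( String[] args ) throws IOException {
        // File-backed storage rooted at a scratch directory.
        RepositoryStorage storage = new FilesystemStorage( Paths.get( "target/storage-demo" ),
                                                           new DefaultFileLockManager() );
        // Assets are addressed relative to the storage root; the asset need not exist yet.
        StorageAsset asset = storage.getAsset( "readme.txt" );
        // Write the content while holding a write lock.
        storage.writeData( asset, os -> new PrintWriter( os, true ).println( "hello" ), true );
        // Read it back while holding a read lock.
        storage.consumeData( asset, is -> { /* consume the InputStream */ }, true );
        // And remove it again.
        storage.removeAsset( asset );
    }
}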
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.nio.file.Path;
-import java.time.Instant;
-import java.util.List;
-import java.util.function.Consumer;
-
-/**
- * A instance of this interface represents information about an specific asset in a repository.
- * The asset may be an real artifact, a directory, or a virtual asset.
- *
- * Each asset has a unique path relative to the repository.
- *
- * The implementation may read the data directly from the filesystem or underlying storage implementation.
- *
- * @author Martin Stockhammer <martin_s@apache.org>
- */
-public interface StorageAsset
-{
-
- /**
- * Returns the storage this asset belongs to.
- * @return
- */
- RepositoryStorage getStorage();
-
- /**
- * Returns the complete path relative to the repository to the given asset.
- *
- * @return A path starting with '/' that uniquely identifies the asset in the repository.
- */
- String getPath();
-
- /**
- * Returns the name of the asset. It may be just the filename.
- * @return
- */
- String getName();
-
- /**
- * Returns the time of the last modification.
- *
- * @return
- */
- Instant getModificationTime();
-
- /**
- * Returns true, if this asset is a container type and contains further child assets.
- * @return
- */
- boolean isContainer();
-
- /**
- * List the child assets.
- *
- * @return The list of children. If there are no children and if the asset is not a container, a empty list will be returned.
- */
- List<StorageAsset> list();
-
- /**
- * The size in bytes of the asset. If the asset does not have a size, -1 should be returned.
- *
- * @return The size if the asset has a size, otherwise -1
- */
- long getSize();
-
- /**
- * Returns the input stream of the artifact content.
- * It will throw a IOException, if the stream could not be created.
- * Implementations should create a new stream instance for each invocation and make sure that the
- * stream is proper closed after usage.
- *
- * @return The InputStream representing the content of the artifact.
- * @throws IOException
- */
- InputStream getReadStream() throws IOException;
-
- /**
- * Returns a NIO representation of the data.
- *
- * @return A channel to the asset data.
- * @throws IOException
- */
- ReadableByteChannel getReadChannel() throws IOException;
-
- /**
- *
- * Returns an output stream where you can write data to the asset. The operation is not locked or synchronized.
- * User of this method have to make sure, that the stream is proper closed after usage.
- *
- * @param replace If true, the original data will be replaced, otherwise the data will be appended.
- * @return The OutputStream where the data can be written.
- * @throws IOException
- */
- OutputStream getWriteStream( boolean replace) throws IOException;
-
- /**
- * Returns a NIO representation of the asset where you can write the data.
- *
- * @param replace True, if the content should be replaced by the data written to the stream.
- * @return The Channel for writing the data.
- * @throws IOException
- */
- WritableByteChannel getWriteChannel( boolean replace) throws IOException;
-
- /**
- * Replaces the content. The implementation may do an atomic move operation, or keep a backup. If
- * the operation fails, the implementation should try to restore the old data, if possible.
- *
- * The original file may be deleted, if the storage was successful.
- *
- * @param newData Replaces the data by the content of the given file.
- */
- boolean replaceDataFromFile( Path newData) throws IOException;
-
- /**
- * Returns true, if the asset exists.
- *
- * @return True, if the asset exists, otherwise false.
- */
- boolean exists();
-
- /**
- * Creates the asset in the underlying storage, if it does not exist.
- */
- void create() throws IOException;
-
- /**
- * Returns the real path to the asset, if it exist. Not all implementations may implement this method.
- * The method throws {@link UnsupportedOperationException}, if and only if {@link #isFileBased()} returns false.
- *
- * @return The filesystem path to the asset.
- * @throws UnsupportedOperationException If the underlying storage is not file based.
- */
- Path getFilePath() throws UnsupportedOperationException;
-
- /**
- * Returns true, if the asset can return a file path for the given asset. If this is true, the {@link #getFilePath()}
- * will not throw a {@link UnsupportedOperationException}
- *
- * @return
- */
- boolean isFileBased();
-
- /**
- * Returns true, if there is a parent to this asset.
- * @return
- */
- boolean hasParent();
-
- /**
- * Returns the parent of this asset.
- * @return The asset, or <code>null</code>, if it does not exist.
- */
- StorageAsset getParent();
-}
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryEventListener;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import java.net.URI;
import java.net.URISyntaxException;
-import java.nio.file.Path;
import static org.apache.archiva.indexer.ArchivaIndexManager.DEFAULT_INDEX_PATH;
import static org.apache.archiva.indexer.ArchivaIndexManager.DEFAULT_PACKED_INDEX_PATH;
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-model</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-fs</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-checksum</artifactId>
import org.apache.archiva.indexer.merger.TemporaryGroupIndex;
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryRegistry;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang.time.StopWatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
stopWatch.reset();
stopWatch.start();
- Path mergedIndexDirectory = indexMergerRequest.getMergedIndexDirectory();
+ StorageAsset mergedIndexDirectory = indexMergerRequest.getMergedIndexDirectory();
Repository destinationRepository = repositoryRegistry.getRepository(indexMergerRequest.getGroupId());
ArchivaIndexManager idxManager = repositoryRegistry.getIndexManager(destinationRepository.getType());
ctx.close(true);
temporaryGroupIndexes.remove( temporaryGroupIndex );
temporaryContextes.remove( ctx );
- Path directory = temporaryGroupIndex.getDirectory();
- if ( directory != null && Files.exists(directory) )
+ StorageAsset directory = temporaryGroupIndex.getDirectory();
+ if ( directory != null && directory.exists() )
{
- FileUtils.deleteDirectory( directory );
+ StorageUtil.deleteRecursively( directory );
}
}
}
import org.apache.archiva.repository.ManagedRepository;
import org.apache.archiva.repository.RepositoryGroup;
import org.apache.archiva.repository.features.IndexCreationFeature;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private Map<String, ScheduledFuture> scheduledFutureMap = new ConcurrentHashMap<>();
@Override
- public void schedule(RepositoryGroup repositoryGroup, Path directory )
+ public void schedule(RepositoryGroup repositoryGroup, StorageAsset directory )
{
if ( StringUtils.isEmpty( repositoryGroup.getSchedulingDefinition() ) )
{
*/
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.RepositoryStorage;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;
-import java.util.function.Consumer;
/**
* Simple implementation of a managed repository.
*/
-import org.apache.archiva.repository.content.RepositoryStorage;
+import org.apache.archiva.repository.storage.RepositoryStorage;
import java.nio.file.Path;
import java.time.Duration;
import com.cronutils.model.definition.CronDefinition;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;
-import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.indexer.ArchivaIndexingContext;
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.RepositoryStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.RepositoryFeature;
import org.apache.archiva.repository.features.StagingRepositoryFeature;
import org.apache.commons.lang.StringUtils;
* under the License.
*/
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.commons.collections4.map.ListOrderedMap;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.Consumer;
/**
* Abstract repository group implementation.
import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.filelock.FileLockManager;
-import org.apache.archiva.repository.content.FilesystemStorage;
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.archiva.repository.features.ArtifactCleanupFeature;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.StagingRepositoryFeature;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.io.InputStream;
import java.nio.file.Path;
import java.util.Locale;
-import java.util.function.Consumer;
/**
*
import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.filelock.FileLockManager;
-import org.apache.archiva.repository.content.FilesystemStorage;
-import org.apache.archiva.repository.content.RepositoryStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
import org.slf4j.Logger;
import org.apache.archiva.indexer.IndexManagerFactory;
import org.apache.archiva.indexer.IndexUpdateFailedException;
import org.apache.archiva.redback.components.registry.RegistryException;
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationEvent;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.StagingRepositoryFeature;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.RepositoryContentFactory;
import org.apache.archiva.repository.RepositoryException;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.springframework.stereotype.Service;
import javax.inject.Inject;
return Paths.get(repository.getLocation()).resolve(artifactPath);
}
+ /**
+ * Returns the physical location of a given artifact in the repository. The existence
+ * of the returned asset is not checked.
+ *
+ * @param repository The repository where the artifact is stored.
+ * @param artifactReference The artifact reference.
+ * @return The asset representation of the artifact.
+ * @throws RepositoryException if the repository content could not be resolved.
+ */
+ public StorageAsset getArtifactAsset(ManagedRepository repository, ArtifactReference artifactReference) throws RepositoryException {
+ final ManagedRepositoryContent content = repositoryContentFactory.getManagedRepositoryContent(repository);
+ final String artifactPath = content.toPath( artifactReference );
+ return repository.getAsset(artifactPath);
+ }
+
}
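A hedged usage sketch for the new helper; artifactUtil, repository and log are illustrative placeholders, not names from the patch:

// Resolve an artifact reference to its storage asset and check for deployment.
ArtifactReference ref = new ArtifactReference();
ref.setGroupId( "org.example" );
ref.setArtifactId( "demo" );
ref.setVersion( "1.0" );
ref.setType( "jar" );
StorageAsset asset = artifactUtil.getArtifactAsset( repository, ref ); // hypothetical instances
if ( !asset.exists() )
{
    log.info( "Artifact {} is not yet deployed to repository {}", ref, repository.getId() );
}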
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.FileChannel;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.nio.file.*;
-import java.nio.file.attribute.*;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * Implementation of an asset that is stored on the filesystem.
- * <p>
- * The implementation does not check the given paths. Caller should normalize the asset path
- * and check, if the base path is a parent of the resulting path.
- * <p>
- * The file must not exist for all operations.
- *
- * @author Martin Stockhammer <martin_s@apache.org>
- */
-public class FilesystemAsset implements StorageAsset {
-
- private final static Logger log = LoggerFactory.getLogger(FilesystemAsset.class);
-
- private final Path basePath;
- private final Path assetPath;
- private final String relativePath;
-
- public static final String DEFAULT_POSIX_FILE_PERMS = "rw-rw----";
- public static final String DEFAULT_POSIX_DIR_PERMS = "rwxrwx---";
-
- public static final Set<PosixFilePermission> DEFAULT_POSIX_FILE_PERMISSIONS;
- public static final Set<PosixFilePermission> DEFAULT_POSIX_DIR_PERMISSIONS;
-
- public static final AclEntryPermission[] DEFAULT_ACL_FILE_PERMISSIONS = new AclEntryPermission[]{
- AclEntryPermission.DELETE, AclEntryPermission.READ_ACL, AclEntryPermission.READ_ATTRIBUTES, AclEntryPermission.READ_DATA, AclEntryPermission.WRITE_ACL,
- AclEntryPermission.WRITE_ATTRIBUTES, AclEntryPermission.WRITE_DATA, AclEntryPermission.APPEND_DATA
- };
-
- public static final AclEntryPermission[] DEFAULT_ACL_DIR_PERMISSIONS = new AclEntryPermission[]{
- AclEntryPermission.ADD_FILE, AclEntryPermission.ADD_SUBDIRECTORY, AclEntryPermission.DELETE_CHILD,
- AclEntryPermission.DELETE, AclEntryPermission.READ_ACL, AclEntryPermission.READ_ATTRIBUTES, AclEntryPermission.READ_DATA, AclEntryPermission.WRITE_ACL,
- AclEntryPermission.WRITE_ATTRIBUTES, AclEntryPermission.WRITE_DATA, AclEntryPermission.APPEND_DATA
- };
-
- static {
-
- DEFAULT_POSIX_FILE_PERMISSIONS = PosixFilePermissions.fromString(DEFAULT_POSIX_FILE_PERMS);
- DEFAULT_POSIX_DIR_PERMISSIONS = PosixFilePermissions.fromString(DEFAULT_POSIX_DIR_PERMS);
- }
-
- Set<PosixFilePermission> defaultPosixFilePermissions = DEFAULT_POSIX_FILE_PERMISSIONS;
- Set<PosixFilePermission> defaultPosixDirectoryPermissions = DEFAULT_POSIX_DIR_PERMISSIONS;
-
- List<AclEntry> defaultFileAcls;
- List<AclEntry> defaultDirectoryAcls;
-
- boolean supportsAcl = false;
- boolean supportsPosix = false;
- final boolean setPermissionsForNew;
- final RepositoryStorage storage;
-
- boolean directoryHint = false;
-
- private static final OpenOption[] REPLACE_OPTIONS = new OpenOption[]{StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE};
- private static final OpenOption[] APPEND_OPTIONS = new OpenOption[]{StandardOpenOption.APPEND};
-
-
- FilesystemAsset(RepositoryStorage storage, String path, Path assetPath, Path basePath) {
- this.assetPath = assetPath;
- this.relativePath = path;
- this.setPermissionsForNew=false;
- this.basePath = basePath;
- this.storage = storage;
- init();
- }
-
- /**
- * Creates an asset for the given path. The given paths are not checked.
- * The base path should be an absolute path.
- *
- * @param path The logical path for the asset relative to the repository.
- * @param assetPath The asset path.
- */
- public FilesystemAsset(RepositoryStorage storage, String path, Path assetPath) {
- this.assetPath = assetPath;
- this.relativePath = path;
- this.setPermissionsForNew = false;
- this.basePath = null;
- this.storage = storage;
- init();
- }
-
- /**
- * Creates an asset for the given path. The given paths are not checked.
- * The base path should be an absolute path.
- *
- * @param path The logical path for the asset relative to the repository
- * @param assetPath The asset path.
- * @param directory This is only relevant, if the represented file or directory does not exist yet and
- * is a hint.
- */
- public FilesystemAsset(RepositoryStorage storage, String path, Path assetPath, Path basePath, boolean directory) {
- this.assetPath = assetPath;
- this.relativePath = path;
- this.directoryHint = directory;
- this.setPermissionsForNew = false;
- this.basePath = basePath;
- this.storage = storage;
- init();
- }
-
- /**
- * Creates an asset for the given path. The given paths are not checked.
- * The base path should be an absolute path.
- *
- * @param path The logical path for the asset relative to the repository
- * @param assetPath The asset path.
- * @param directory This is only relevant, if the represented file or directory does not exist yet and
- * is a hint.
- */
- public FilesystemAsset(RepositoryStorage storage, String path, Path assetPath, Path basePath, boolean directory, boolean setPermissionsForNew) {
- this.assetPath = assetPath;
- this.relativePath = path;
- this.directoryHint = directory;
- this.setPermissionsForNew = setPermissionsForNew;
- this.basePath = basePath;
- this.storage = storage;
- init();
- }
-
- private void init() {
-
- if (setPermissionsForNew) {
- try {
- supportsAcl = Files.getFileStore(assetPath.getRoot()).supportsFileAttributeView(AclFileAttributeView.class);
- } catch (IOException e) {
- log.error("Could not check filesystem capabilities {}", e.getMessage());
- }
- try {
- supportsPosix = Files.getFileStore(assetPath.getRoot()).supportsFileAttributeView(PosixFileAttributeView.class);
- } catch (IOException e) {
- log.error("Could not check filesystem capabilities {}", e.getMessage());
- }
-
- if (supportsAcl) {
- AclFileAttributeView aclView = Files.getFileAttributeView(assetPath.getParent(), AclFileAttributeView.class);
- UserPrincipal owner = null;
- try {
- owner = aclView.getOwner();
- setDefaultFileAcls(processPermissions(owner, DEFAULT_ACL_FILE_PERMISSIONS));
- setDefaultDirectoryAcls(processPermissions(owner, DEFAULT_ACL_DIR_PERMISSIONS));
-
- } catch (IOException e) {
- supportsAcl = false;
- }
-
-
- }
- }
- }
-
- private List<AclEntry> processPermissions(UserPrincipal owner, AclEntryPermission[] defaultAclFilePermissions) {
- AclEntry.Builder aclBuilder = AclEntry.newBuilder();
- aclBuilder.setPermissions(defaultAclFilePermissions);
- aclBuilder.setType(AclEntryType.ALLOW);
- aclBuilder.setPrincipal(owner);
- ArrayList<AclEntry> aclList = new ArrayList<>();
- aclList.add(aclBuilder.build());
- return aclList;
- }
-
-
- @Override
- public RepositoryStorage getStorage( )
- {
- return storage;
- }
-
- @Override
- public String getPath() {
- return relativePath;
- }
-
- @Override
- public String getName() {
- return assetPath.getFileName().toString();
- }
-
- @Override
- public Instant getModificationTime() {
- try {
- return Files.getLastModifiedTime(assetPath).toInstant();
- } catch (IOException e) {
- log.error("Could not read modification time of {}", assetPath);
- return Instant.now();
- }
- }
-
- /**
- * Returns true, if the path of this asset points to a directory
- *
- * @return
- */
- @Override
- public boolean isContainer() {
- if (Files.exists(assetPath)) {
- return Files.isDirectory(assetPath);
- } else {
- return directoryHint;
- }
- }
-
- /**
- * Returns the list of directory entries, if this asset represents a directory.
- * Otherwise a empty list will be returned.
- *
- * @return The list of entries in the directory, if it exists.
- */
- @Override
- public List<StorageAsset> list() {
- try {
- return Files.list(assetPath).map(p -> new FilesystemAsset(storage, relativePath + "/" + p.getFileName().toString(), assetPath.resolve(p)))
- .collect(Collectors.toList());
- } catch (IOException e) {
- return Collections.EMPTY_LIST;
- }
- }
-
- /**
- * Returns the size of the represented file. If it cannot be determined, -1 is returned.
- *
- * @return
- */
- @Override
- public long getSize() {
- try {
- return Files.size(assetPath);
- } catch (IOException e) {
- return -1;
- }
- }
-
- /**
- * Returns a input stream to the underlying file, if it exists. The caller has to make sure, that
- * the stream is closed after it was used.
- *
- * @return
- * @throws IOException
- */
- @Override
- public InputStream getReadStream() throws IOException {
- if (isContainer()) {
- throw new IOException("Can not create input stream for container");
- }
- return Files.newInputStream(assetPath);
- }
-
- @Override
- public ReadableByteChannel getReadChannel( ) throws IOException
- {
- return FileChannel.open( assetPath, StandardOpenOption.READ );
- }
-
- private OpenOption[] getOpenOptions(boolean replace) {
- return replace ? REPLACE_OPTIONS : APPEND_OPTIONS;
- }
-
- @Override
- public OutputStream getWriteStream( boolean replace) throws IOException {
- OpenOption[] options = getOpenOptions( replace );
- if (!Files.exists( assetPath )) {
- create();
- }
- return Files.newOutputStream(assetPath, options);
- }
-
- @Override
- public WritableByteChannel getWriteChannel( boolean replace ) throws IOException
- {
- OpenOption[] options = getOpenOptions( replace );
- return FileChannel.open( assetPath, options );
- }
-
- @Override
- public boolean replaceDataFromFile( Path newData) throws IOException {
- final boolean createNew = !Files.exists(assetPath);
- Path backup = null;
- if (!createNew) {
- backup = findBackupFile(assetPath);
- }
- try {
- if (!createNew) {
- Files.move(assetPath, backup);
- }
- Files.move(newData, assetPath, StandardCopyOption.REPLACE_EXISTING);
- applyDefaultPermissions(assetPath);
- return true;
- } catch (IOException e) {
- log.error("Could not overwrite file {}", assetPath);
- // Revert if possible
- if (backup != null && Files.exists(backup)) {
- Files.move(backup, assetPath, StandardCopyOption.REPLACE_EXISTING);
- }
- throw e;
- } finally {
- if (backup != null) {
- try {
- Files.deleteIfExists(backup);
- } catch (IOException e) {
- log.error("Could not delete backup file {}", backup);
- }
- }
- }
-
- }
-
- private void applyDefaultPermissions(Path filePath) {
- try {
- if (supportsPosix) {
- Set<PosixFilePermission> perms;
- if (Files.isDirectory(filePath)) {
- perms = defaultPosixFilePermissions;
- } else {
- perms = defaultPosixDirectoryPermissions;
- }
- Files.setPosixFilePermissions(filePath, perms);
- } else if (supportsAcl) {
- List<AclEntry> perms;
- if (Files.isDirectory(filePath)) {
- perms = getDefaultDirectoryAcls();
- } else {
- perms = getDefaultFileAcls();
- }
- AclFileAttributeView aclAttr = Files.getFileAttributeView(filePath, AclFileAttributeView.class);
- aclAttr.setAcl(perms);
- }
- } catch (IOException e) {
- log.error("Could not set permissions for {}: {}", filePath, e.getMessage());
- }
- }
-
- private Path findBackupFile(Path file) {
- String ext = ".bak";
- Path backupPath = file.getParent().resolve(file.getFileName().toString() + ext);
- int idx = 0;
- while (Files.exists(backupPath)) {
- backupPath = file.getParent().resolve(file.getFileName().toString() + ext + idx++);
- }
- return backupPath;
- }
-
- @Override
- public boolean exists() {
- return Files.exists(assetPath);
- }
-
- @Override
- public Path getFilePath() throws UnsupportedOperationException {
- return assetPath;
- }
-
- @Override
- public boolean isFileBased( )
- {
- return true;
- }
-
- @Override
- public boolean hasParent( )
- {
- if (basePath!=null && assetPath.equals(basePath)) {
- return false;
- }
- return assetPath.getParent()!=null;
- }
-
- @Override
- public StorageAsset getParent( )
- {
- Path parentPath;
- if (basePath!=null && assetPath.equals( basePath )) {
- parentPath=null;
- } else
- {
- parentPath = assetPath.getParent( );
- }
- String relativeParent = StringUtils.substringBeforeLast( relativePath,"/");
- if (parentPath!=null) {
- return new FilesystemAsset(storage, relativeParent, parentPath, basePath, true, setPermissionsForNew );
- } else {
- return null;
- }
- }
-
-
- public void setDefaultFileAcls(List<AclEntry> acl) {
- defaultFileAcls = acl;
- }
-
- public List<AclEntry> getDefaultFileAcls() {
- return defaultFileAcls;
- }
-
- public void setDefaultPosixFilePermissions(Set<PosixFilePermission> perms) {
- defaultPosixFilePermissions = perms;
- }
-
- public Set<PosixFilePermission> getDefaultPosixFilePermissions() {
- return defaultPosixFilePermissions;
- }
-
- public void setDefaultDirectoryAcls(List<AclEntry> acl) {
- defaultDirectoryAcls = acl;
- }
-
- public List<AclEntry> getDefaultDirectoryAcls() {
- return defaultDirectoryAcls;
- }
-
- public void setDefaultPosixDirectoryPermissions(Set<PosixFilePermission> perms) {
- defaultPosixDirectoryPermissions = perms;
- }
-
- public Set<PosixFilePermission> getDefaultPosixDirectoryPermissions() {
- return defaultPosixDirectoryPermissions;
- }
-
- @Override
- public void create() throws IOException {
- if (!Files.exists(assetPath)) {
- if (directoryHint) {
- Files.createDirectories(assetPath);
- } else {
- if (!Files.exists( assetPath.getParent() )) {
- Files.createDirectories( assetPath.getParent( ) );
- }
- Files.createFile(assetPath);
- }
- if (setPermissionsForNew) {
- applyDefaultPermissions(assetPath);
- }
- }
- }
-
- @Override
- public String toString() {
- return relativePath+":"+assetPath;
- }
-
-}
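
With this change FilesystemAsset moves from org.apache.archiva.repository.content into the new org.apache.archiva.repository.storage module (the relocated class is referenced by the import updates elsewhere in this change set). Callers now typically obtain assets through a storage instance rather than through the constructors removed above. A minimal sketch of that lookup pattern, assuming only the relocated FilesystemStorage and the DefaultFileLockManager used in the test mocks; the base directory is an illustrative placeholder:

    // Sketch only: resolve an asset through the storage instead of constructing
    // FilesystemAsset directly.
    import org.apache.archiva.common.filelock.DefaultFileLockManager;
    import org.apache.archiva.repository.storage.FilesystemStorage;
    import org.apache.archiva.repository.storage.StorageAsset;

    import java.nio.file.Paths;

    public class AssetLookupSketch {
        public static void main(String[] args) throws Exception {
            // The storage anchors all logical paths below this base directory.
            FilesystemStorage storage = new FilesystemStorage(
                Paths.get("target", "example-repository"), new DefaultFileLockManager());
            // Logical paths use '/' separators and are local to the repository.
            StorageAsset asset = storage.getAsset("/dir1/testfile1.dat");
            System.out.println(asset.exists() + " -> " + asset.getPath());
        }
    }
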
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.archiva.common.filelock.FileLockException;
-import org.apache.archiva.common.filelock.FileLockManager;
-import org.apache.archiva.common.filelock.FileLockTimeoutException;
-import org.apache.archiva.common.filelock.Lock;
-import org.apache.commons.io.FileUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.FileChannel;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.nio.file.CopyOption;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
-import java.nio.file.StandardOpenOption;
-import java.util.function.Consumer;
-
-/**
- * Implementation of <code>{@link RepositoryStorage}</code> where data is stored in the filesystem.
- *
- * All files are relative to a given base path. Path values are separated by '/', '..' is allowed to navigate
- * to a parent directory, but navigation out of the base path will lead to a exception.
- */
-public class FilesystemStorage implements RepositoryStorage {
-
- private static final Logger log = LoggerFactory.getLogger(FilesystemStorage.class);
-
- private final Path basePath;
- private final FileLockManager fileLockManager;
-
- public FilesystemStorage(Path basePath, FileLockManager fileLockManager) throws IOException {
- if (!Files.exists(basePath)) {
- Files.createDirectories(basePath);
- }
- this.basePath = basePath.normalize().toRealPath();
- this.fileLockManager = fileLockManager;
- }
-
- private Path normalize(final String path) {
- String nPath = path;
- while (nPath.startsWith("/")) {
- nPath = nPath.substring(1);
- }
- return Paths.get(nPath);
- }
-
- private Path getAssetPath(String path) throws IOException {
- Path assetPath = basePath.resolve(normalize(path)).normalize();
- if (!assetPath.startsWith(basePath))
- {
- throw new IOException("Path navigation out of allowed scope: "+path);
- }
- return assetPath;
- }
-
- @Override
- public void consumeData( StorageAsset asset, Consumer<InputStream> consumerFunction, boolean readLock ) throws IOException
- {
- final Path path = asset.getFilePath();
- try {
- if (readLock) {
- consumeDataLocked( path, consumerFunction );
- } else
- {
- try ( InputStream is = Files.newInputStream( path ) )
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not read the input stream from file {}", path);
- throw e;
- }
- }
- } catch (RuntimeException e)
- {
- log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
- throw new IOException( e );
- }
-
- }
-
- @Override
- public void consumeDataFromChannel( StorageAsset asset, Consumer<ReadableByteChannel> consumerFunction, boolean readLock ) throws IOException
- {
- final Path path = asset.getFilePath();
- try {
- if (readLock) {
- consumeDataFromChannelLocked( path, consumerFunction );
- } else
- {
- try ( FileChannel is = FileChannel.open( path, StandardOpenOption.READ ) )
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not read the input stream from file {}", path);
- throw e;
- }
- }
- } catch (RuntimeException e)
- {
- log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
- throw new IOException( e );
- }
- }
-
- @Override
- public void writeData( StorageAsset asset, Consumer<OutputStream> consumerFunction, boolean writeLock ) throws IOException
- {
- final Path path = asset.getFilePath();
- try {
- if (writeLock) {
- writeDataLocked( path, consumerFunction );
- } else
- {
- try ( OutputStream is = Files.newOutputStream( path ) )
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not write the output stream to file {}", path);
- throw e;
- }
- }
- } catch (RuntimeException e)
- {
- log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
- throw new IOException( e );
- }
-
- }
-
- @Override
- public void writeDataToChannel( StorageAsset asset, Consumer<WritableByteChannel> consumerFunction, boolean writeLock ) throws IOException
- {
- final Path path = asset.getFilePath();
- try {
- if (writeLock) {
- writeDataToChannelLocked( path, consumerFunction );
- } else
- {
- try ( FileChannel os = FileChannel.open( path, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE ))
- {
- consumerFunction.accept( os );
- }
- catch ( IOException e )
- {
- log.error("Could not write the data to file {}", path);
- throw e;
- }
- }
- } catch (RuntimeException e)
- {
- log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
- throw new IOException( e );
- }
- }
-
- private void consumeDataLocked( Path file, Consumer<InputStream> consumerFunction) throws IOException
- {
-
- final Lock lock;
- try
- {
- lock = fileLockManager.readFileLock( file );
- try ( InputStream is = Files.newInputStream( lock.getFile()))
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not read the input stream from file {}", file);
- throw e;
- } finally
- {
- fileLockManager.release( lock );
- }
- }
- catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
- {
- log.error("Locking error on file {}", file);
- throw new IOException(e);
- }
- }
-
- private void consumeDataFromChannelLocked( Path file, Consumer<ReadableByteChannel> consumerFunction) throws IOException
- {
-
- final Lock lock;
- try
- {
- lock = fileLockManager.readFileLock( file );
- try ( FileChannel is = FileChannel.open( lock.getFile( ), StandardOpenOption.READ ))
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not read the input stream from file {}", file);
- throw e;
- } finally
- {
- fileLockManager.release( lock );
- }
- }
- catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
- {
- log.error("Locking error on file {}", file);
- throw new IOException(e);
- }
- }
-
-
- private void writeDataLocked( Path file, Consumer<OutputStream> consumerFunction) throws IOException
- {
-
- final Lock lock;
- try
- {
- lock = fileLockManager.writeFileLock( file );
- try ( OutputStream is = Files.newOutputStream( lock.getFile()))
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not write the output stream to file {}", file);
- throw e;
- } finally
- {
- fileLockManager.release( lock );
- }
- }
- catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
- {
- log.error("Locking error on file {}", file);
- throw new IOException(e);
- }
- }
-
- private void writeDataToChannelLocked( Path file, Consumer<WritableByteChannel> consumerFunction) throws IOException
- {
-
- final Lock lock;
- try
- {
- lock = fileLockManager.writeFileLock( file );
- try ( FileChannel is = FileChannel.open( lock.getFile( ), StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE ))
- {
- consumerFunction.accept( is );
- }
- catch ( IOException e )
- {
- log.error("Could not write to file {}", file);
- throw e;
- } finally
- {
- fileLockManager.release( lock );
- }
- }
- catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
- {
- log.error("Locking error on file {}", file);
- throw new IOException(e);
- }
- }
-
- @Override
- public StorageAsset getAsset( String path )
- {
- try {
- return new FilesystemAsset(this, path, getAssetPath(path));
- } catch (IOException e) {
- throw new IllegalArgumentException("Path navigates outside of base directory "+path);
- }
- }
-
- @Override
- public StorageAsset addAsset( String path, boolean container )
- {
- try {
- return new FilesystemAsset(this, path, getAssetPath(path), basePath, container);
- } catch (IOException e) {
- throw new IllegalArgumentException("Path navigates outside of base directory "+path);
- }
- }
-
- @Override
- public void removeAsset( StorageAsset asset ) throws IOException
- {
- Files.delete(asset.getFilePath());
- }
-
- @Override
- public StorageAsset moveAsset( StorageAsset origin, String destination, CopyOption... copyOptions ) throws IOException
- {
- boolean container = origin.isContainer();
- FilesystemAsset newAsset = new FilesystemAsset(this, destination, getAssetPath(destination), basePath, container );
- moveAsset( origin, newAsset, copyOptions );
- return newAsset;
- }
-
- @Override
- public void moveAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions ) throws IOException
- {
- Files.move(origin.getFilePath(), destination.getFilePath(), copyOptions);
- }
-
- @Override
- public StorageAsset copyAsset( StorageAsset origin, String destination, CopyOption... copyOptions ) throws IOException
- {
- boolean container = origin.isContainer();
- FilesystemAsset newAsset = new FilesystemAsset(this, destination, getAssetPath(destination), basePath, container );
- copyAsset( origin, newAsset, copyOptions );
- return newAsset;
- }
-
- @Override
- public void copyAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions ) throws IOException
- {
- Path destinationPath = destination.getFilePath();
- boolean overwrite = false;
- for (int i=0; i<copyOptions.length; i++) {
- if (copyOptions[i].equals( StandardCopyOption.REPLACE_EXISTING )) {
- overwrite=true;
- }
- }
- if (Files.exists(destinationPath) && !overwrite) {
- throw new IOException("Destination file exists already "+ destinationPath);
- }
- if (Files.isDirectory( origin.getFilePath() ))
- {
- FileUtils.copyDirectory(origin.getFilePath( ).toFile(), destinationPath.toFile() );
- } else if (Files.isRegularFile( origin.getFilePath() )) {
- if (!Files.exists( destinationPath )) {
- Files.createDirectories( destinationPath );
- }
- Files.copy( origin.getFilePath( ), destinationPath, copyOptions );
- }
- }
-
- public FileLockManager getFileLockManager() {
- return fileLockManager;
- }
-
-}
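
FilesystemStorage is relocated the same way. Its class comment documents a path scoping rule worth illustrating: '..' segments are allowed inside the tree, but any path that normalizes to a location outside the base directory is rejected, and getAsset surfaces that as an IllegalArgumentException. A hedged sketch of the behavior, with a placeholder base directory:

    // Sketch only: the path-scoping rule enforced by getAssetPath/getAsset.
    import org.apache.archiva.common.filelock.DefaultFileLockManager;
    import org.apache.archiva.repository.storage.FilesystemStorage;

    import java.nio.file.Paths;

    public class PathScopeSketch {
        public static void main(String[] args) throws Exception {
            FilesystemStorage storage = new FilesystemStorage(
                Paths.get("target", "scoped-repo"), new DefaultFileLockManager());
            // '..' inside the tree is fine; the normalized path stays below the base.
            storage.getAsset("/dir1/../dir2/file.dat");
            try {
                // Navigating above the base path is rejected.
                storage.getAsset("/../outside.dat");
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
        }
    }
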
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.archiva.common.filelock.FileLockException;
-import org.apache.archiva.common.filelock.FileLockManager;
-import org.apache.archiva.common.filelock.FileLockTimeoutException;
-import org.apache.archiva.common.filelock.Lock;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.nio.file.CopyOption;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-/**
- * @author Martin Stockhammer <martin_s@apache.org>
- */
-public class StorageUtil
-{
- private static final int DEFAULT_BUFFER_SIZE = 4096;
-
- /**
- * Copies the source asset to the target. The assets may be from different RepositoryStorage instances.
- *
- * @param source The source asset
- * @param target The target asset
- * @param locked If true, a readlock is set on the source and a write lock is set on the target.
- * @param copyOptions Copy options
- * @throws IOException
- */
- public static final void copyAsset( final StorageAsset source,
- final StorageAsset target,
- boolean locked,
- final CopyOption... copyOptions ) throws IOException
- {
- if (source.isFileBased() && target.isFileBased()) {
- // Short cut for FS operations
- final Path sourcePath = source.getFilePath();
- final Path targetPath = target.getFilePath( );
- if (locked) {
- final FileLockManager lmSource = ((FilesystemStorage)source.getStorage()).getFileLockManager();
- final FileLockManager lmTarget = ((FilesystemStorage)target.getStorage()).getFileLockManager();
- try (Lock lockRead = lmSource.readFileLock( sourcePath ); Lock lockWrite = lmTarget.writeFileLock( targetPath ) )
- {
- Files.copy( sourcePath, targetPath, copyOptions );
- }
- catch ( FileLockException e )
- {
- throw new IOException( e );
- }
- catch ( FileLockTimeoutException e )
- {
- throw new IOException( e );
- }
- } else
- {
- Files.copy( sourcePath, targetPath, copyOptions );
- }
- } else {
- try {
- final RepositoryStorage sourceStorage = source.getStorage();
- final RepositoryStorage targetStorage = target.getStorage();
- sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
- } catch (IOException e) {
- throw e;
- } catch (Throwable e) {
- Throwable cause = e.getCause();
- if (cause instanceof IOException) {
- throw (IOException)cause;
- } else
- {
- throw new IOException( e );
- }
- }
- }
- }
-
- /**
- *
- * @param source
- * @param target
- * @param locked
- * @param copyOptions
- * @throws IOException
- */
- public static void moveAsset(StorageAsset source, StorageAsset target, boolean locked, CopyOption... copyOptions) throws IOException
- {
- if (source.isFileBased() && target.isFileBased()) {
- // Short cut for FS operations
- // Move is atomic operation
- Files.move( source.getFilePath(), target.getFilePath(), copyOptions );
- } else {
- try {
- final RepositoryStorage sourceStorage = source.getStorage();
- final RepositoryStorage targetStorage = target.getStorage();
- sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
- sourceStorage.removeAsset( source );
- } catch (IOException e) {
- throw e;
- } catch (Throwable e) {
- Throwable cause = e.getCause();
- if (cause instanceof IOException) {
- throw (IOException)cause;
- } else
- {
- throw new IOException( e );
- }
- }
- }
-
- }
-
- private static void wrapWriteFunction(ReadableByteChannel is, RepositoryStorage targetStorage, StorageAsset target, boolean locked) {
- try {
- targetStorage.writeDataToChannel( target, os -> copy(is, os), locked );
- } catch (Exception e) {
- throw new RuntimeException( e );
- }
- }
-
-
- private static void copy( final ReadableByteChannel is, final WritableByteChannel os ) {
- if (is instanceof FileChannel) {
- copy( (FileChannel) is, os );
- } else if (os instanceof FileChannel) {
- copy(is, (FileChannel)os);
- } else
- {
- try
- {
- ByteBuffer buffer = ByteBuffer.allocate( DEFAULT_BUFFER_SIZE );
- while ( is.read( buffer ) != -1 )
- {
- buffer.flip( );
- while ( buffer.hasRemaining( ) )
- {
- os.write( buffer );
- }
- buffer.clear( );
- }
- }
- catch ( IOException e )
- {
- throw new RuntimeException( e );
- }
- }
- }
-
- private static void copy( final FileChannel is, final WritableByteChannel os ) {
- try
- {
- is.transferTo( 0, is.size( ), os );
- }
- catch ( IOException e )
- {
- throw new RuntimeException( e );
- }
- }
-
- private static void copy( final ReadableByteChannel is, final FileChannel os ) {
- try
- {
- os.transferFrom( is, 0, Long.MAX_VALUE );
- }
- catch ( IOException e )
- {
- throw new RuntimeException( e );
- }
- }
-
-}
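
StorageUtil moves with the rest of the package (the import updates earlier in this change set reference it from org.apache.archiva.repository.storage). Its copyAsset helper is what makes cross-storage operations work: it short-circuits to Files.copy with optional file locks when both assets are file based, and otherwise streams the content through channels. A usage sketch, with the two storages as placeholders for any RepositoryStorage implementations:

    // Sketch only: copy an asset between two (possibly different) storage instances.
    import org.apache.archiva.repository.storage.RepositoryStorage;
    import org.apache.archiva.repository.storage.StorageAsset;
    import org.apache.archiva.repository.storage.StorageUtil;

    import java.io.IOException;
    import java.nio.file.StandardCopyOption;

    public class ReplicationSketch {
        static void replicate(RepositoryStorage sourceStorage, RepositoryStorage targetStorage,
                              String path) throws IOException {
            StorageAsset source = sourceStorage.getAsset(path);
            StorageAsset target = targetStorage.getAsset(path);
            // locked = true takes a read lock on the source and a write lock on the
            // target in the file-based case; the channel fallback passes the flag through.
            StorageUtil.copyAsset(source, target, true, StandardCopyOption.REPLACE_EXISTING);
        }
    }
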
import org.apache.archiva.repository.LayoutException;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.RemoteRepositoryContent;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang.StringUtils;
ProjectReference reference, String proxyId )
{
String metadataPath = getRepositorySpecificName( proxyId, toPath( reference ) );
- Path metadataFile = Paths.get( managedRepository.getRepoRoot(), metadataPath );
+ StorageAsset metadataFile = managedRepository.getRepository().getAsset( metadataPath );
- if ( !Files.exists(metadataFile) || !Files.isRegularFile( metadataFile ))
+ if ( !metadataFile.exists() || metadataFile.isContainer())
{
// Nothing to do. return null.
return null;
{
return MavenMetadataReader.read( metadataFile );
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
// TODO: [monitor] consider a monitor for this event.
// TODO: consider a read-redo on monitor return code?
- log.warn( "Unable to read metadata: {}", metadataFile.toAbsolutePath(), e );
+ log.warn( "Unable to read metadata: {}", metadataFile.getPath(), e );
return null;
}
}
String logicalResource, String proxyId )
{
String metadataPath = getRepositorySpecificName( proxyId, logicalResource );
- Path metadataFile = Paths.get( managedRepository.getRepoRoot(), metadataPath );
+ StorageAsset metadataFile = managedRepository.getRepository().getAsset( metadataPath );
- if ( !Files.exists(metadataFile) || !Files.isRegularFile( metadataFile))
+ if ( !metadataFile.exists() || metadataFile.isContainer())
{
// Nothing to do. return null.
return null;
{
return MavenMetadataReader.read( metadataFile );
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
// TODO: [monitor] consider a monitor for this event.
// TODO: consider a read-redo on monitor return code?
- log.warn( "Unable to read metadata: {}", metadataFile.toAbsolutePath(), e );
+ log.warn( "Unable to read metadata: {}", metadataFile.getPath(), e );
return null;
}
}
VersionedReference reference, String proxyId )
{
String metadataPath = getRepositorySpecificName( proxyId, toPath( reference ) );
- Path metadataFile = Paths.get( managedRepository.getRepoRoot(), metadataPath );
+ StorageAsset metadataFile = managedRepository.getRepository().getAsset( metadataPath );
- if ( !Files.exists(metadataFile) || !Files.isRegularFile(metadataFile))
+ if ( !metadataFile.exists() || metadataFile.isContainer())
{
// Nothing to do. return null.
return null;
{
return MavenMetadataReader.read( metadataFile );
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
// TODO: [monitor] consider a monitor for this event.
// TODO: consider a read-redo on monitor return code?
- log.warn( "Unable to read metadata: {}", metadataFile.toAbsolutePath(), e );
+ log.warn( "Unable to read metadata: {}", metadataFile.getPath(), e );
return null;
}
}
public void updateMetadata( ManagedRepositoryContent managedRepository, String logicalResource )
throws RepositoryMetadataException
{
- final Path metadataFile = Paths.get( managedRepository.getRepoRoot(), logicalResource );
+ final StorageAsset metadataFile = managedRepository.getRepository().getAsset( logicalResource );
ArchivaRepositoryMetadata metadata = null;
//Gather and merge all metadata available
RepositoryMetadataWriter.write( metadata, metadataFile );
- ChecksummedFile checksum = new ChecksummedFile( metadataFile );
+ ChecksummedFile checksum = new ChecksummedFile( metadataFile.getFilePath() );
checksum.fixChecksums( algorithms );
}
* @param metadataParentDirectory
     * @return original set plus newly found versions
*/
- private Set<String> findPossibleVersions( Set<String> versions, Path metadataParentDirectory )
+ private Set<String> findPossibleVersions( Set<String> versions, StorageAsset metadataParentDirectory )
{
Set<String> result = new HashSet<String>( versions );
- try (Stream<Path> stream = Files.list( metadataParentDirectory )) {
- stream.filter( Files::isDirectory ).filter(
- p ->
- {
- try(Stream<Path> substream = Files.list(p))
- {
- return substream.anyMatch( f -> Files.isRegularFile( f ) && f.toString().endsWith( ".pom" ));
- }
- catch ( IOException e )
- {
- return false;
- }
+ metadataParentDirectory.list().stream().filter(asset ->
+ asset.isContainer()).filter(asset -> {
+ return asset.list().stream().anyMatch(f -> !f.isContainer() && f.getName().endsWith(".pom"));
}
- ).forEach(
- p -> result.add(p.getFileName().toString())
- );
- } catch (IOException e) {
- //
- }
+ ).forEach( p -> result.add(p.getName()));
+
return result;
}
ManagedRepositoryContent managedRepository, String logicalResource )
{
List<ArchivaRepositoryMetadata> metadatas = new ArrayList<>();
- Path file = Paths.get( managedRepository.getRepoRoot(), logicalResource );
- if ( Files.exists(file) )
+ StorageAsset file = managedRepository.getRepository().getAsset( logicalResource );
+
+ if ( file.exists() )
{
try
{
metadatas.add( existingMetadata );
}
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
- log.debug( "Could not read metadata at {}. Metadata will be removed.", file.toAbsolutePath() );
- FileUtils.deleteQuietly( file );
+ log.debug( "Could not read metadata at {}. Metadata will be removed.", file.getPath() );
+ try {
+ file.getStorage().removeAsset(file);
+ } catch (IOException ex) {
+ log.error("Could not remove asset {}", file.getPath());
+ }
}
}
public void updateMetadata( ManagedRepositoryContent managedRepository, ProjectReference reference )
throws LayoutException, RepositoryMetadataException, IOException, ContentNotFoundException
{
- Path metadataFile = Paths.get( managedRepository.getRepoRoot(), toPath( reference ) );
+
+ StorageAsset metadataFile = managedRepository.getRepository().getAsset( toPath( reference ) );
long lastUpdated = getExistingLastUpdated( metadataFile );
// TODO: do we know this information instead?
// Set<Plugin> allPlugins = managedRepository.getPlugins( reference );
Set<Plugin> allPlugins;
- if ( Files.exists(metadataFile))
+ if ( metadataFile.exists())
{
try
{
// Save the metadata model to disk.
RepositoryMetadataWriter.write( metadata, metadataFile );
- ChecksummedFile checksum = new ChecksummedFile( metadataFile );
+ ChecksummedFile checksum = new ChecksummedFile( metadataFile.getFilePath() );
checksum.fixChecksums( algorithms );
}
}
}
- private long getExistingLastUpdated( Path metadataFile )
+ private long getExistingLastUpdated( StorageAsset metadataFile )
{
- if ( !Files.exists(metadataFile) )
+ if ( !metadataFile.exists() )
{
// Doesn't exist.
return 0;
return getLastUpdated( metadata );
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
// Error.
return 0;
public void updateMetadata( ManagedRepositoryContent managedRepository, VersionedReference reference )
throws LayoutException, RepositoryMetadataException, IOException, ContentNotFoundException
{
- Path metadataFile = Paths.get( managedRepository.getRepoRoot(), toPath( reference ) );
+ StorageAsset metadataFile = managedRepository.getRepository().getAsset( toPath( reference ) );
long lastUpdated = getExistingLastUpdated( metadataFile );
// Save the metadata model to disk.
RepositoryMetadataWriter.write( metadata, metadataFile );
- ChecksummedFile checksum = new ChecksummedFile( metadataFile );
+ ChecksummedFile checksum = new ChecksummedFile( metadataFile.getFilePath() );
checksum.fixChecksums( algorithms );
}
import org.apache.archiva.common.utils.FileUtils;
import org.apache.archiva.model.ArchivaRepositoryMetadata;
import org.apache.archiva.model.Plugin;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.archiva.xml.XMLWriter;
import org.apache.commons.collections4.CollectionUtils;
import org.dom4j.Document;
import org.dom4j.DocumentHelper;
import org.dom4j.Element;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.FileWriter;
import java.io.IOException;
+import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.file.Path;
import java.util.Collections;
*/
public class RepositoryMetadataWriter
{
- public static void write( ArchivaRepositoryMetadata metadata, Path outputFile )
+ private static final Logger log = LoggerFactory.getLogger(RepositoryMetadataWriter.class);
+
+ public static void write( ArchivaRepositoryMetadata metadata, StorageAsset outputFile )
throws RepositoryMetadataException
{
boolean thrown = false;
- try (FileWriter writer = new FileWriter( outputFile.toFile() ))
+ try (OutputStreamWriter writer = new OutputStreamWriter( outputFile.getWriteStream(true)))
{
write( metadata, writer );
writer.flush();
{
thrown = true;
throw new RepositoryMetadataException(
- "Unable to write metadata file: " + outputFile.toAbsolutePath() + " - " + e.getMessage(), e );
+ "Unable to write metadata file: " + outputFile.getPath() + " - " + e.getMessage(), e );
}
finally
{
if ( thrown )
{
- FileUtils.deleteQuietly( outputFile );
+ try {
+ outputFile.getStorage().removeAsset(outputFile);
+ } catch (IOException e) {
+ log.error("Could not remove asset {}", outputFile);
+ }
}
}
}
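
RepositoryMetadataWriter.write now accepts a StorageAsset and writes through the asset's write stream, so it no longer assumes a local file. A hedged sketch of the call site; the metadata values, the asset path, and the writer's package are illustrative assumptions:

    // Sketch only: writing maven-metadata.xml through the storage API.
    import org.apache.archiva.model.ArchivaRepositoryMetadata;
    import org.apache.archiva.repository.metadata.RepositoryMetadataWriter; // assumed package
    import org.apache.archiva.repository.storage.RepositoryStorage;
    import org.apache.archiva.repository.storage.StorageAsset;

    public class MetadataWriteSketch {
        static void writeMetadata(RepositoryStorage storage) throws Exception {
            ArchivaRepositoryMetadata metadata = new ArchivaRepositoryMetadata();
            metadata.setGroupId("org.example");   // illustrative coordinates
            metadata.setArtifactId("demo");
            StorageAsset outputFile = storage.getAsset("/org/example/demo/maven-metadata.xml");
            // Replaces the asset content; on failure the asset is removed again.
            RepositoryMetadataWriter.write(metadata, outputFile);
        }
    }
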
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.commons.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.*;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Instant;
-
-import static org.junit.Assert.*;
-
-public class FilesystemAssetTest {
-
- Path assetPathFile;
- Path assetPathDir;
-
- @Before
- public void init() throws IOException {
- assetPathFile = Files.createTempFile("assetFile", "dat");
- assetPathDir = Files.createTempDirectory("assetDir");
- }
-
- @After
- public void cleanup() {
-
- try {
- Files.deleteIfExists(assetPathFile);
- } catch (IOException e) {
- e.printStackTrace();
- }
- try {
- Files.deleteIfExists(assetPathDir);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
-
- @Test
- public void getPath() {
- FilesystemAsset asset = new FilesystemAsset("/"+assetPathFile.getFileName().toString(), assetPathFile);
- assertEquals("/"+assetPathFile.getFileName().toString(), asset.getPath());
- }
-
- @Test
- public void getName() {
- FilesystemAsset asset = new FilesystemAsset("/"+assetPathFile.getFileName().toString(), assetPathFile);
- assertEquals(assetPathFile.getFileName().toString(), asset.getName());
-
- }
-
- @Test
- public void getModificationTime() throws IOException {
- Instant modTime = Files.getLastModifiedTime(assetPathFile).toInstant();
- FilesystemAsset asset = new FilesystemAsset("/test123", assetPathFile);
- assertTrue(modTime.equals(asset.getModificationTime()));
- }
-
- @Test
- public void isContainer() {
- FilesystemAsset asset = new FilesystemAsset("/test1323", assetPathFile);
- assertFalse(asset.isContainer());
- FilesystemAsset asset2 = new FilesystemAsset("/test1234", assetPathDir);
- assertTrue(asset2.isContainer());
- }
-
- @Test
- public void list() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- assertEquals(0, asset.list().size());
-
- FilesystemAsset asset2 = new FilesystemAsset("/test1235", assetPathDir);
- assertEquals(0, asset2.list().size());
- Path f1 = Files.createTempFile(assetPathDir, "testfile", "dat");
- Path f2 = Files.createTempFile(assetPathDir, "testfile", "dat");
- Path d1 = Files.createTempDirectory(assetPathDir, "testdir");
- assertEquals(3, asset2.list().size());
- assertTrue(asset2.list().stream().anyMatch(p -> p.getName().equals(f1.getFileName().toString())));
- assertTrue(asset2.list().stream().anyMatch(p -> p.getName().equals(f2.getFileName().toString())));
- assertTrue(asset2.list().stream().anyMatch(p -> p.getName().equals(d1.getFileName().toString())));
- Files.deleteIfExists(f1);
- Files.deleteIfExists(f2);
- Files.deleteIfExists(d1);
-
-
- }
-
- @Test
- public void getSize() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- assertEquals(0, asset.getSize());
-
- Files.write(assetPathFile, new String("abcdef").getBytes("ASCII"));
- assertTrue(asset.getSize()>=6);
-
-
- }
-
- @Test
- public void getData() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
- try(InputStream is = asset.getReadStream()) {
- assertEquals("abcdef", IOUtils.toString(is, "ASCII"));
- }
-
- }
-
- @Test
- public void getDataExceptionOnDir() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathDir);
- Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
- try {
- InputStream is = asset.getReadStream();
- assertFalse("Exception expected for data on dir", true);
- } catch (IOException e) {
- // fine
- }
-
- }
-
- @Test
- public void writeData() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
- try(OutputStream os = asset.getWriteStream(true)) {
- IOUtils.write("test12345", os, "ASCII");
- }
- assertEquals("test12345", IOUtils.toString(assetPathFile.toUri().toURL(), "ASCII"));
- }
-
- @Test
- public void writeDataAppend() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
- try(OutputStream os = asset.getWriteStream(false)) {
- IOUtils.write("test12345", os, "ASCII");
- }
- assertEquals("abcdeftest12345", IOUtils.toString(assetPathFile.toUri().toURL(), "ASCII"));
- }
-
- @Test
- public void writeDataExceptionOnDir() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathDir);
- try {
-
- OutputStream os = asset.getWriteStream(true);
- assertTrue("Writing to a directory should throw a IOException", false);
- } catch (IOException e) {
- // Fine
- }
- }
-
- @Test
- public void storeDataFile() throws IOException {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- Path dataFile = Files.createTempFile("testdata", "dat");
- try(OutputStream os = Files.newOutputStream(dataFile)) {
- IOUtils.write("testkdkdkd", os, "ASCII");
- }
- asset.replaceDataFromFile(dataFile);
- assertEquals("testkdkdkd", IOUtils.toString(assetPathFile.toUri().toURL(), "ASCII"));
- }
-
- @Test
- public void exists() {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- assertTrue(asset.exists());
- FilesystemAsset asset2 = new FilesystemAsset("/test1234", Paths.get("abcdefgkdkdk"));
- assertFalse(asset2.exists());
-
- }
-
- @Test
- public void getFilePath() {
- FilesystemAsset asset = new FilesystemAsset("/test1234", assetPathFile);
- assertEquals(assetPathFile, asset.getFilePath());
- }
-}
\ No newline at end of file
+++ /dev/null
-package org.apache.archiva.repository.content;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.archiva.common.filelock.DefaultFileLockManager;
-import org.apache.commons.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-import static org.junit.Assert.*;
-
-public class FilesystemStorageTest {
-
- private FilesystemStorage fsStorage;
- private FilesystemAsset file1Asset;
- private FilesystemAsset dir1Asset;
- private Path baseDir;
- private Path file1;
- private Path dir1;
-
- @Before
- public void init() throws IOException {
- baseDir = Files.createTempDirectory("FsStorageTest");
- DefaultFileLockManager fl = new DefaultFileLockManager();
- fsStorage = new FilesystemStorage(baseDir,fl);
- Files.createDirectories(baseDir.resolve("dir1"));
- Files.createDirectories(baseDir.resolve("dir2"));
- file1 = Files.createFile(baseDir.resolve("dir1/testfile1.dat"));
- dir1 = Files.createDirectories(baseDir.resolve("dir1/testdir"));
- file1Asset = new FilesystemAsset("/dir1/testfile1.dat", file1);
- dir1Asset = new FilesystemAsset("/dir1/testdir", dir1);
- }
-
- private class StringResult {
- public String getData() {
- return data;
- }
-
- public void setData(String data) {
- this.data = data;
- }
-
- String data;
- }
-
-
- @After
- public void cleanup() {
- try {
- Files.deleteIfExists(file1);
- } catch (IOException e) {
- e.printStackTrace();
- }
- try {
- Files.deleteIfExists(dir1);
- } catch (IOException e) {
- e.printStackTrace();
- }
- try {
- Files.deleteIfExists(baseDir.resolve("dir1"));
- } catch (IOException e) {
- e.printStackTrace();
- }
- try {
- Files.deleteIfExists(baseDir.resolve("dir2"));
- } catch (IOException e) {
- e.printStackTrace();
- }
- try {
- Files.deleteIfExists(baseDir);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
-
-
-
- @Test
- public void consumeData() throws IOException {
- try(OutputStream os = Files.newOutputStream(file1)) {
- IOUtils.write("abcdefghijkl", os, "ASCII");
- }
- StringResult result = new StringResult();
- fsStorage.consumeData(file1Asset, is -> consume(is, result), false );
- assertEquals("abcdefghijkl" ,result.getData());
- }
-
- private void consume(InputStream is, StringResult result) {
- try {
- result.setData(IOUtils.toString(is, "ASCII"));
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
-
- @Test
- public void getAsset() {
- StorageAsset asset = fsStorage.getAsset("/dir1/testfile1.dat");
- assertEquals(file1, asset.getFilePath());
- }
-
- @Test
- public void addAsset() {
- StorageAsset newAsset = fsStorage.addAsset("dir2/test", false);
- assertNotNull(newAsset);
- assertFalse(newAsset.isContainer());
- assertFalse(newAsset.exists());
-
- StorageAsset newDirAsset = fsStorage.addAsset("/dir2/testdir2", true);
- assertNotNull(newDirAsset);
- assertTrue(newDirAsset.isContainer());
- assertFalse(newDirAsset.exists());
- }
-
- @Test
- public void removeAsset() throws IOException {
- assertTrue(Files.exists(file1));
- fsStorage.removeAsset(file1Asset);
- assertFalse(Files.exists(file1));
-
- assertTrue(Files.exists(dir1));
- fsStorage.removeAsset(dir1Asset);
- assertFalse(Files.exists(dir1));
- }
-
- @Test
- public void moveAsset() throws IOException {
- Path newFile=null;
- Path newDir=null;
- try {
- assertTrue(Files.exists(file1));
- try (OutputStream os = Files.newOutputStream(file1)) {
- IOUtils.write("testakdkkdkdkdk", os, "ASCII");
- }
- long fileSize = Files.size(file1);
- fsStorage.moveAsset(file1Asset, "/dir2/testfile2.dat");
- assertFalse(Files.exists(file1));
- newFile = baseDir.resolve("dir2/testfile2.dat");
- assertTrue(Files.exists(newFile));
- assertEquals(fileSize, Files.size(newFile));
-
-
- assertTrue(Files.exists(dir1));
- newDir = baseDir.resolve("dir2/testdir2");
- fsStorage.moveAsset(dir1Asset, "dir2/testdir2");
- assertFalse(Files.exists(dir1));
- assertTrue(Files.exists(newDir));
- } finally {
- if (newFile!=null) Files.deleteIfExists(newFile);
- if (newDir!=null) Files.deleteIfExists(newDir);
- }
- }
-
- @Test
- public void copyAsset() throws IOException {
- Path newFile=null;
- Path newDir=null;
- try {
- assertTrue(Files.exists(file1));
- try (OutputStream os = Files.newOutputStream(file1)) {
- IOUtils.write("testakdkkdkdkdk", os, "ASCII");
- }
- long fileSize = Files.size(file1);
- fsStorage.copyAsset(file1Asset, "/dir2/testfile2.dat");
- assertTrue(Files.exists(file1));
- assertEquals(fileSize, Files.size(file1));
- newFile = baseDir.resolve("dir2/testfile2.dat");
- assertTrue(Files.exists(newFile));
- assertEquals(fileSize, Files.size(newFile));
-
-
- assertTrue(Files.exists(dir1));
- newDir = baseDir.resolve("dir2/testdir2");
- fsStorage.copyAsset(dir1Asset, "dir2/testdir2");
- assertTrue(Files.exists(dir1));
- assertTrue(Files.exists(newDir));
- } finally {
- if (newFile!=null) Files.deleteIfExists(newFile);
- if (newDir!=null) Files.deleteIfExists(newDir);
- }
- }
-}
\ No newline at end of file
import org.apache.archiva.repository.ManagedRepository;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.RepositoryException;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.springframework.stereotype.Service;
import java.util.Set;
import org.apache.archiva.consumers.KnownRepositoryContentConsumer;
import org.apache.archiva.consumers.RepositoryContentConsumer;
import org.apache.archiva.repository.ManagedRepository;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.archiva.repository.EditableManagedRepository;
import org.apache.archiva.repository.EditableRemoteRepository;
import org.apache.archiva.repository.ManagedRepository;
-import org.apache.archiva.repository.content.FilesystemStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.scanner.mock.ManagedRepositoryContentMock;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
import org.apache.commons.io.FileUtils;
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.VersionUtil;
import org.apache.archiva.metadata.model.ArtifactMetadata;
import org.apache.archiva.metadata.model.maven2.MavenArtifactFacet;
import org.apache.archiva.model.ProjectReference;
import org.apache.archiva.model.VersionedReference;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
+import java.io.IOException;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
private ManagedRepository repository;
+ private FilesystemStorage fsStorage;
public ManagedRepositoryContentMock(ManagedRepository repo) {
this.repository = repo;
@Override
public String getRepoRoot( )
{
- return Paths.get("", "target", "test-repository", "managed").toString();
+ return getRepoRootAsset().getFilePath().toString();
+ }
+
+ private StorageAsset getRepoRootAsset() {
+ if (fsStorage==null) {
+ try {
+ fsStorage = new FilesystemStorage(Paths.get("", "target", "test-repository", "managed"), new DefaultFileLockManager());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ return fsStorage.getAsset("");
}
@Override
@Override
public StorageAsset toFile( ArtifactReference reference )
{
- return Paths.get(getRepoRoot(), refs.get(reference));
+ return getRepoRootAsset().resolve(refs.get(reference));
}
@Override
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>archiva-base</artifactId>
+ <groupId>org.apache.archiva</groupId>
+ <version>3.0.0-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>archiva-storage-api</artifactId>
+
+  <name>Archiva Base :: Storage API</name>
+
+ <properties>
+ <site.staging.base>${project.parent.parent.basedir}</site.staging.base>
+ </properties>
+
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <systemPropertyVariables>
+ <basedir>${basedir}</basedir>
+ </systemPropertyVariables>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
\ No newline at end of file
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.CopyOption;
+import java.util.function.Consumer;
+
+/**
+ *
+ * This is the low level API to access artifacts in a repository. Each artifact is represented
+ * by one storage asset. Each asset can be accessed by a path that is independent of the underlying storage
+ * implementation. Paths always use '/' as path separator. The path is local to the repository and
+ * is unique for each asset.
+ * The storage API knows nothing about the repository layout or repository specific metadata.
+ * If you use this API you must either have knowledge about the specific repository layout or use the structure
+ * as it is, e.g. for browsing.
+ *
+ * The base implementation for the storage uses a directory structure on the local filesystem.
+ *
+ *
+ * It is the decision of the repository type specific implementation whether this API provides access to
+ * all elements that are really stored, or just a selected view.
+ *
+ * Checking access is not part of this API.
+ */
+public interface RepositoryStorage {
+ /**
+ * Returns information about a specific storage asset.
+     * @param path The path of the asset, relative to the repository root.
+     * @return The asset representation; the asset may not exist yet in the underlying storage.
+ */
+ StorageAsset getAsset(String path);
+
+ /**
+     * Consumes the data, optionally setting a lock on the file during the operation.
+ *
+ * @param asset The asset from which the data is consumed.
+ * @param consumerFunction The consumer that reads the data
+ * @param readLock If true, a read lock is acquired on the asset.
+ * @throws IOException
+ */
+ void consumeData(StorageAsset asset, Consumer<InputStream> consumerFunction, boolean readLock) throws IOException;
+
+ /**
+     * Consumes the data, optionally setting a lock on the file during the operation.
+ *
+ * @param asset The asset from which the data is consumed.
+ * @param consumerFunction The consumer that reads the data
+ * @param readLock If true, a read lock is acquired on the asset.
+ * @throws IOException
+ */
+ void consumeDataFromChannel( StorageAsset asset, Consumer<ReadableByteChannel> consumerFunction, boolean readLock) throws IOException;
+
+ /**
+     * Writes data to the asset, optionally using a write lock.
+ *
+ * @param asset The asset to which the data is written.
+ * @param consumerFunction The function that provides the data.
+ * @param writeLock If true, a write lock is acquired on the destination.
+ */
+    void writeData( StorageAsset asset, Consumer<OutputStream> consumerFunction, boolean writeLock) throws IOException;
+
+ /**
+     * Writes data, optionally setting a lock during the operation.
+ *
+ * @param asset The asset to which the data is written.
+ * @param consumerFunction The function that provides the data.
+ * @param writeLock If true, a write lock is acquired on the destination.
+ * @throws IOException
+ */
+ void writeDataToChannel( StorageAsset asset, Consumer<WritableByteChannel> consumerFunction, boolean writeLock) throws IOException;
+
+ /**
+ * Adds a new asset to the underlying storage.
+ * @param path The path to the asset.
+     * @param container True, if the asset should be a container; false, if it is a file.
+ * @return
+ */
+ StorageAsset addAsset(String path, boolean container);
+
+ /**
+ * Removes the given asset from the storage.
+ *
+ * @param asset
+ * @throws IOException
+ */
+ void removeAsset(StorageAsset asset) throws IOException;
+
+ /**
+ * Moves the asset to the given location and returns the asset object for the destination. Moves only assets that
+     * belong to the same storage instance. It will throw an IOException if the assets are from different storage
+ * instances.
+ *
+ * @param origin The original asset
+ * @param destination The destination path pointing to the new asset.
+ * @param copyOptions The copy options.
+ * @return The asset representation of the moved object.
+ */
+ StorageAsset moveAsset(StorageAsset origin, String destination, CopyOption... copyOptions) throws IOException;
+
+ /**
+ * Moves the asset to the given location and returns the asset object for the destination. Moves only assets that
+     * belong to the same storage instance. It will throw an IOException if the assets are from different storage
+     * instances.
+     *
+ * @param origin The original asset
+ * @param destination The destination path.
+     * @param copyOptions The copy options (e.g. {@link java.nio.file.StandardCopyOption#REPLACE_EXISTING})
+ * @throws IOException If it was not possible to copy the asset.
+ */
+ void moveAsset(StorageAsset origin, StorageAsset destination, CopyOption... copyOptions) throws IOException;
+
+ /**
+ * Copies the given asset to the new destination. Copies only assets that belong to the same storage instance.
+     * It will throw an IOException if the assets are from different storage instances.
+ *
+ * @param origin The original asset
+ * @param destination The path to the new asset
+     * @param copyOptions The copy options (e.g. {@link java.nio.file.StandardCopyOption#REPLACE_EXISTING})
+ * @return The asset representation of the copied object
+ * @throws IOException If it was not possible to copy the asset
+ */
+ StorageAsset copyAsset(StorageAsset origin, String destination, CopyOption... copyOptions) throws IOException;
+
+ /**
+ * Copies the given asset to the new destination. Copies only assets that belong to the same storage instance.
+     * It will throw an IOException if the assets are from different storage instances.
+ *
+ * @param origin The original asset
+ * @param destination The path to the new asset
+     * @param copyOptions The copy options (e.g. {@link java.nio.file.StandardCopyOption#REPLACE_EXISTING})
+ * @throws IOException If it was not possible to copy the asset
+ */
+ void copyAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions) throws IOException;
+
+
+}
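
A short usage sketch for the interface above, reading an asset's content with an optional read lock. The storage parameter stands for any implementation; IOUtils is used only for brevity:

    // Sketch only: consume asset data through the RepositoryStorage interface.
    import org.apache.archiva.repository.storage.RepositoryStorage;
    import org.apache.archiva.repository.storage.StorageAsset;
    import org.apache.commons.io.IOUtils;

    import java.io.IOException;
    import java.io.UncheckedIOException;

    public class ConsumeSketch {
        static String readAscii(RepositoryStorage storage, String path) throws IOException {
            StorageAsset asset = storage.getAsset(path);
            StringBuilder result = new StringBuilder();
            // readLock = true holds a read lock while the consumer runs; runtime
            // exceptions thrown inside the consumer are rethrown as IOException.
            storage.consumeData(asset, is -> {
                try {
                    result.append(IOUtils.toString(is, "ASCII"));
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }, true);
            return result.toString();
        }
    }
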
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.Path;
+import java.time.Instant;
+import java.util.List;
+
+/**
+ * An instance of this interface represents information about a specific asset in a repository.
+ * The asset may be a real artifact, a directory, or a virtual asset.
+ *
+ * Each asset has a unique path relative to the repository.
+ *
+ * The implementation may read the data directly from the filesystem or underlying storage implementation.
+ *
+ * @author Martin Stockhammer <martin_s@apache.org>
+ */
+public interface StorageAsset
+{
+
+ /**
+ * Returns the storage this asset belongs to.
+ * @return The repository storage instance.
+ */
+ RepositoryStorage getStorage();
+
+ /**
+ * Returns the complete path relative to the repository to the given asset.
+ *
+ * @return A path starting with '/' that uniquely identifies the asset in the repository.
+ */
+ String getPath();
+
+ /**
+ * Returns the name of the asset. It may be just the filename.
+ * @return The name of the asset.
+ */
+ String getName();
+
+ /**
+ * Returns the time of the last modification.
+ *
+ * @return The instant of the last modification.
+ */
+ Instant getModificationTime();
+
+ /**
+ * Returns true, if this asset is a container type and contains further child assets.
+ * @return <code>true</code>, if this is a container type, otherwise <code>false</code>.
+ */
+ boolean isContainer();
+
+ /**
+ * List the child assets.
+ *
+ * @return The list of children. If there are no children or the asset is not a container, an empty list will be returned.
+ */
+ List<StorageAsset> list();
+
+ /**
+ * The size in bytes of the asset. If the asset does not have a size, -1 should be returned.
+ *
+ * @return The size if the asset has a size, otherwise -1
+ */
+ long getSize();
+
+ /**
+ * Returns the input stream of the artifact content.
+ * It will throw an IOException, if the stream could not be created.
+ * Implementations should create a new stream instance for each invocation and make sure that the
+ * stream is properly closed after usage.
+ *
+ * @return The InputStream representing the content of the artifact.
+ * @throws IOException If the stream could not be created.
+ */
+ InputStream getReadStream() throws IOException;
+
+ /**
+ * Returns a NIO representation of the data.
+ *
+ * @return A channel to the asset data.
+ * @throws IOException If the channel could not be created.
+ */
+ ReadableByteChannel getReadChannel() throws IOException;
+
+ /**
+ * Returns an output stream where you can write data to the asset. The operation is not locked or synchronized.
+ * Users of this method have to make sure that the stream is properly closed after usage.
+ *
+ * @param replace If true, the original data will be replaced, otherwise the data will be appended.
+ * @return The OutputStream where the data can be written.
+ * @throws IOException If the stream could not be created.
+ */
+ OutputStream getWriteStream( boolean replace) throws IOException;
+
+ /**
+ * Returns a NIO representation of the asset where you can write the data.
+ *
+ * @param replace True, if the content should be replaced by the data written to the channel.
+ * @return The channel for writing the data.
+ * @throws IOException If the channel could not be created.
+ */
+ WritableByteChannel getWriteChannel( boolean replace) throws IOException;
+
+ /**
+ * Replaces the content with the data from the given file. The implementation may do an atomic move operation,
+ * or keep a backup. If the operation fails, the implementation should try to restore the old data, if possible.
+ *
+ * The original file may be deleted, if the storage was successful.
+ *
+ * @param newData The file whose content replaces the current data.
+ * @return <code>true</code>, if the content was replaced.
+ * @throws IOException If the content could not be replaced.
+ */
+ boolean replaceDataFromFile( Path newData) throws IOException;
+
+ /**
+ * Returns true, if the asset exists.
+ *
+ * @return True, if the asset exists, otherwise false.
+ */
+ boolean exists();
+
+ /**
+ * Creates the asset in the underlying storage, if it does not exist.
+ */
+ void create() throws IOException;
+
+ /**
+ * Returns the real path to the asset, if it exists. Not all implementations may implement this method.
+ * The method throws {@link UnsupportedOperationException}, if and only if {@link #isFileBased()} returns false.
+ *
+ * @return The filesystem path to the asset.
+ * @throws UnsupportedOperationException If the underlying storage is not file based.
+ */
+ Path getFilePath() throws UnsupportedOperationException;
+
+ /**
+ * Returns true, if this asset can provide a file path. If this is true, {@link #getFilePath()}
+ * will not throw an {@link UnsupportedOperationException}.
+ *
+ * @return <code>true</code>, if the asset is file based, otherwise <code>false</code>.
+ */
+ boolean isFileBased();
+
+ /**
+ * Returns true, if there is a parent to this asset.
+ * @return <code>true</code>, if a parent exists, otherwise <code>false</code>.
+ */
+ boolean hasParent();
+
+ /**
+ * Returns the parent of this asset.
+ * @return The parent asset, or <code>null</code>, if no parent exists.
+ */
+ StorageAsset getParent();
+
+ /**
+ * Returns the asset resolved relative to this asset.
+ * @param toPath The path to resolve against this asset.
+ * @return The resolved asset.
+ */
+ StorageAsset resolve(String toPath);
+}
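A short sketch (illustrative only, not part of this change set) of the read/write stream contract described above, assuming the FilesystemStorage implementation added later in this patch and commons-io for stream handling:

package org.apache.archiva.repository.storage;

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.commons.io.IOUtils;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;

public class StorageAssetStreamSketch {
    public static void main(String[] args) throws IOException {
        RepositoryStorage storage = new FilesystemStorage(
                Files.createTempDirectory("sketch"), new DefaultFileLockManager());
        StorageAsset asset = storage.addAsset("/docs/readme.txt", false);
        // getWriteStream(true) replaces existing content; the caller must close the stream.
        try (OutputStream os = asset.getWriteStream(true)) {
            IOUtils.write("hello", os, "ASCII");
        }
        // Each getReadStream() call returns a fresh stream; close it after use.
        try (InputStream is = asset.getReadStream()) {
            System.out.println(IOUtils.toString(is, "ASCII"));
        }
    }
}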
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <artifactId>archiva-base</artifactId>
+ <groupId>org.apache.archiva</groupId>
+ <version>3.0.0-SNAPSHOT</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+
+ <artifactId>archiva-storage-fs</artifactId>
+
+ <name>Archiva Base :: Storage Filesystem Based</name>
+
+ <properties>
+ <site.staging.base>${project.parent.parent.basedir}</site.staging.base>
+ </properties>
+
+ <dependencies>
+
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-filelock</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ </dependency>
+
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <systemPropertyVariables>
+ <basedir>${basedir}</basedir>
+ </systemPropertyVariables>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
\ No newline at end of file
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.FileChannel;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.*;
+import java.nio.file.attribute.*;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Implementation of an asset that is stored on the filesystem.
+ * <p>
+ * The implementation does not check the given paths. Callers should normalize the asset path
+ * and check, if the base path is a parent of the resulting path.
+ * <p>
+ * The file does not need to exist for all operations.
+ *
+ * @author Martin Stockhammer <martin_s@apache.org>
+ */
+public class FilesystemAsset implements StorageAsset {
+
+ private final static Logger log = LoggerFactory.getLogger(FilesystemAsset.class);
+
+ private final Path basePath;
+ private final Path assetPath;
+ private final String relativePath;
+
+ public static final String DEFAULT_POSIX_FILE_PERMS = "rw-rw----";
+ public static final String DEFAULT_POSIX_DIR_PERMS = "rwxrwx---";
+
+ public static final Set<PosixFilePermission> DEFAULT_POSIX_FILE_PERMISSIONS;
+ public static final Set<PosixFilePermission> DEFAULT_POSIX_DIR_PERMISSIONS;
+
+ public static final AclEntryPermission[] DEFAULT_ACL_FILE_PERMISSIONS = new AclEntryPermission[]{
+ AclEntryPermission.DELETE, AclEntryPermission.READ_ACL, AclEntryPermission.READ_ATTRIBUTES, AclEntryPermission.READ_DATA, AclEntryPermission.WRITE_ACL,
+ AclEntryPermission.WRITE_ATTRIBUTES, AclEntryPermission.WRITE_DATA, AclEntryPermission.APPEND_DATA
+ };
+
+ public static final AclEntryPermission[] DEFAULT_ACL_DIR_PERMISSIONS = new AclEntryPermission[]{
+ AclEntryPermission.ADD_FILE, AclEntryPermission.ADD_SUBDIRECTORY, AclEntryPermission.DELETE_CHILD,
+ AclEntryPermission.DELETE, AclEntryPermission.READ_ACL, AclEntryPermission.READ_ATTRIBUTES, AclEntryPermission.READ_DATA, AclEntryPermission.WRITE_ACL,
+ AclEntryPermission.WRITE_ATTRIBUTES, AclEntryPermission.WRITE_DATA, AclEntryPermission.APPEND_DATA
+ };
+
+ static {
+
+ DEFAULT_POSIX_FILE_PERMISSIONS = PosixFilePermissions.fromString(DEFAULT_POSIX_FILE_PERMS);
+ DEFAULT_POSIX_DIR_PERMISSIONS = PosixFilePermissions.fromString(DEFAULT_POSIX_DIR_PERMS);
+ }
+
+ Set<PosixFilePermission> defaultPosixFilePermissions = DEFAULT_POSIX_FILE_PERMISSIONS;
+ Set<PosixFilePermission> defaultPosixDirectoryPermissions = DEFAULT_POSIX_DIR_PERMISSIONS;
+
+ List<AclEntry> defaultFileAcls;
+ List<AclEntry> defaultDirectoryAcls;
+
+ boolean supportsAcl = false;
+ boolean supportsPosix = false;
+ final boolean setPermissionsForNew;
+ final RepositoryStorage storage;
+
+ boolean directoryHint = false;
+
+ private static final OpenOption[] REPLACE_OPTIONS = new OpenOption[]{StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE};
+ private static final OpenOption[] APPEND_OPTIONS = new OpenOption[]{StandardOpenOption.APPEND};
+
+
+ FilesystemAsset(RepositoryStorage storage, String path, Path assetPath, Path basePath) {
+ this.assetPath = assetPath;
+ this.relativePath = normalizePath(path);
+ this.setPermissionsForNew=false;
+ this.basePath = basePath;
+ this.storage = storage;
+ init();
+ }
+
+ /**
+ * Creates an asset for the given path. The given paths are not checked.
+ * The base path should be an absolute path.
+ *
+ * @param storage The storage this asset belongs to.
+ * @param path The logical path for the asset relative to the repository.
+ * @param assetPath The asset path.
+ */
+ public FilesystemAsset(RepositoryStorage storage, String path, Path assetPath) {
+ this.assetPath = assetPath;
+ this.relativePath = normalizePath(path);
+ this.setPermissionsForNew = false;
+ this.basePath = null;
+ this.storage = storage;
+ init();
+ }
+
+ /**
+ * Creates an asset for the given path. The given paths are not checked.
+ * The base path should be an absolute path.
+ *
+ * @param storage The storage this asset belongs to.
+ * @param path The logical path for the asset relative to the repository
+ * @param assetPath The asset path.
+ * @param basePath The base path of the storage.
+ * @param directory Hint whether the asset is a directory; only relevant, if the represented
+ * file or directory does not exist yet.
+ */
+ public FilesystemAsset(RepositoryStorage storage, String path, Path assetPath, Path basePath, boolean directory) {
+ this.assetPath = assetPath;
+ this.relativePath = normalizePath(path);
+ this.directoryHint = directory;
+ this.setPermissionsForNew = false;
+ this.basePath = basePath;
+ this.storage = storage;
+ init();
+ }
+
+ /**
+ * Creates an asset for the given path. The given paths are not checked.
+ * The base path should be an absolute path.
+ *
+ * @param storage The storage this asset belongs to.
+ * @param path The logical path for the asset relative to the repository
+ * @param assetPath The asset path.
+ * @param basePath The base path of the storage.
+ * @param directory Hint whether the asset is a directory; only relevant, if the represented
+ * file or directory does not exist yet.
+ * @param setPermissionsForNew If true, default permissions are applied to newly created files and directories.
+ */
+ public FilesystemAsset(RepositoryStorage storage, String path, Path assetPath, Path basePath, boolean directory, boolean setPermissionsForNew) {
+ this.assetPath = assetPath;
+ this.relativePath = normalizePath(path);
+ this.directoryHint = directory;
+ this.setPermissionsForNew = setPermissionsForNew;
+ this.basePath = basePath;
+ this.storage = storage;
+ init();
+ }
+
+ private String normalizePath(String path) {
+ if (!path.startsWith("/")) {
+ return "/"+path;
+ } else {
+ return path;
+ }
+ }
+
+ private void init() {
+
+ if (setPermissionsForNew) {
+ try {
+ supportsAcl = Files.getFileStore(assetPath.getRoot()).supportsFileAttributeView(AclFileAttributeView.class);
+ } catch (IOException e) {
+ log.error("Could not check filesystem capabilities {}", e.getMessage());
+ }
+ try {
+ supportsPosix = Files.getFileStore(assetPath.getRoot()).supportsFileAttributeView(PosixFileAttributeView.class);
+ } catch (IOException e) {
+ log.error("Could not check filesystem capabilities {}", e.getMessage());
+ }
+
+ if (supportsAcl) {
+ AclFileAttributeView aclView = Files.getFileAttributeView(assetPath.getParent(), AclFileAttributeView.class);
+ UserPrincipal owner = null;
+ try {
+ owner = aclView.getOwner();
+ setDefaultFileAcls(processPermissions(owner, DEFAULT_ACL_FILE_PERMISSIONS));
+ setDefaultDirectoryAcls(processPermissions(owner, DEFAULT_ACL_DIR_PERMISSIONS));
+
+ } catch (IOException e) {
+ supportsAcl = false;
+ }
+
+
+ }
+ }
+ }
+
+ private List<AclEntry> processPermissions(UserPrincipal owner, AclEntryPermission[] defaultAclFilePermissions) {
+ AclEntry.Builder aclBuilder = AclEntry.newBuilder();
+ aclBuilder.setPermissions(defaultAclFilePermissions);
+ aclBuilder.setType(AclEntryType.ALLOW);
+ aclBuilder.setPrincipal(owner);
+ ArrayList<AclEntry> aclList = new ArrayList<>();
+ aclList.add(aclBuilder.build());
+ return aclList;
+ }
+
+
+ @Override
+ public RepositoryStorage getStorage( )
+ {
+ return storage;
+ }
+
+ @Override
+ public String getPath() {
+ return relativePath;
+ }
+
+ @Override
+ public String getName() {
+ return assetPath.getFileName().toString();
+ }
+
+ @Override
+ public Instant getModificationTime() {
+ try {
+ return Files.getLastModifiedTime(assetPath).toInstant();
+ } catch (IOException e) {
+ log.error("Could not read modification time of {}", assetPath);
+ return Instant.now();
+ }
+ }
+
+ /**
+ * Returns true, if the path of this asset points to a directory.
+ *
+ * @return <code>true</code>, if this asset is a directory, otherwise <code>false</code>.
+ */
+ @Override
+ public boolean isContainer() {
+ if (Files.exists(assetPath)) {
+ return Files.isDirectory(assetPath);
+ } else {
+ return directoryHint;
+ }
+ }
+
+ /**
+ * Returns the list of directory entries, if this asset represents a directory.
+ * Otherwise an empty list will be returned.
+ *
+ * @return The list of entries in the directory, if it exists.
+ */
+ @Override
+ public List<StorageAsset> list() {
+ try {
+ return Files.list(assetPath).map(p -> new FilesystemAsset(storage, relativePath + "/" + p.getFileName().toString(), assetPath.resolve(p)))
+ .collect(Collectors.toList());
+ } catch (IOException e) {
+ return Collections.emptyList();
+ }
+ }
+
+ /**
+ * Returns the size of the represented file. If it cannot be determined, -1 is returned.
+ *
+ * @return The size in bytes, or -1 if unknown.
+ */
+ @Override
+ public long getSize() {
+ try {
+ return Files.size(assetPath);
+ } catch (IOException e) {
+ return -1;
+ }
+ }
+
+ /**
+ * Returns an input stream to the underlying file, if it exists. The caller has to make sure that
+ * the stream is closed after use.
+ *
+ * @return The input stream to the file content.
+ * @throws IOException If the stream could not be created, or the asset is a container.
+ */
+ @Override
+ public InputStream getReadStream() throws IOException {
+ if (isContainer()) {
+ throw new IOException("Can not create input stream for container");
+ }
+ return Files.newInputStream(assetPath);
+ }
+
+ @Override
+ public ReadableByteChannel getReadChannel( ) throws IOException
+ {
+ return FileChannel.open( assetPath, StandardOpenOption.READ );
+ }
+
+ private OpenOption[] getOpenOptions(boolean replace) {
+ return replace ? REPLACE_OPTIONS : APPEND_OPTIONS;
+ }
+
+ @Override
+ public OutputStream getWriteStream( boolean replace) throws IOException {
+ OpenOption[] options = getOpenOptions( replace );
+ if (!Files.exists( assetPath )) {
+ create();
+ }
+ return Files.newOutputStream(assetPath, options);
+ }
+
+ @Override
+ public WritableByteChannel getWriteChannel( boolean replace ) throws IOException
+ {
+ OpenOption[] options = getOpenOptions( replace );
+ return FileChannel.open( assetPath, options );
+ }
+
+ @Override
+ public boolean replaceDataFromFile( Path newData) throws IOException {
+ final boolean createNew = !Files.exists(assetPath);
+ Path backup = null;
+ if (!createNew) {
+ backup = findBackupFile(assetPath);
+ }
+ try {
+ if (!createNew) {
+ Files.move(assetPath, backup);
+ }
+ Files.move(newData, assetPath, StandardCopyOption.REPLACE_EXISTING);
+ applyDefaultPermissions(assetPath);
+ return true;
+ } catch (IOException e) {
+ log.error("Could not overwrite file {}", assetPath);
+ // Revert if possible
+ if (backup != null && Files.exists(backup)) {
+ Files.move(backup, assetPath, StandardCopyOption.REPLACE_EXISTING);
+ }
+ throw e;
+ } finally {
+ if (backup != null) {
+ try {
+ Files.deleteIfExists(backup);
+ } catch (IOException e) {
+ log.error("Could not delete backup file {}", backup);
+ }
+ }
+ }
+
+ }
+
+ private void applyDefaultPermissions(Path filePath) {
+ try {
+ if (supportsPosix) {
+ Set<PosixFilePermission> perms;
+ if (Files.isDirectory(filePath)) {
+ perms = defaultPosixDirectoryPermissions;
+ } else {
+ perms = defaultPosixFilePermissions;
+ }
+ Files.setPosixFilePermissions(filePath, perms);
+ } else if (supportsAcl) {
+ List<AclEntry> perms;
+ if (Files.isDirectory(filePath)) {
+ perms = getDefaultDirectoryAcls();
+ } else {
+ perms = getDefaultFileAcls();
+ }
+ AclFileAttributeView aclAttr = Files.getFileAttributeView(filePath, AclFileAttributeView.class);
+ aclAttr.setAcl(perms);
+ }
+ } catch (IOException e) {
+ log.error("Could not set permissions for {}: {}", filePath, e.getMessage());
+ }
+ }
+
+ private Path findBackupFile(Path file) {
+ String ext = ".bak";
+ Path backupPath = file.getParent().resolve(file.getFileName().toString() + ext);
+ int idx = 0;
+ while (Files.exists(backupPath)) {
+ backupPath = file.getParent().resolve(file.getFileName().toString() + ext + idx++);
+ }
+ return backupPath;
+ }
+
+ @Override
+ public boolean exists() {
+ return Files.exists(assetPath);
+ }
+
+ @Override
+ public Path getFilePath() throws UnsupportedOperationException {
+ return assetPath;
+ }
+
+ @Override
+ public boolean isFileBased( )
+ {
+ return true;
+ }
+
+ @Override
+ public boolean hasParent( )
+ {
+ if (basePath!=null && assetPath.equals(basePath)) {
+ return false;
+ }
+ return assetPath.getParent()!=null;
+ }
+
+ @Override
+ public StorageAsset getParent( )
+ {
+ Path parentPath;
+ if (basePath!=null && assetPath.equals( basePath )) {
+ parentPath=null;
+ } else
+ {
+ parentPath = assetPath.getParent( );
+ }
+ String relativeParent = StringUtils.substringBeforeLast( relativePath,"/");
+ if (parentPath!=null) {
+ return new FilesystemAsset(storage, relativeParent, parentPath, basePath, true, setPermissionsForNew );
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public StorageAsset resolve(String toPath) {
+ return storage.getAsset(this.getPath()+"/"+toPath);
+ }
+
+
+ public void setDefaultFileAcls(List<AclEntry> acl) {
+ defaultFileAcls = acl;
+ }
+
+ public List<AclEntry> getDefaultFileAcls() {
+ return defaultFileAcls;
+ }
+
+ public void setDefaultPosixFilePermissions(Set<PosixFilePermission> perms) {
+ defaultPosixFilePermissions = perms;
+ }
+
+ public Set<PosixFilePermission> getDefaultPosixFilePermissions() {
+ return defaultPosixFilePermissions;
+ }
+
+ public void setDefaultDirectoryAcls(List<AclEntry> acl) {
+ defaultDirectoryAcls = acl;
+ }
+
+ public List<AclEntry> getDefaultDirectoryAcls() {
+ return defaultDirectoryAcls;
+ }
+
+ public void setDefaultPosixDirectoryPermissions(Set<PosixFilePermission> perms) {
+ defaultPosixDirectoryPermissions = perms;
+ }
+
+ public Set<PosixFilePermission> getDefaultPosixDirectoryPermissions() {
+ return defaultPosixDirectoryPermissions;
+ }
+
+ @Override
+ public void create() throws IOException {
+ if (!Files.exists(assetPath)) {
+ if (directoryHint) {
+ Files.createDirectories(assetPath);
+ } else {
+ if (!Files.exists( assetPath.getParent() )) {
+ Files.createDirectories( assetPath.getParent( ) );
+ }
+ Files.createFile(assetPath);
+ }
+ if (setPermissionsForNew) {
+ applyDefaultPermissions(assetPath);
+ }
+ }
+ }
+
+ @Override
+ public String toString() {
+ return relativePath+":"+assetPath;
+ }
+
+}
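The backup-and-restore behaviour of replaceDataFromFile() can be exercised as follows (illustrative sketch, not part of this change set; the file names are made up):

package org.apache.archiva.repository.storage;

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class FilesystemAssetReplaceSketch {
    public static void main(String[] args) throws IOException {
        FilesystemStorage storage = new FilesystemStorage(
                Files.createTempDirectory("sketch"), new DefaultFileLockManager());
        StorageAsset asset = storage.addAsset("/data.txt", false);
        asset.create();
        // Stage the replacement content in a temporary file ...
        Path newData = Files.createTempFile("newdata", ".txt");
        Files.write(newData, "replacement".getBytes("ASCII"));
        // ... and let the asset swap it in: the old content is moved to a
        // *.bak file, the new data is moved into place, and the backup is
        // deleted afterwards (or restored, if the move fails).
        asset.replaceDataFromFile(newData);
        System.out.println(new String(Files.readAllBytes(asset.getFilePath()), "ASCII"));
    }
}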
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.archiva.common.filelock.FileLockException;
+import org.apache.archiva.common.filelock.FileLockManager;
+import org.apache.archiva.common.filelock.FileLockTimeoutException;
+import org.apache.archiva.common.filelock.Lock;
+import org.apache.commons.io.FileUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.FileChannel;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.CopyOption;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
+import java.util.function.Consumer;
+
+/**
+ * Implementation of <code>{@link RepositoryStorage}</code> where data is stored in the filesystem.
+ *
+ * All files are relative to a given base path. Path values are separated by '/', '..' is allowed to navigate
+ * to a parent directory, but navigation out of the base path will lead to an exception.
+ */
+public class FilesystemStorage implements RepositoryStorage {
+
+ private static final Logger log = LoggerFactory.getLogger(FilesystemStorage.class);
+
+ private final Path basePath;
+ private final FileLockManager fileLockManager;
+
+ public FilesystemStorage(Path basePath, FileLockManager fileLockManager) throws IOException {
+ if (!Files.exists(basePath)) {
+ Files.createDirectories(basePath);
+ }
+ this.basePath = basePath.normalize().toRealPath();
+ this.fileLockManager = fileLockManager;
+ }
+
+ private Path normalize(final String path) {
+ String nPath = path;
+ while (nPath.startsWith("/")) {
+ nPath = nPath.substring(1);
+ }
+ return Paths.get(nPath);
+ }
+
+ private Path getAssetPath(String path) throws IOException {
+ Path assetPath = basePath.resolve(normalize(path)).normalize();
+ if (!assetPath.startsWith(basePath))
+ {
+ throw new IOException("Path navigation out of allowed scope: "+path);
+ }
+ return assetPath;
+ }
+
+ @Override
+ public void consumeData(StorageAsset asset, Consumer<InputStream> consumerFunction, boolean readLock ) throws IOException
+ {
+ final Path path = asset.getFilePath();
+ try {
+ if (readLock) {
+ consumeDataLocked( path, consumerFunction );
+ } else
+ {
+ try ( InputStream is = Files.newInputStream( path ) )
+ {
+ consumerFunction.accept( is );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not read the input stream from file {}", path);
+ throw e;
+ }
+ }
+ } catch (RuntimeException e)
+ {
+ log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
+ throw new IOException( e );
+ }
+
+ }
+
+ @Override
+ public void consumeDataFromChannel( StorageAsset asset, Consumer<ReadableByteChannel> consumerFunction, boolean readLock ) throws IOException
+ {
+ final Path path = asset.getFilePath();
+ try {
+ if (readLock) {
+ consumeDataFromChannelLocked( path, consumerFunction );
+ } else
+ {
+ try ( FileChannel is = FileChannel.open( path, StandardOpenOption.READ ) )
+ {
+ consumerFunction.accept( is );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not read the input stream from file {}", path);
+ throw e;
+ }
+ }
+ } catch (RuntimeException e)
+ {
+ log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
+ throw new IOException( e );
+ }
+ }
+
+ @Override
+ public void writeData( StorageAsset asset, Consumer<OutputStream> consumerFunction, boolean writeLock ) throws IOException
+ {
+ final Path path = asset.getFilePath();
+ try {
+ if (writeLock) {
+ writeDataLocked( path, consumerFunction );
+ } else
+ {
+ try ( OutputStream os = Files.newOutputStream( path ) )
+ {
+ consumerFunction.accept( os );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not write the output stream to file {}", path);
+ throw e;
+ }
+ }
+ } catch (RuntimeException e)
+ {
+ log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
+ throw new IOException( e );
+ }
+
+ }
+
+ @Override
+ public void writeDataToChannel( StorageAsset asset, Consumer<WritableByteChannel> consumerFunction, boolean writeLock ) throws IOException
+ {
+ final Path path = asset.getFilePath();
+ try {
+ if (writeLock) {
+ writeDataToChannelLocked( path, consumerFunction );
+ } else
+ {
+ try ( FileChannel os = FileChannel.open( path, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE ))
+ {
+ consumerFunction.accept( os );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not write the data to file {}", path);
+ throw e;
+ }
+ }
+ } catch (RuntimeException e)
+ {
+ log.error( "Runtime exception during data consume from artifact {}. Error: {}", path, e.getMessage() );
+ throw new IOException( e );
+ }
+ }
+
+ private void consumeDataLocked( Path file, Consumer<InputStream> consumerFunction) throws IOException
+ {
+
+ final Lock lock;
+ try
+ {
+ lock = fileLockManager.readFileLock( file );
+ try ( InputStream is = Files.newInputStream( lock.getFile()))
+ {
+ consumerFunction.accept( is );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not read the input stream from file {}", file);
+ throw e;
+ } finally
+ {
+ fileLockManager.release( lock );
+ }
+ }
+ catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
+ {
+ log.error("Locking error on file {}", file);
+ throw new IOException(e);
+ }
+ }
+
+ private void consumeDataFromChannelLocked( Path file, Consumer<ReadableByteChannel> consumerFunction) throws IOException
+ {
+
+ final Lock lock;
+ try
+ {
+ lock = fileLockManager.readFileLock( file );
+ try ( FileChannel is = FileChannel.open( lock.getFile( ), StandardOpenOption.READ ))
+ {
+ consumerFunction.accept( is );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not read the input stream from file {}", file);
+ throw e;
+ } finally
+ {
+ fileLockManager.release( lock );
+ }
+ }
+ catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
+ {
+ log.error("Locking error on file {}", file);
+ throw new IOException(e);
+ }
+ }
+
+
+ private void writeDataLocked( Path file, Consumer<OutputStream> consumerFunction) throws IOException
+ {
+
+ final Lock lock;
+ try
+ {
+ lock = fileLockManager.writeFileLock( file );
+ try ( OutputStream os = Files.newOutputStream( lock.getFile()))
+ {
+ consumerFunction.accept( os );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not write the output stream to file {}", file);
+ throw e;
+ } finally
+ {
+ fileLockManager.release( lock );
+ }
+ }
+ catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
+ {
+ log.error("Locking error on file {}", file);
+ throw new IOException(e);
+ }
+ }
+
+ private void writeDataToChannelLocked( Path file, Consumer<WritableByteChannel> consumerFunction) throws IOException
+ {
+
+ final Lock lock;
+ try
+ {
+ lock = fileLockManager.writeFileLock( file );
+ try ( FileChannel os = FileChannel.open( lock.getFile( ), StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE ))
+ {
+ consumerFunction.accept( os );
+ }
+ catch ( IOException e )
+ {
+ log.error("Could not write to file {}", file);
+ throw e;
+ } finally
+ {
+ fileLockManager.release( lock );
+ }
+ }
+ catch ( FileLockException | FileNotFoundException | FileLockTimeoutException e)
+ {
+ log.error("Locking error on file {}", file);
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public StorageAsset getAsset( String path )
+ {
+ try {
+ return new FilesystemAsset(this, path, getAssetPath(path));
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Path navigates outside of base directory "+path);
+ }
+ }
+
+ @Override
+ public StorageAsset addAsset( String path, boolean container )
+ {
+ try {
+ return new FilesystemAsset(this, path, getAssetPath(path), basePath, container);
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Path navigates outside of base directory "+path);
+ }
+ }
+
+ @Override
+ public void removeAsset( StorageAsset asset ) throws IOException
+ {
+ Files.delete(asset.getFilePath());
+ }
+
+ @Override
+ public StorageAsset moveAsset( StorageAsset origin, String destination, CopyOption... copyOptions ) throws IOException
+ {
+ boolean container = origin.isContainer();
+ FilesystemAsset newAsset = new FilesystemAsset(this, destination, getAssetPath(destination), basePath, container );
+ moveAsset( origin, newAsset, copyOptions );
+ return newAsset;
+ }
+
+ @Override
+ public void moveAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions ) throws IOException
+ {
+ if (origin.getStorage()!=this) {
+ throw new IOException("The origin asset does not belong to this storage instance. Cannot copy between different storage instances.");
+ }
+ if (destination.getStorage()!=this) {
+ throw new IOException("The destination asset does not belong to this storage instance. Cannot copy between different storage instances.");
+ }
+ Files.move(origin.getFilePath(), destination.getFilePath(), copyOptions);
+ }
+
+ @Override
+ public StorageAsset copyAsset( StorageAsset origin, String destination, CopyOption... copyOptions ) throws IOException
+ {
+ boolean container = origin.isContainer();
+ FilesystemAsset newAsset = new FilesystemAsset(this, destination, getAssetPath(destination), basePath, container );
+ copyAsset( origin, newAsset, copyOptions );
+ return newAsset;
+ }
+
+ @Override
+ public void copyAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions ) throws IOException
+ {
+ if (origin.getStorage()!=this) {
+ throw new IOException("The origin asset does not belong to this storage instance. Cannot copy between different storage instances.");
+ }
+ if (destination.getStorage()!=this) {
+ throw new IOException("The destination asset does not belong to this storage instance. Cannot copy between different storage instances.");
+ }
+ Path destinationPath = destination.getFilePath();
+ boolean overwrite = false;
+ for (int i=0; i<copyOptions.length; i++) {
+ if (copyOptions[i].equals( StandardCopyOption.REPLACE_EXISTING )) {
+ overwrite=true;
+ }
+ }
+ if (Files.exists(destinationPath) && !overwrite) {
+ throw new IOException("Destination file exists already "+ destinationPath);
+ }
+ if (Files.isDirectory( origin.getFilePath() ))
+ {
+ FileUtils.copyDirectory(origin.getFilePath( ).toFile(), destinationPath.toFile() );
+ } else if (Files.isRegularFile( origin.getFilePath() )) {
+ if (!Files.exists( destinationPath.getParent() )) {
+ Files.createDirectories( destinationPath.getParent() );
+ }
+ Files.copy( origin.getFilePath( ), destinationPath, copyOptions );
+ }
+ }
+
+ public FileLockManager getFileLockManager() {
+ return fileLockManager;
+ }
+
+}
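The scope check in getAssetPath() can be demonstrated like this (illustrative sketch, not part of this change set):

package org.apache.archiva.repository.storage;

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import java.io.IOException;
import java.nio.file.Files;

public class FilesystemStorageScopeSketch {
    public static void main(String[] args) throws IOException {
        FilesystemStorage storage = new FilesystemStorage(
                Files.createTempDirectory("sketch"), new DefaultFileLockManager());
        // '..' is allowed as long as the normalized result stays below the base path.
        System.out.println(storage.getAsset("/dir1/../dir2/file.txt").getPath());
        try {
            // Navigation out of the base path is rejected.
            storage.getAsset("/../../etc/passwd");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}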
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.archiva.common.filelock.FileLockException;
+import org.apache.archiva.common.filelock.FileLockManager;
+import org.apache.archiva.common.filelock.FileLockTimeoutException;
+import org.apache.archiva.common.filelock.Lock;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.*;
+import java.util.HashSet;
+import java.util.function.Consumer;
+
+/**
+ *
+ * Utility class for assets. Provides methods to copy and move assets between different storage instances
+ * and to consume an asset tree recursively.
+ *
+ * @author Martin Stockhammer <martin_s@apache.org>
+ */
+public class StorageUtil
+{
+ private static final int DEFAULT_BUFFER_SIZE = 4096;
+ private static final Logger log = LoggerFactory.getLogger(StorageUtil.class);
+
+ /**
+ * Copies the source asset to the target. The assets may be from different RepositoryStorage instances.
+ * If you know that source and target are from the same storage instance, the copy method of the storage
+ * instance may be faster.
+ *
+ * @param source The source asset
+ * @param target The target asset
+ * @param locked If true, a read lock is set on the source and a write lock is set on the target.
+ * @param copyOptions Copy options
+ * @throws IOException If the copy fails
+ */
+ public static final void copyAsset( final StorageAsset source,
+ final StorageAsset target,
+ boolean locked,
+ final CopyOption... copyOptions ) throws IOException
+ {
+ if (source.isFileBased() && target.isFileBased()) {
+ // Short cut for FS operations
+ final Path sourcePath = source.getFilePath();
+ final Path targetPath = target.getFilePath( );
+ if (locked) {
+ final FileLockManager lmSource = ((FilesystemStorage)source.getStorage()).getFileLockManager();
+ final FileLockManager lmTarget = ((FilesystemStorage)target.getStorage()).getFileLockManager();
+ try (Lock lockRead = lmSource.readFileLock( sourcePath ); Lock lockWrite = lmTarget.writeFileLock( targetPath ) )
+ {
+ Files.copy( sourcePath, targetPath, copyOptions );
+ }
+ catch ( FileLockException | FileLockTimeoutException e )
+ {
+ throw new IOException( e );
+ }
+ } else
+ {
+ Files.copy( sourcePath, targetPath, copyOptions );
+ }
+ } else {
+ try {
+ final RepositoryStorage sourceStorage = source.getStorage();
+ final RepositoryStorage targetStorage = target.getStorage();
+ sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
+ } catch (IOException e) {
+ throw e;
+ } catch (Throwable e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException) {
+ throw (IOException)cause;
+ } else
+ {
+ throw new IOException( e );
+ }
+ }
+ }
+ }
+
+ /**
+ * Moves an asset between different storage instances.
+ * If you know that source and target are from the same storage instance, the move method of the storage
+ * instance may be faster.
+ *
+ * @param source The source asset
+ * @param target The target asset
+ * @param locked If true, a lock is used for the move operation.
+ * @param copyOptions Options for copying
+ * @throws IOException If the move fails
+ */
+ public static final void moveAsset(StorageAsset source, StorageAsset target, boolean locked, CopyOption... copyOptions) throws IOException
+ {
+ if (source.isFileBased() && target.isFileBased()) {
+ // Short cut for FS operations
+ // Move is an atomic operation
+ Files.move( source.getFilePath(), target.getFilePath(), copyOptions );
+ } else {
+ try {
+ final RepositoryStorage sourceStorage = source.getStorage();
+ final RepositoryStorage targetStorage = target.getStorage();
+ sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
+ sourceStorage.removeAsset( source );
+ } catch (IOException e) {
+ throw e;
+ } catch (Throwable e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException) {
+ throw (IOException)cause;
+ } else
+ {
+ throw new IOException( e );
+ }
+ }
+ }
+
+ }
+
+ private static final void wrapWriteFunction(ReadableByteChannel is, RepositoryStorage targetStorage, StorageAsset target, boolean locked) {
+ try {
+ targetStorage.writeDataToChannel( target, os -> copy(is, os), locked );
+ } catch (Exception e) {
+ throw new RuntimeException( e );
+ }
+ }
+
+
+ private static final void copy( final ReadableByteChannel is, final WritableByteChannel os ) {
+ if (is instanceof FileChannel) {
+ copy( (FileChannel) is, os );
+ } else if (os instanceof FileChannel) {
+ copy(is, (FileChannel)os);
+ } else
+ {
+ try
+ {
+ ByteBuffer buffer = ByteBuffer.allocate( DEFAULT_BUFFER_SIZE );
+ while ( is.read( buffer ) != -1 )
+ {
+ buffer.flip( );
+ while ( buffer.hasRemaining( ) )
+ {
+ os.write( buffer );
+ }
+ buffer.clear( );
+ }
+ }
+ catch ( IOException e )
+ {
+ throw new RuntimeException( e );
+ }
+ }
+ }
+
+ private static final void copy( final FileChannel is, final WritableByteChannel os ) {
+ try
+ {
+ is.transferTo( 0, is.size( ), os );
+ }
+ catch ( IOException e )
+ {
+ throw new RuntimeException( e );
+ }
+ }
+
+ private static final void copy( final ReadableByteChannel is, final FileChannel os ) {
+ try
+ {
+ os.transferFrom( is, 0, Long.MAX_VALUE );
+ }
+ catch ( IOException e )
+ {
+ throw new RuntimeException( e );
+ }
+ }
+
+ /**
+ * Runs the consumer function recursively on each asset found starting at the base asset.
+ * @param baseAsset The base asset where the search starts
+ * @param consumer The consumer function applied to each found asset
+ * @param depthFirst If true, the deepest elements are consumed first.
+ * @param maxDepth The maximum depth to recurse into. 0 means only the baseAsset is consumed, 1 the base asset and its children, and so forth.
+ * @throws IOException If the traversal fails
+ */
+ public static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer, final boolean depthFirst, final int maxDepth) throws IOException {
+ recurse(baseAsset, consumer, depthFirst, maxDepth, 0);
+ }
+
+ /**
+ * Runs the consumer function recursively on each asset found starting at the base asset. The function descends
+ * to the maximum depth.
+ *
+ * @param baseAsset The base asset where the search starts
+ * @param consumer The consumer function applied to each found asset
+ * @param depthFirst If true, the deepest elements are consumed first.
+ * @throws IOException If the traversal fails
+ */
+ public static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer, final boolean depthFirst) throws IOException {
+ recurse(baseAsset, consumer, depthFirst, Integer.MAX_VALUE, 0);
+ }
+
+ /**
+ * Runs the consumer function recursively on each asset found starting at the base asset. It does not use
+ * depth-first traversal and stops only when there are no more children available.
+ *
+ * @param baseAsset The base asset where the search starts
+ * @param consumer The consumer function applied to each found asset
+ * @throws IOException If the traversal fails
+ */
+ public static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer) throws IOException {
+ recurse(baseAsset, consumer, false, Integer.MAX_VALUE, 0);
+ }
+
+ private static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer, final boolean depthFirst, final int maxDepth, final int currentDepth)
+ throws IOException {
+ if (!depthFirst) {
+ consumer.accept(baseAsset);
+ }
+ if (currentDepth<maxDepth && baseAsset.isContainer()) {
+ for(StorageAsset asset : baseAsset.list() ) {
+ recurse(asset, consumer, depthFirst, maxDepth, currentDepth+1);
+ }
+ }
+ if (depthFirst) {
+ consumer.accept(baseAsset);
+ }
+ }
+
+ /**
+ * Deletes the given asset and all child assets recursively.
+ * @param baseDir The base asset to remove.
+ * @throws IOException If the traversal fails
+ */
+ public static final void deleteRecursively(StorageAsset baseDir) throws IOException {
+ recurse(baseDir, a -> {
+ try {
+ a.getStorage().removeAsset(a);
+ } catch (IOException e) {
+ log.error("Could not delete asset {}", a.getPath());
+ }
+ },true);
+ }
+
+ /**
+ * Returns the extension of the name of a given asset. The extension is the substring after the last occurrence
+ * of '.' in the string. If no '.' is found, the empty string is returned.
+ *
+ * @param asset The asset from which to return the extension string.
+ * @return The extension.
+ */
+ public static final String getExtension(StorageAsset asset) {
+ return StringUtils.substringAfterLast(asset.getName(),".");
+ }
+
+ public static final void copyToLocalFile(StorageAsset asset, Path destination, CopyOption... copyOptions) throws IOException {
+ if (asset.isFileBased()) {
+ Files.copy(asset.getFilePath(), destination, copyOptions);
+ } else {
+ try {
+
+ HashSet<OpenOption> openOptions = new HashSet<>();
+ for (CopyOption option : copyOptions) {
+ if (option == StandardCopyOption.REPLACE_EXISTING) {
+ openOptions.add(StandardOpenOption.CREATE);
+ openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
+ openOptions.add(StandardOpenOption.WRITE);
+ } else {
+ openOptions.add(StandardOpenOption.WRITE);
+ openOptions.add(StandardOpenOption.CREATE_NEW);
+ }
+ }
+ asset.getStorage().consumeDataFromChannel(asset, channel -> {
+ try {
+ FileChannel.open(destination, openOptions).transferFrom(channel, 0, Long.MAX_VALUE);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }, false);
+ } catch (Throwable e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
+ } else {
+ throw new IOException(e);
+ }
+ }
+ }
+ }
+
+ public static class PathInformation {
+ final Path path ;
+ final boolean tmpFile;
+
+ PathInformation(Path path, boolean tmpFile) {
+ this.path = path;
+ this.tmpFile = tmpFile;
+ }
+
+ public Path getPath() {
+ return path;
+ }
+
+ public boolean isTmpFile() {
+ return tmpFile;
+ }
+
+ }
+
+ public static final PathInformation getAssetDataAsPath(StorageAsset asset) throws IOException {
+ if (!asset.exists()) {
+ throw new IOException("Asset does not exist");
+ }
+ if (asset.isFileBased()) {
+ return new PathInformation(asset.getFilePath(), false);
+ } else {
+ Path tmpFile = Files.createTempFile(asset.getName(), getExtension(asset));
+ copyToLocalFile(asset, tmpFile, StandardCopyOption.REPLACE_EXISTING);
+ return new PathInformation(tmpFile, true);
+ }
+ }
+
+}
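How the recursion helpers fit together, as a sketch (illustrative only, not part of this change set); depthFirst=true visits children before their parents, which is the ordering deleteRecursively() relies on:

package org.apache.archiva.repository.storage;

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import java.io.IOException;
import java.nio.file.Files;

public class StorageUtilRecurseSketch {
    public static void main(String[] args) throws IOException {
        FilesystemStorage storage = new FilesystemStorage(
                Files.createTempDirectory("sketch"), new DefaultFileLockManager());
        storage.addAsset("/a/b/file.txt", false).create();
        // Print the tree top-down, at most two levels below the root asset.
        StorageUtil.recurse(storage.getAsset("/"),
                a -> System.out.println(a.getPath()), false, 2);
        // Remove everything, children first (depth-first traversal).
        StorageUtil.deleteRecursively(storage.getAsset("/"));
    }
}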
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+
+public class FilesystemAssetTest {
+
+ Path assetPathFile;
+ Path assetPathDir;
+ FilesystemStorage filesystemStorage;
+
+ @Before
+ public void init() throws IOException {
+ assetPathDir = Files.createTempDirectory("assetDir");
+ assetPathFile = Files.createTempFile(assetPathDir,"assetFile", "dat");
+ filesystemStorage = new FilesystemStorage(assetPathDir, new DefaultFileLockManager());
+ }
+
+ @After
+ public void cleanup() {
+
+ try {
+ Files.deleteIfExists(assetPathFile);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ FileUtils.deleteQuietly(assetPathDir.toFile());
+ }
+
+
+ @Test
+ public void getPath() {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, assetPathFile.getFileName().toString(), assetPathFile);
+ Assert.assertEquals("/"+assetPathFile.getFileName().toString(), asset.getPath());
+ }
+
+ @Test
+ public void getName() {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/"+assetPathFile.getFileName().toString(), assetPathFile);
+ Assert.assertEquals(assetPathFile.getFileName().toString(), asset.getName());
+
+ }
+
+ @Test
+ public void getModificationTime() throws IOException {
+ Instant modTime = Files.getLastModifiedTime(assetPathFile).toInstant();
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test123", assetPathFile);
+ Assert.assertEquals(modTime, asset.getModificationTime());
+ }
+
+ @Test
+ public void isContainer() {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1323", assetPathFile);
+ Assert.assertFalse(asset.isContainer());
+ FilesystemAsset asset2 = new FilesystemAsset(filesystemStorage, "/test1234", assetPathDir);
+ Assert.assertTrue(asset2.isContainer());
+ }
+
+ @Test
+ public void list() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Assert.assertEquals(0, asset.list().size());
+
+ FilesystemAsset asset2 = new FilesystemAsset(filesystemStorage, "/test1235", assetPathDir);
+ Assert.assertEquals(1, asset2.list().size());
+ Path f1 = Files.createTempFile(assetPathDir, "testfile", "dat");
+ Path f2 = Files.createTempFile(assetPathDir, "testfile", "dat");
+ Path d1 = Files.createTempDirectory(assetPathDir, "testdir");
+ Assert.assertEquals(4, asset2.list().size());
+ Assert.assertTrue(asset2.list().stream().anyMatch(p -> p.getName().equals(f1.getFileName().toString())));
+ Assert.assertTrue(asset2.list().stream().anyMatch(p -> p.getName().equals(f2.getFileName().toString())));
+ Assert.assertTrue(asset2.list().stream().anyMatch(p -> p.getName().equals(d1.getFileName().toString())));
+ Files.deleteIfExists(f1);
+ Files.deleteIfExists(f2);
+ Files.deleteIfExists(d1);
+
+
+ }
+
+ @Test
+ public void getSize() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Assert.assertEquals(0, asset.getSize());
+
+ Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
+ Assert.assertTrue(asset.getSize()>=6);
+
+
+ }
+
+ @Test
+ public void getData() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
+ try(InputStream is = asset.getReadStream()) {
+ Assert.assertEquals("abcdef", IOUtils.toString(is, "ASCII"));
+ }
+
+ }
+
+ @Test
+ public void getDataExceptionOnDir() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathDir);
+ Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
+ try {
+ InputStream is = asset.getReadStream();
+ Assert.assertFalse("Exception expected for data on dir", true);
+ } catch (IOException e) {
+ // fine
+ }
+
+ }
+
+ @Test
+ public void writeData() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
+ try(OutputStream os = asset.getWriteStream(true)) {
+ IOUtils.write("test12345", os, "ASCII");
+ }
+ Assert.assertEquals("test12345", IOUtils.toString(assetPathFile.toUri().toURL(), "ASCII"));
+ }
+
+ @Test
+ public void writeDataAppend() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Files.write(assetPathFile, "abcdef".getBytes("ASCII"));
+ try(OutputStream os = asset.getWriteStream(false)) {
+ IOUtils.write("test12345", os, "ASCII");
+ }
+ Assert.assertEquals("abcdeftest12345", IOUtils.toString(assetPathFile.toUri().toURL(), "ASCII"));
+ }
+
+ @Test
+ public void writeDataExceptionOnDir() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathDir);
+ try {
+
+ OutputStream os = asset.getWriteStream(true);
+ Assert.assertTrue("Writing to a directory should throw a IOException", false);
+ } catch (IOException e) {
+ // Fine
+ }
+ }
+
+ @Test
+ public void storeDataFile() throws IOException {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Path dataFile = Files.createTempFile("testdata", "dat");
+ try(OutputStream os = Files.newOutputStream(dataFile)) {
+ IOUtils.write("testkdkdkd", os, "ASCII");
+ }
+ asset.replaceDataFromFile(dataFile);
+ Assert.assertEquals("testkdkdkd", IOUtils.toString(assetPathFile.toUri().toURL(), "ASCII"));
+ }
+
+ @Test
+ public void exists() {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Assert.assertTrue(asset.exists());
+ FilesystemAsset asset2 = new FilesystemAsset(filesystemStorage, "/test1234", Paths.get("abcdefgkdkdk"));
+ Assert.assertFalse(asset2.exists());
+
+ }
+
+ @Test
+ public void getFilePath() {
+ FilesystemAsset asset = new FilesystemAsset(filesystemStorage, "/test1234", assetPathFile);
+ Assert.assertEquals(assetPathFile, asset.getFilePath());
+ }
+}
\ No newline at end of file
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+
+import static org.junit.Assert.*;
+
+public class FilesystemStorageTest {
+
+ private FilesystemStorage fsStorage;
+ private FilesystemAsset file1Asset;
+ private FilesystemAsset dir1Asset;
+ private Path baseDir;
+ private Path file1;
+ private Path dir1;
+
+ @Before
+ public void init() throws IOException {
+ baseDir = Files.createTempDirectory("FsStorageTest");
+ DefaultFileLockManager fl = new DefaultFileLockManager();
+ fsStorage = new FilesystemStorage(baseDir,fl);
+ Files.createDirectories(baseDir.resolve("dir1"));
+ Files.createDirectories(baseDir.resolve("dir2"));
+ file1 = Files.createFile(baseDir.resolve("dir1/testfile1.dat"));
+ dir1 = Files.createDirectories(baseDir.resolve("dir1/testdir"));
+ file1Asset = new FilesystemAsset(fsStorage, "/dir1/testfile1.dat", file1);
+ dir1Asset = new FilesystemAsset(fsStorage, "/dir1/testdir", dir1);
+ }
+
+ private class StringResult {
+ public String getData() {
+ return data;
+ }
+
+ public void setData(String data) {
+ this.data = data;
+ }
+
+ String data;
+ }
+
+
+ @After
+ public void cleanup() {
+ FileUtils.deleteQuietly(file1.toFile());
+ FileUtils.deleteQuietly(dir1.toFile());
+ FileUtils.deleteQuietly(baseDir.resolve("dir1").toFile());
+ FileUtils.deleteQuietly(baseDir.resolve("dir2").toFile());
+ FileUtils.deleteQuietly(baseDir.toFile());
+ }
+
+
+
+
+ @Test
+ public void consumeData() throws IOException {
+ try(OutputStream os = Files.newOutputStream(file1)) {
+ IOUtils.write("abcdefghijkl", os, "ASCII");
+ }
+ StringResult result = new StringResult();
+ fsStorage.consumeData(file1Asset, is -> consume(is, result), false );
+ Assert.assertEquals("abcdefghijkl" ,result.getData());
+ }
+
+ private void consume(InputStream is, StringResult result) {
+ try {
+ result.setData(IOUtils.toString(is, "ASCII"));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+
+ @Test
+ public void getAsset() {
+ StorageAsset asset = fsStorage.getAsset("/dir1/testfile1.dat");
+ Assert.assertEquals(file1, asset.getFilePath());
+ }
+
+ @Test
+ public void addAsset() {
+ StorageAsset newAsset = fsStorage.addAsset("dir2/test", false);
+ Assert.assertNotNull(newAsset);
+ Assert.assertFalse(newAsset.isContainer());
+ Assert.assertFalse(newAsset.exists());
+
+ StorageAsset newDirAsset = fsStorage.addAsset("/dir2/testdir2", true);
+ Assert.assertNotNull(newDirAsset);
+ Assert.assertTrue(newDirAsset.isContainer());
+ Assert.assertFalse(newDirAsset.exists());
+ }
+
+ @Test
+ public void removeAsset() throws IOException {
+ Assert.assertTrue(Files.exists(file1));
+ fsStorage.removeAsset(file1Asset);
+ Assert.assertFalse(Files.exists(file1));
+
+ Assert.assertTrue(Files.exists(dir1));
+ fsStorage.removeAsset(dir1Asset);
+ Assert.assertFalse(Files.exists(dir1));
+ }
+
+ @Test
+ public void moveAsset() throws IOException {
+ Path newFile=null;
+ Path newDir=null;
+ try {
+ Assert.assertTrue(Files.exists(file1));
+ try (OutputStream os = Files.newOutputStream(file1)) {
+ IOUtils.write("testakdkkdkdkdk", os, "ASCII");
+ }
+ long fileSize = Files.size(file1);
+ fsStorage.moveAsset(file1Asset, "/dir2/testfile2.dat");
+ Assert.assertFalse(Files.exists(file1));
+ newFile = baseDir.resolve("dir2/testfile2.dat");
+ Assert.assertTrue(Files.exists(newFile));
+ Assert.assertEquals(fileSize, Files.size(newFile));
+
+
+ Assert.assertTrue(Files.exists(dir1));
+ newDir = baseDir.resolve("dir2/testdir2");
+ fsStorage.moveAsset(dir1Asset, "dir2/testdir2");
+ Assert.assertFalse(Files.exists(dir1));
+ Assert.assertTrue(Files.exists(newDir));
+ } finally {
+ if (newFile!=null) Files.deleteIfExists(newFile);
+ if (newDir!=null) Files.deleteIfExists(newDir);
+ }
+ }
+
+ @Test
+ public void copyAsset() throws IOException {
+ Path newFile=null;
+ Path newDir=null;
+ try {
+ Assert.assertTrue(Files.exists(file1));
+ try (OutputStream os = Files.newOutputStream(file1)) {
+ IOUtils.write("testakdkkdkdkdk", os, "ASCII");
+ }
+ long fileSize = Files.size(file1);
+ fsStorage.copyAsset(file1Asset, "/dir2/testfile2.dat", StandardCopyOption.REPLACE_EXISTING);
+ Assert.assertTrue(Files.exists(file1));
+ Assert.assertEquals(fileSize, Files.size(file1));
+ newFile = baseDir.resolve("dir2/testfile2.dat");
+ Assert.assertTrue(Files.exists(newFile));
+ Assert.assertEquals(fileSize, Files.size(newFile));
+
+ try {
+ fsStorage.copyAsset(file1Asset, "/dir2/testfile2.dat");
+ Assert.assertTrue("IOException should be thrown (File exists)", false);
+ } catch (IOException ex) {
+ Assert.assertTrue("Exception must contain 'file exists'", ex.getMessage().contains("file exists"));
+ }
+
+ Assert.assertTrue(Files.exists(dir1));
+ newDir = baseDir.resolve("dir2/testdir2");
+ fsStorage.copyAsset(dir1Asset, "dir2/testdir2");
+ Assert.assertTrue(Files.exists(dir1));
+ Assert.assertTrue(Files.exists(newDir));
+ } finally {
+ if (newFile!=null) Files.deleteIfExists(newFile);
+ if (newDir!=null) FileUtils.deleteQuietly(newDir.toFile());
+ }
+ }
+}
\ No newline at end of file
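
For orientation before the build and wiring changes below, a minimal sketch of the storage API exactly as the test above exercises it. This snippet is not part of the patch; the temp directory and asset names are invented.

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.storage.StorageAsset;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class StorageApiSketch {
    public static void main(String[] args) throws IOException {
        Path baseDir = Files.createTempDirectory("storage-sketch");
        // All asset paths are resolved relative to the storage's base directory.
        FilesystemStorage storage = new FilesystemStorage(baseDir, new DefaultFileLockManager());

        StorageAsset file = storage.addAsset("dir1/data.txt", false); // plain file asset
        StorageAsset dir  = storage.addAsset("dir1/sub", true);       // container (directory) asset

        // addAsset() only registers the asset; it is materialized on demand.
        if (!dir.exists()) {
            dir.create();
        }
        // Removal goes through the owning storage, mirroring removeAsset() in the test above.
        storage.removeAsset(dir);
    }
}
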
<module>archiva-repository-scanner</module>
<module>archiva-repository-admin</module>
<module>archiva-security-common</module>
+ <module>archiva-storage-api</module>
+ <module>archiva-storage-fs</module>
</modules>
</project>
import org.apache.archiva.consumers.KnownRepositoryContentConsumer;
import org.apache.archiva.converter.RepositoryConversionException;
import org.apache.archiva.repository.BasicManagedRepository;
-import org.apache.archiva.repository.content.FilesystemStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.content.maven2.ManagedDefaultRepositoryContent;
import org.apache.archiva.repository.scanner.RepositoryScanner;
import org.apache.archiva.repository.scanner.RepositoryScannerException;
import org.apache.maven.artifact.repository.ArtifactRepository;
-import org.apache.maven.artifact.repository.ArtifactRepositoryFactory;
import org.apache.maven.artifact.repository.MavenArtifactRepository;
import org.apache.maven.artifact.repository.layout.ArtifactRepositoryLayout;
import org.springframework.stereotype.Service;
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
+import org.apache.archiva.common.filelock.FileLockManager;
import org.apache.archiva.indexer.ArchivaIndexingContext;
import org.apache.archiva.repository.Repository;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.index.context.IndexingContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
import java.sql.Date;
import java.time.ZonedDateTime;
import java.util.Set;
*/
public class MavenIndexContext implements ArchivaIndexingContext {
+ private static final Logger log = LoggerFactory.getLogger(MavenIndexContext.class);
+
private IndexingContext delegate;
private Repository repository;
+ private StorageAsset dir = null;
protected MavenIndexContext(Repository repository, IndexingContext delegate) {
this.delegate = delegate;
this.repository = repository;
}
@Override
- public URI getPath() {
- return delegate.getIndexDirectoryFile().toURI();
+ public StorageAsset getPath() {
+ if (dir==null) {
+ StorageAsset repositoryDirAsset = repository.getAsset("");
+ Path repositoryDir = repositoryDirAsset.getFilePath().toAbsolutePath();
+ Path indexDir = delegate.getIndexDirectoryFile().toPath();
+ if (indexDir.startsWith(repositoryDir)) {
+ dir = repository.getAsset(repositoryDir.relativize(indexDir).toString());
+ } else {
+ try {
+ FilesystemStorage storage = new FilesystemStorage(indexDir, new DefaultFileLockManager());
+ dir = storage.getAsset("");
+ } catch (IOException e) {
+ log.error("Error occured while creating storage for index dir");
+ }
+ }
+ }
+ return dir;
}
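
The branch in getPath() above rests on the standard java.nio containment idiom; a small self-contained illustration (the directory layout is invented):

import java.nio.file.Path;
import java.nio.file.Paths;

public class PathContainmentSketch {
    public static void main(String[] args) {
        Path repositoryDir = Paths.get("/data/repos/internal").toAbsolutePath();
        Path indexInside   = Paths.get("/data/repos/internal/.indexer").toAbsolutePath();
        Path indexOutside  = Paths.get("/data/index/internal").toAbsolutePath();

        // Inside the repository: relativize yields the repository-relative asset path.
        if (indexInside.startsWith(repositoryDir)) {
            System.out.println(repositoryDir.relativize(indexInside)); // ".indexer"
        }
        // Outside: startsWith is false, so getPath() falls back to a dedicated FilesystemStorage.
        System.out.println(indexOutside.startsWith(repositoryDir)); // false
    }
}
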
@Override
* under the License.
*/
-import org.apache.archiva.admin.model.RepositoryAdminException;
import org.apache.archiva.common.utils.FileUtils;
import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.configuration.ArchivaConfiguration;
import org.apache.archiva.indexer.IndexCreationFailedException;
import org.apache.archiva.indexer.IndexUpdateFailedException;
import org.apache.archiva.indexer.UnsupportedBaseContextException;
-import org.apache.archiva.indexer.merger.IndexMergerException;
-import org.apache.archiva.indexer.merger.TemporaryGroupIndex;
import org.apache.archiva.proxy.ProxyRegistry;
import org.apache.archiva.proxy.maven.WagonFactory;
import org.apache.archiva.proxy.maven.WagonFactoryException;
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryType;
import org.apache.archiva.repository.UnsupportedRepositoryTypeException;
-import org.apache.archiva.repository.content.FilesystemAsset;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.RepositoryStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.index.ArtifactContext;
import org.apache.maven.index.ArtifactContextProducer;
private ProxyRegistry proxyRegistry;
- private ConcurrentSkipListSet<Path> activeContexts = new ConcurrentSkipListSet<>( );
+ private ConcurrentSkipListSet<StorageAsset> activeContexts = new ConcurrentSkipListSet<>( );
private static final int WAIT_TIME = 100;
private static final int MAX_WAIT = 10;
return context.getBaseContext( IndexingContext.class );
}
- private Path getIndexPath( ArchivaIndexingContext ctx )
+ private StorageAsset getIndexPath( ArchivaIndexingContext ctx )
{
- return PathUtil.getPathFromUri( ctx.getPath( ) );
+ return ctx.getPath( );
}
@FunctionalInterface
{
throw new IndexUpdateFailedException( "Maven index is not supported by this context", e );
}
- final Path ctxPath = getIndexPath( context );
+ final StorageAsset ctxPath = getIndexPath( context );
int loop = MAX_WAIT;
boolean active = false;
while ( loop-- > 0 && !active )
@Override
public void addArtifactsToIndex( final ArchivaIndexingContext context, final Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.addArtifactsToIndex(artifacts, indexingContext);
} catch (IOException e) {
@Override
public void removeArtifactsFromIndex( ArchivaIndexingContext context, Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.deleteArtifactsFromIndex(artifacts, indexingContext);
} catch (IOException e) {
throw new IndexCreationFailedException( "Could not create index context for repository " + repository.getId( )
+ ( StringUtils.isNotEmpty( e.getMessage( ) ) ? ": " + e.getMessage( ) : "" ), e );
}
- MavenIndexContext context = new MavenIndexContext( repository, mvnCtx );
- return context;
+ return new MavenIndexContext( repository, mvnCtx );
}
@Override
log.warn("Index close failed");
}
try {
- FileUtils.deleteDirectory(Paths.get(context.getPath()));
+ StorageUtil.deleteRecursively(context.getPath());
} catch (IOException e) {
throw new IndexUpdateFailedException("Could not delete index files");
}
}
}
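
As a usage note, index cleanup now stays entirely inside the storage abstraction. A short sketch, assuming the StorageUtil.deleteRecursively(StorageAsset) signature used above:

import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.storage.StorageUtil;

import java.io.IOException;

public class IndexCleanupSketch {
    // Deletes an index directory regardless of the backing storage implementation.
    static void dropIndex(StorageAsset indexDir) throws IOException {
        if (indexDir.exists()) {
            StorageUtil.deleteRecursively(indexDir);
        }
    }
}
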
- private StorageAsset getIndexPath(URI indexDir, Path repoDir, String defaultDir) throws IOException
+ private StorageAsset getIndexPath(URI indexDirUri, RepositoryStorage storage, String defaultDir) throws IOException
{
- String indexPath = indexDir.getPath();
- Path indexDirectory = null;
- if ( ! StringUtils.isEmpty(indexDir.toString( ) ) )
+ Path indexDirectory;
+ Path repositoryPath = storage.getAsset("").getFilePath().toAbsolutePath();
+ StorageAsset indexDir;
+ if ( ! StringUtils.isEmpty(indexDirUri.toString( ) ) )
{
- indexDirectory = PathUtil.getPathFromUri( indexDir );
+ indexDirectory = PathUtil.getPathFromUri( indexDirUri );
// an absolute path outside the repository gets its own file based storage; everything else resolves relative to the repository
- if ( indexDirectory.isAbsolute( ) )
+ if ( indexDirectory.isAbsolute( ) && !indexDirectory.startsWith(repositoryPath))
{
- indexPath = indexDirectory.getFileName().toString();
+ if (storage instanceof FilesystemStorage) {
+ FilesystemStorage fsStorage = (FilesystemStorage) storage;
+ FilesystemStorage indexStorage = new FilesystemStorage(indexDirectory.getParent(), fsStorage.getFileLockManager());
+ indexDir = indexStorage.getAsset(indexDirectory.getFileName().toString());
+ } else {
+ throw new IOException("The given storage is not file based.");
+ }
+ } else if (indexDirectory.isAbsolute()) {
+ indexDir = storage.getAsset(repositoryPath.relativize(indexDirectory).toString());
}
else
{
- indexDirectory = repoDir.resolve( indexDirectory );
+ indexDir = storage.getAsset(indexDirectory.toString());
}
}
else
{
- indexDirectory = repoDir.resolve( defaultDir );
- indexPath = defaultDir;
+ indexDir = storage.getAsset( defaultDir );
}
- if ( !Files.exists( indexDirectory ) )
+ if ( !indexDir.exists() )
{
- Files.createDirectories( indexDirectory );
+ indexDir.create();
}
- return new FilesystemAsset( indexPath, indexDirectory);
+ return indexDir;
}
private StorageAsset getIndexPath( Repository repo) throws IOException {
IndexCreationFeature icf = repo.getFeature(IndexCreationFeature.class).get();
- return getIndexPath( icf.getIndexPath(), repo.getAsset( "" ).getFilePath(), DEFAULT_INDEX_PATH);
+ return getIndexPath( icf.getIndexPath(), repo, DEFAULT_INDEX_PATH);
}
private StorageAsset getPackedIndexPath(Repository repo) throws IOException {
IndexCreationFeature icf = repo.getFeature(IndexCreationFeature.class).get();
- return getIndexPath(icf.getPackedIndexPath(), repo.getAsset( "" ).getFilePath(), DEFAULT_PACKED_INDEX_PATH);
+ return getIndexPath(icf.getPackedIndexPath(), repo, DEFAULT_PACKED_INDEX_PATH);
}
private IndexingContext createRemoteContext(RemoteRepository remoteRepository ) throws IOException
{
- Path appServerBase = archivaConfiguration.getAppServerBaseDir( );
-
String contextKey = "remote-" + remoteRepository.getId( );
Files.createDirectories( repoDir );
}
- StorageAsset indexDirectory = null;
+ StorageAsset indexDirectory;
// is there configured indexDirectory ?
if ( remoteRepository.supportsFeature( RemoteIndexFeature.class ) )
}
}
- StorageAsset indexDirectory = null;
+ StorageAsset indexDirectory;
if ( repository.supportsFeature( IndexCreationFeature.class ) )
{
}
@Override
- public void connect( String id, String url )
- throws IOException
- {
+ public void connect( String id, String url ) {
//no op
}
@Override
- public void disconnect( )
- throws IOException
- {
+ public void disconnect( ) {
// no op
}
@Override
public InputStream retrieve( String name )
- throws IOException, FileNotFoundException
- {
+ throws IOException {
try
{
log.info( "index update retrieve file, name:{}", name );
assertNotNull(ctx);
assertEquals(repository, ctx.getRepository());
assertEquals("test-repo", ctx.getId());
- assertEquals(indexPath.toAbsolutePath(), Paths.get(ctx.getPath()).toAbsolutePath());
+ assertEquals(indexPath.toAbsolutePath(), ctx.getPath().getFilePath().toAbsolutePath());
assertTrue(Files.exists(indexPath));
List<Path> li = Files.list(indexPath).collect(Collectors.toList());
assertTrue(li.size()>0);
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-model</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-xml-tools</artifactId>
import org.apache.archiva.model.ArchivaRepositoryMetadata;
import org.apache.archiva.model.Plugin;
import org.apache.archiva.model.SnapshotVersion;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.archiva.xml.XMLReader;
import org.apache.commons.lang.math.NumberUtils;
private static final Logger log = LoggerFactory.getLogger( MavenMetadataReader.class );
+ public static ArchivaRepositoryMetadata read(StorageAsset metadataFile) throws XMLException, IOException {
+ if (metadataFile.isFileBased()) {
+ return read(metadataFile.getFilePath());
+ } else {
+ throw new IOException("StorageAsset is not file based");
+ }
+ }
+
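+
A hedged usage sketch of the new overload, mirroring the wrapping the proxy metadata test further down performs; file locations are illustrative.

import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.maven2.metadata.MavenMetadataReader;
import org.apache.archiva.model.ArchivaRepositoryMetadata;
import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;

import java.io.IOException;
import java.nio.file.Path;

public class MetadataReadSketch {
    static ArchivaRepositoryMetadata readMetadata(Path metadataFile) throws IOException, XMLException {
        // Wrap the parent directory in a FilesystemStorage and hand the file
        // to the reader as a StorageAsset.
        FilesystemStorage storage = new FilesystemStorage(metadataFile.getParent(), new DefaultFileLockManager());
        StorageAsset asset = storage.getAsset(metadataFile.getFileName().toString());
        return MavenMetadataReader.read(asset);
    }
}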
/**
* Read and return the {@link org.apache.archiva.model.ArchivaRepositoryMetadata} object from the provided xml file.
*
* @throws XMLException
*/
public static ArchivaRepositoryMetadata read( Path metadataFile )
- throws XMLException
- {
+ throws XMLException, IOException {
XMLReader xml = new XMLReader( "metadata", metadataFile );
// invoke this to remove namespaces, see MRM-1136
metadata.setArtifactId( xml.getElementText( "//metadata/artifactId" ) );
metadata.setVersion( xml.getElementText( "//metadata/version" ) );
Date modTime;
- try
- {
- modTime = new Date(Files.getLastModifiedTime( metadataFile ).toMillis( ));
- }
- catch ( IOException e )
- {
- modTime = new Date();
- log.error("Could not read modification time of {}", metadataFile);
- }
+ modTime = new Date(Files.getLastModifiedTime(metadataFile).toMillis());
metadata.setFileLastModified( modTime );
- try
- {
- metadata.setFileSize( Files.size( metadataFile ) );
- }
- catch ( IOException e )
- {
- metadata.setFileSize( 0 );
- log.error("Could not read file size of {}", metadataFile);
- }
+ metadata.setFileSize( Files.size(metadataFile) );
metadata.setLastUpdated( xml.getElementText( "//metadata/versioning/lastUpdated" ) );
metadata.setLatestVersion( xml.getElementText( "//metadata/versioning/latest" ) );
import org.apache.archiva.proxy.model.NetworkProxy;
import org.apache.archiva.proxy.model.ProxyConnector;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.wagon.ConnectionException;
import org.apache.maven.wagon.ResourceDoesNotExistException;
* @throws NotModifiedException
*/
protected void transferResources( ProxyConnector connector, RemoteRepositoryContent remoteRepository,
- Path tmpResource, Path[] checksumFiles, String url, String remotePath, StorageAsset resource,
+ StorageAsset tmpResource, StorageAsset[] checksumFiles, String url, String remotePath, StorageAsset resource,
Path workingDirectory, ManagedRepositoryContent repository )
throws ProxyException, NotModifiedException {
Wagon wagon = null;
// to
// save on connections since md5 is rarely used
for (int i=0; i<checksumFiles.length; i++) {
- String ext = "."+StringUtils.substringAfterLast( checksumFiles[i].getFileName( ).toString( ), "." );
+ String ext = "."+StringUtils.substringAfterLast(checksumFiles[i].getName( ), "." );
transferChecksum(wagon, remoteRepository, remotePath, repository, resource.getFilePath(), ext,
- checksumFiles[i]);
+ checksumFiles[i].getFilePath());
}
}
} catch (NotFoundException e) {
protected void transferArtifact(Wagon wagon, RemoteRepositoryContent remoteRepository, String remotePath,
ManagedRepositoryContent repository, Path resource, Path tmpDirectory,
- Path destFile)
+ StorageAsset destFile)
throws ProxyException {
- transferSimpleFile(wagon, remoteRepository, remotePath, repository, resource, destFile);
+ transferSimpleFile(wagon, remoteRepository, remotePath, repository, resource, destFile.getFilePath());
}
/**
import org.apache.archiva.policies.ReleasesPolicy;
import org.apache.archiva.policies.SnapshotsPolicy;
import org.apache.archiva.policies.urlcache.UrlFailureCache;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.wagon.ResourceDoesNotExistException;
import org.easymock.EasyMock;
import org.junit.Test;
wagonMockControl.replay();
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
wagonMockControl.verify();
downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
wagonMockControl.verify();
- assertNotDownloaded( downloadedFile);
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNoTempFiles( expectedFile );
}
wagonMockControl.replay();
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
wagonMockControl.verify();
wagonMockControl.verify();
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNoTempFiles( expectedFile );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied2", ChecksumPolicy.FIX, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.YES, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
// Validate that file actually came from proxied2 (as intended).
Path proxied2File = Paths.get( REPOPATH_PROXIED2, path );
- assertFileEquals( expectedFile, downloadedFile, proxied2File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied2File );
assertNoTempFiles( expectedFile );
}
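
The conversion pattern repeated through these tests is worth isolating: for file based storages the asset keeps its java.nio identity reachable via getFilePath(), so the existing Path-based assertions keep working. A minimal sketch; the helper name is invented:

import org.apache.archiva.repository.storage.StorageAsset;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class AssetInteropSketch {
    // Null-safe bridge from StorageAsset back to java.nio for test assertions.
    static boolean isSameFile(StorageAsset asset, Path expected) throws IOException {
        return asset != null && Files.isSameFile(expected, asset.getFilePath());
    }
}
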
import org.apache.archiva.policies.ChecksumPolicy;
import org.apache.archiva.policies.ReleasesPolicy;
import org.apache.archiva.policies.SnapshotsPolicy;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.wagon.ResourceDoesNotExistException;
import org.easymock.EasyMock;
import org.junit.Test;
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, true );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
assertNull( downloadedFile );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "066d76e459f7782c312c31e8a11b3c0f1e3e43a7 *get-checksum-both-right-1.0.jar",
"e58f30c6a150a2e843552438d18e15cb *get-checksum-both-right-1.0.jar" );
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "748a3a013bf5eacf2bbb40a2ac7d37889b728837 *get-checksum-sha1-only-1.0.jar",
null );
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, null, "f3af5201bf8da801da37db8842846e1c *get-checksum-md5-only-1.0.jar" );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, null, null );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "invalid checksum file", "invalid checksum file" );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FAIL, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertChecksums( expectedFile, null, null );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FIX, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "4ec20a12dc91557330bd0b39d1805be5e329ae56 get-checksum-both-bad-1.0.jar",
"a292491a35925465e693a44809a078b5 get-checksum-both-bad-1.0.jar" );
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FAIL, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertChecksums( expectedFile, null, null );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FAIL, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
// This is a success situation. No SHA1 with a Good MD5.
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, null, "f3af5201bf8da801da37db8842846e1c *get-checksum-md5-only-1.0.jar" );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FAIL, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertChecksums( expectedFile, null, null );
}
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "3dd1a3a57b807d3ef3fbc6013d926c891cbb8670 *get-checksum-sha1-bad-md5-1.0.jar",
"invalid checksum file" );
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "3dd1a3a57b807d3ef3fbc6013d926c891cbb8670 *get-checksum-sha1-bad-md5-1.0.jar",
"c35f3b76268b73a4ba617f6f275c49ab get-checksum-sha1-bad-md5-1.0.jar" );
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FIX, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "71f7dc3f72053a3f2d9fdd6fef9db055ef957ffb get-checksum-md5-only-1.0.jar",
"f3af5201bf8da801da37db8842846e1c *get-checksum-md5-only-1.0.jar" );
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FIX, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "1f12821c5e43e1a0b76b9564a6ddb0548ccb9486 get-default-layout-1.0.jar",
"3f7341545f21226b6f49a3c2704cb9be get-default-layout-1.0.jar" );
wagonMockControl.replay();
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
wagonMockControl.verify();
// Test results.
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "748a3a013bf5eacf2bbb40a2ac7d37889b728837 *get-checksum-sha1-only-1.0.jar",
null );
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get( REPOPATH_PROXIED1, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
// There are no hashcodes on the proxy side to download, hence the local ones should remain invalid.
assertChecksums( expectedFile, "invalid checksum file", "invalid checksum file" );
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, ChecksumPolicy.FAIL, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNoTempFiles( expectedFile );
// There are no hashcodes on the proxy side to download.
// The FAIL policy will delete the checksums as bad.
saveConnector( ID_DEFAULT_MANAGED, "proxied1", ChecksumPolicy.FIX, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
assertChecksums( expectedFile, "96a08dc80a108cba8efd3b20aec91b32a0b2cbd4 get-bad-local-checksum-1.0.jar",
"46fdd6ca55bf1d7a7eb0c858f41e0ccd get-bad-local-checksum-1.0.jar" );
import org.apache.archiva.policies.ReleasesPolicy;
import org.apache.archiva.policies.SnapshotsPolicy;
import org.apache.archiva.repository.LayoutException;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.wagon.ResourceDoesNotExistException;
import org.apache.maven.wagon.TransferFailedException;
import org.apache.maven.wagon.authorization.AuthorizationException;
wagonMockControl.replay();
// Attempt the proxy fetch.
- Path downloadedFile = null;
+ StorageAsset downloadedFile = null;
try
{
downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository,
wagonMockControl.verify();
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
}
private void confirmSuccess( String path, Path expectedFile, String basedir )
throws Exception
{
- Path downloadedFile = performDownload( path );
+ StorageAsset downloadedFile = performDownload( path );
Path proxied1File = Paths.get( basedir, path );
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
}
private void confirmNotDownloadedNoError( String path )
throws Exception
{
- Path downloadedFile = performDownload( path );
+ StorageAsset downloadedFile = performDownload( path );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
}
- private Path performDownload( String path )
+ private StorageAsset performDownload( String path )
throws ProxyDownloadException, LayoutException
{
wagonMockControl.replay();
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository,
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository,
managedDefaultRepository.toArtifactReference( path ) );
wagonMockControl.verify();
import org.apache.archiva.policies.SnapshotsPolicy;
import org.apache.archiva.proxy.model.RepositoryProxyHandler;
import org.apache.archiva.repository.*;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
import org.apache.commons.io.FileUtils;
import org.assertj.core.api.Assertions;
ArtifactReference artifact = managedDefaultRepository.toArtifactReference( path );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path sourceFile = Paths.get( PROXIED_BASEDIR, path );
assertNotNull( "Expected File should not be null.", expectedFile );
assertNotNull( "Actual File should not be null.", downloadedFile );
- assertTrue( "Check actual file exists.", Files.exists(downloadedFile));
- assertTrue( "Check filename path is appropriate.", Files.isSameFile( expectedFile, downloadedFile));
- assertTrue( "Check file path matches.", Files.isSameFile( expectedFile, downloadedFile));
+ assertTrue( "Check actual file exists.", Files.exists(downloadedFile.getFilePath()));
+ assertTrue( "Check filename path is appropriate.", Files.isSameFile( expectedFile, downloadedFile.getFilePath()));
+ assertTrue( "Check file path matches.", Files.isSameFile( expectedFile, downloadedFile.getFilePath()));
String expectedContents = FileUtils.readFileToString( sourceFile.toFile(), Charset.defaultCharset() );
- String actualContents = FileUtils.readFileToString( downloadedFile.toFile(), Charset.defaultCharset() );
+ String actualContents = FileUtils.readFileToString( downloadedFile.getFilePath().toFile(), Charset.defaultCharset() );
assertEquals( "Check file contents.", expectedContents, actualContents );
Assertions.assertThat( System.getProperty( "http.proxyHost" , "") ).isEmpty();
import org.apache.archiva.policies.ChecksumPolicy;
import org.apache.archiva.policies.ReleasesPolicy;
import org.apache.archiva.policies.SnapshotsPolicy;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.wagon.ResourceDoesNotExistException;
CachedFailuresPolicy.NO, true );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
assertNull( "File should not have been downloaded", downloadedFile );
}
CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path sourceFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, sourceFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), sourceFile );
assertNoTempFiles( expectedFile );
}
CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, path );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, path );
Path sourceFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, sourceFile );
- assertFalse( Files.exists( downloadedFile.getParent().resolve(downloadedFile.getFileName() + ".sha1" )) );
- assertFalse( Files.exists(downloadedFile.getParent().resolve(downloadedFile.getFileName() + ".md5" ) ));
- assertFalse( Files.exists( downloadedFile.getParent().resolve(downloadedFile.getFileName() + ".asc" ) ));
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), sourceFile );
+ assertFalse( Files.exists( downloadedFile.getParent().getFilePath().resolve(downloadedFile.getName() + ".sha1" )) );
+ assertFalse( Files.exists(downloadedFile.getParent().getFilePath().resolve(downloadedFile.getName() + ".md5" ) ));
+ assertFalse( Files.exists( downloadedFile.getParent().getFilePath().resolve(downloadedFile.getName() + ".asc" ) ));
assertNoTempFiles( expectedFile );
}
CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertFileEquals( expectedFile, downloadedFile, expectedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), expectedFile );
assertNoTempFiles( expectedFile );
}
CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, path );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, path );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNotModified( expectedFile, originalModificationTime );
assertNoTempFiles( expectedFile );
}
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNotModified( expectedFile, originalModificationTime );
assertNoTempFiles( expectedFile );
}
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
CachedFailuresPolicy.NO, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED2, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied1File = Paths.get(REPOPATH_PROXIED1, path);
Path proxied2File = Paths.get(REPOPATH_PROXIED2, path);
- assertFileEquals( expectedFile, downloadedFile, proxied1File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied1File );
assertNoTempFiles( expectedFile );
// TODO: is this check even needed if it passes above?
- String actualContents = FileUtils.readFileToString( downloadedFile.toFile(), Charset.defaultCharset() );
+ String actualContents = FileUtils.readFileToString( downloadedFile.getFilePath().toFile(), Charset.defaultCharset() );
String badContents = FileUtils.readFileToString( proxied2File.toFile(), Charset.defaultCharset() );
assertFalse( "Downloaded file contents should not be that of proxy 2",
StringUtils.equals( actualContents, badContents ) );
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED2, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxied2File = Paths.get(REPOPATH_PROXIED2, path);
- assertFileEquals( expectedFile, downloadedFile, proxied2File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied2File );
assertNoTempFiles( expectedFile );
}
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED2, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
assertNull( "File returned was: " + downloadedFile + "; should have got a not found exception",
downloadedFile );
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED2, false );
// Attempt the proxy fetch.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
wagonMockControl.verify();
Path proxied2File = Paths.get(REPOPATH_PROXIED2, path);
- assertFileEquals( expectedFile, downloadedFile, proxied2File );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxied2File );
assertNoTempFiles( expectedFile );
}
wagonMockControl.replay();
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
wagonMockControl.verify();
assertNoTempFiles( expectedFile );
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.VersionUtil;
import org.apache.archiva.configuration.ProxyConnectorConfiguration;
import org.apache.archiva.maven2.metadata.MavenMetadataReader;
import org.apache.archiva.repository.metadata.MetadataTools;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
import org.apache.archiva.repository.metadata.RepositoryMetadataWriter;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.wagon.TransferFailedException;
import org.custommonkey.xmlunit.DetailedDiff;
ProjectReference metadata = createProjectReference( requestedResource );
- Path downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
+ StorageAsset downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
managedDefaultRepository.toMetadataPath(
metadata ) ).getFile();
ProjectReference metadata = createProjectReference( requestedResource );
- Path downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
+ StorageAsset downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
managedDefaultRepository.toMetadataPath(
metadata ) ).getFile();
Path expectedFile = managedDefaultDir.resolve(requestedResource);
ProjectReference metadata = createProjectReference( requestedResource );
- Path downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
+ StorageAsset downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
managedDefaultRepository.toMetadataPath(
metadata ) ).getFile();
VersionedReference metadata = createVersionedReference( requestedResource );
- Path downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
+ StorageAsset downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
managedDefaultRepository.toMetadataPath(
metadata ) ).getFile();
Path expectedFile = managedDefaultDir.resolve(requestedResource);
VersionedReference metadata = createVersionedReference( requestedResource );
- Path downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
+ StorageAsset downloadedFile = proxyHandler.fetchMetadataFromProxies( managedDefaultRepository,
managedDefaultRepository.toMetadataPath(
metadata ) ).getFile();
assertTrue( "Actual file exists.", Files.exists(actualFile) );
StringWriter actualContents = new StringWriter();
- ArchivaRepositoryMetadata metadata = MavenMetadataReader.read( actualFile );
+ FilesystemStorage fsStorage = new FilesystemStorage(actualFile.getParent(), new DefaultFileLockManager());
+ StorageAsset actualFileAsset = fsStorage.getAsset(actualFile.getFileName().toString());
+ ArchivaRepositoryMetadata metadata = MavenMetadataReader.read( actualFileAsset );
RepositoryMetadataWriter.write( metadata, actualContents );
DetailedDiff detailedDiff = new DetailedDiff( new Diff( expectedMetadataXml, actualContents.toString() ) );
import org.apache.archiva.policies.ChecksumPolicy;
import org.apache.archiva.policies.ReleasesPolicy;
import org.apache.archiva.policies.SnapshotsPolicy;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.junit.Test;
import java.nio.file.Files;
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNoTempFiles( expectedFile );
}
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false );
// Attempt to download.
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
// Should not have downloaded as managed is newer than remote.
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNoTempFiles( expectedFile );
}
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
- assertNotDownloaded( downloadedFile );
+ assertNotDownloaded( downloadedFile == null ? null : downloadedFile.getFilePath() );
assertNotModified( expectedFile, expectedTimestamp );
assertNoTempFiles( expectedFile );
}
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED2, ChecksumPolicy.IGNORE, ReleasesPolicy.ALWAYS,
SnapshotsPolicy.ALWAYS, CachedFailuresPolicy.YES , false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
// Configure Connector (usually done within archiva.xml configuration)
saveConnector( ID_DEFAULT_MANAGED, ID_PROXIED1, false);
- Path downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
+ StorageAsset downloadedFile = proxyHandler.fetchFromProxies( managedDefaultRepository, artifact );
Path proxiedFile = Paths.get(REPOPATH_PROXIED1, path);
- assertFileEquals( expectedFile, downloadedFile, proxiedFile );
+ assertFileEquals( expectedFile, downloadedFile.getFilePath(), proxiedFile );
assertNoTempFiles( expectedFile );
}
}
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.VersionUtil;
import org.apache.archiva.metadata.model.ArtifactMetadata;
import org.apache.archiva.metadata.model.maven2.MavenArtifactFacet;
import org.apache.archiva.model.ProjectReference;
import org.apache.archiva.model.VersionedReference;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import org.springframework.stereotype.Service;
+import java.io.IOException;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
private ManagedRepository repository;
+ private FilesystemStorage fsStorage;
ManagedRepositoryContentMock(ManagedRepository repo) {
this.repository = repo;
@Override
public String getRepoRoot( )
{
- return Paths.get("", "target", "test-repository", "managed").toString();
+ return getRepoRootAsset().getFilePath().toString();
+ }
+
+ private StorageAsset getRepoRootAsset() {
+ if (fsStorage==null) {
+ try {
+ fsStorage = new FilesystemStorage(Paths.get("", "target", "test-repository", "managed"), new DefaultFileLockManager());
+ } catch (IOException e) {
+ throw new RuntimeException("Could not create storage for test repository", e);
+ }
+ }
+ return fsStorage.getAsset("");
}
@Override
@Override
public StorageAsset toFile( ArtifactReference reference )
{
- return Paths.get(getRepoRoot(), refs.get(reference));
+ return getRepoRootAsset().resolve( refs.get(reference));
}
@Override
import org.apache.archiva.repository.RepositoryRegistry;
import org.apache.archiva.repository.maven2.MavenSystemManager;
import org.apache.archiva.repository.metadata.MetadataTools;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.artifact.Artifact;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import javax.inject.Named;
-import java.nio.file.Files;
-import java.nio.file.Path;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
for ( String repoId : repositoryIds )
{
ManagedRepository managedRepo = repositoryRegistry.getManagedRepository(repoId);
- Path repoDir = managedRepo.getAsset("").getFilePath();
+ StorageAsset repoDir = managedRepo.getAsset("");
- Path file = pathTranslator.toFile( repoDir, projectArtifact.getGroupId(), projectArtifact.getArtifactId(),
+ StorageAsset file = pathTranslator.toFile( repoDir, projectArtifact.getGroupId(), projectArtifact.getArtifactId(),
projectArtifact.getBaseVersion(),
projectArtifact.getArtifactId() + "-" + projectArtifact.getVersion()
+ ".pom" );
- if ( Files.exists(file) )
+ if ( file.exists() )
{
return managedRepo;
}
// try with snapshot version
if ( StringUtils.endsWith( projectArtifact.getBaseVersion(), VersionUtil.SNAPSHOT ) )
{
- Path metadataFile = file.getParent().resolve( MetadataTools.MAVEN_METADATA );
- if ( Files.exists(metadataFile) )
+ StorageAsset metadataFile = file.getParent().resolve( MetadataTools.MAVEN_METADATA );
+ if ( metadataFile.exists() )
{
try
{
"-" + VersionUtil.SNAPSHOT ) ).append( '-' ).append(
timeStamp ).append( '-' ).append( Integer.toString( buildNumber ) ).append(
".pom" ).toString();
- Path timeStampFile = file.getParent().resolve( timeStampFileName );
+ StorageAsset timeStampFile = file.getParent().resolve( timeStampFileName );
log.debug( "try to find timestamped snapshot version file: {}", timeStampFile);
- if ( Files.exists(timeStampFile) )
+ if ( timeStampFile.exists() )
{
return managedRepo;
}
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
log.warn( "skip fail to find timestamped snapshot pom: {}", e.getMessage() );
}
import org.apache.archiva.metadata.model.ArtifactMetadata;
import org.apache.archiva.metadata.model.maven2.MavenArtifactFacet;
import org.apache.archiva.metadata.repository.storage.RepositoryPathTranslator;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
}
@Override
- public Path toFile(Path basedir, String namespace, String projectId, String projectVersion, String filename )
+ public StorageAsset toFile(StorageAsset basedir, String namespace, String projectId, String projectVersion, String filename )
{
return basedir.resolve( toPath( namespace, projectId, projectVersion, filename ) );
}
@Override
- public Path toFile( Path basedir, String namespace, String projectId, String projectVersion )
+ public StorageAsset toFile( StorageAsset basedir, String namespace, String projectId, String projectVersion )
{
return basedir.resolve( toPath( namespace, projectId, projectVersion ) );
}
}
@Override
- public Path toFile( Path basedir, String namespace, String projectId )
+ public StorageAsset toFile( StorageAsset basedir, String namespace, String projectId )
{
return basedir.resolve( toPath( namespace, projectId ) );
}
@Override
- public Path toFile( Path basedir, String namespace )
+ public StorageAsset toFile( StorageAsset basedir, String namespace )
{
return basedir.resolve( toPath( namespace ) );
}
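
A short, hypothetical usage of the converted translator API; the coordinates are invented and the translator instance is assumed to be injected as elsewhere in this module:

import org.apache.archiva.metadata.repository.storage.RepositoryPathTranslator;
import org.apache.archiva.repository.storage.StorageAsset;

public class TranslatorUsageSketch {
    // Resolves the POM asset for a project version below the repository base asset.
    static StorageAsset resolvePom(RepositoryPathTranslator translator, StorageAsset repoBase) {
        return translator.toFile(repoBase, "org.example", "demo", "1.0", "demo-1.0.pom");
    }
}
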
import org.apache.archiva.repository.*;
import org.apache.archiva.repository.content.PathParser;
import org.apache.archiva.repository.maven2.MavenSystemManager;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Reader;
+import java.nio.channels.Channels;
import java.nio.charset.Charset;
-import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
-import java.util.stream.Stream;
// import java.io.FileNotFoundException;
}
}
}
- Path basedir = Paths.get(managedRepository.getLocation());
+ StorageAsset basedir = managedRepository.getAsset("");
if (VersionUtil.isSnapshot(artifactVersion)) {
- Path metadataFile = pathTranslator.toFile(basedir, readMetadataRequest.getNamespace(),
+ StorageAsset metadataFile = pathTranslator.toFile(basedir, readMetadataRequest.getNamespace(),
readMetadataRequest.getProjectId(), artifactVersion,
METADATA_FILENAME);
try {
artifactVersion =
artifactVersion + snapshotVersion.getTimestamp() + "-" + snapshotVersion.getBuildNumber();
}
- } catch (XMLException e) {
+ } catch (XMLException | IOException e) {
// unable to parse metadata - LOGGER it, and continue with the version as the original SNAPSHOT version
LOGGER.warn("Invalid metadata: {} - {}", metadataFile, e.getMessage());
}
// TODO: won't work well with some other layouts, might need to convert artifact parts to ID by path translator
String id = readMetadataRequest.getProjectId() + "-" + artifactVersion + ".pom";
- Path file =
+ StorageAsset file =
pathTranslator.toFile(basedir, readMetadataRequest.getNamespace(), readMetadataRequest.getProjectId(),
readMetadataRequest.getProjectVersion(), id);
- if (!Files.exists(file)) {
+ if (!file.exists()) {
// metadata could not be resolved
throw new RepositoryStorageMetadataNotFoundException(
- "The artifact's POM file '" + file.toAbsolutePath() + "' was missing");
+ "The artifact's POM file '" + file.getPath() + "' was missing");
}
// TODO: this is a workaround until we can properly resolve using proxies as well - this doesn't cache
}
ModelBuildingRequest req =
- new DefaultModelBuildingRequest().setProcessPlugins(false).setPomFile(file.toFile()).setTwoPhaseBuilding(
+ new DefaultModelBuildingRequest().setProcessPlugins(false).setPomFile(file.getFilePath().toFile()).setTwoPhaseBuilding(
false).setValidationLevel(ModelBuildingRequest.VALIDATION_LEVEL_MINIMAL);
//MRM-1607. olamy this will resolve jdk profiles on the current running archiva jvm
@Override
public Collection<String> listRootNamespaces(String repoId, Filter<String> filter)
throws RepositoryStorageRuntimeException {
- Path dir = getRepositoryBasedir(repoId);
+ StorageAsset dir = getRepositoryBasedir(repoId);
return getSortedFiles(dir, filter);
}
- private static Collection<String> getSortedFiles(Path dir, Filter<String> filter) {
+ private static Collection<String> getSortedFiles(StorageAsset dir, Filter<String> filter) {
- try (Stream<Path> stream = Files.list(dir)) {
- final Predicate<Path> dFilter = new DirectoryFilter(filter);
- return stream.filter(Files::isDirectory)
+ final Predicate<StorageAsset> dFilter = new DirectoryFilter(filter);
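+ // dir.list() returns the children directly, so the Files.list() try-with-resources is no longer needed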
+ return dir.list().stream().filter(StorageAsset::isContainer)
.filter(dFilter)
- .map(path -> path.getFileName().toString())
+ .map(StorageAsset::getName)
.sorted().collect(Collectors.toList());
- } catch (IOException e) {
- LOGGER.error("Could not read directory list {}: {}", dir, e.getMessage(), e);
- return Collections.emptyList();
- }
}
- private Path getRepositoryBasedir(String repoId)
+ private StorageAsset getRepositoryBasedir(String repoId)
throws RepositoryStorageRuntimeException {
ManagedRepository repositoryConfiguration = repositoryRegistry.getManagedRepository(repoId);
- return Paths.get(repositoryConfiguration.getLocation());
+ return repositoryConfiguration.getAsset("");
}
@Override
public Collection<String> listNamespaces(String repoId, String namespace, Filter<String> filter)
throws RepositoryStorageRuntimeException {
- Path dir = pathTranslator.toFile(getRepositoryBasedir(repoId), namespace);
- if (!(Files.exists(dir) && Files.isDirectory(dir))) {
+ StorageAsset dir = pathTranslator.toFile(getRepositoryBasedir(repoId), namespace);
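+ // list namespaces only if the directory exists and is a container (i.e. a directory)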
+ if (!(dir.exists() && dir.isContainer())) {
return Collections.emptyList();
}
// scan all the directories which are potential namespaces. Any directories known to be projects are excluded
- Predicate<Path> dFilter = new DirectoryFilter(filter);
- try (Stream<Path> stream = Files.list(dir)) {
- return stream.filter(dFilter).filter(path -> !isProject(path, filter)).map(path -> path.getFileName().toString())
+ Predicate<StorageAsset> dFilter = new DirectoryFilter(filter);
+ return dir.list().stream().filter(dFilter).filter(path -> !isProject(path, filter)).map(StorageAsset::getName)
.sorted().collect(Collectors.toList());
- } catch (IOException e) {
- LOGGER.error("Could not read directory {}: {}", dir, e.getMessage(), e);
- return Collections.emptyList();
- }
}
@Override
public Collection<String> listProjects(String repoId, String namespace, Filter<String> filter)
throws RepositoryStorageRuntimeException {
- Path dir = pathTranslator.toFile(getRepositoryBasedir(repoId), namespace);
- if (!(Files.exists(dir) && Files.isDirectory(dir))) {
+ StorageAsset dir = pathTranslator.toFile(getRepositoryBasedir(repoId), namespace);
+ if (!(dir.exists() && dir.isContainer())) {
return Collections.emptyList();
}
// scan all directories in the namespace, and only include those that are known to be projects
- final Predicate<Path> dFilter = new DirectoryFilter(filter);
- try (Stream<Path> stream = Files.list(dir)) {
- return stream.filter(dFilter).filter(path -> isProject(path, filter)).map(path -> path.getFileName().toString())
+ final Predicate<StorageAsset> dFilter = new DirectoryFilter(filter);
+ return dir.list().stream().filter(dFilter).filter(path -> isProject(path, filter)).map(StorageAsset::getName)
.sorted().collect(Collectors.toList());
- } catch (IOException e) {
- LOGGER.error("Could not read directory {}: {}", dir, e.getMessage(), e);
- return Collections.emptyList();
- }
}
public Collection<String> listProjectVersions(String repoId, String namespace, String projectId,
Filter<String> filter)
throws RepositoryStorageRuntimeException {
- Path dir = pathTranslator.toFile(getRepositoryBasedir(repoId), namespace, projectId);
- if (!(Files.exists(dir) && Files.isDirectory(dir))) {
+ StorageAsset dir = pathTranslator.toFile(getRepositoryBasedir(repoId), namespace, projectId);
+ if (!(dir.exists() && dir.isContainer())) {
return Collections.emptyList();
}
@Override
public Collection<ArtifactMetadata> readArtifactsMetadata(ReadMetadataRequest readMetadataRequest)
throws RepositoryStorageRuntimeException {
- Path dir = pathTranslator.toFile(getRepositoryBasedir(readMetadataRequest.getRepositoryId()),
+ StorageAsset dir = pathTranslator.toFile(getRepositoryBasedir(readMetadataRequest.getRepositoryId()),
readMetadataRequest.getNamespace(), readMetadataRequest.getProjectId(),
readMetadataRequest.getProjectVersion());
- if (!(Files.exists(dir) && Files.isDirectory(dir))) {
+ if (!(dir.exists() && dir.isContainer())) {
return Collections.emptyList();
}
// all files that are not metadata and not a checksum / signature are considered artifacts
- final Predicate<Path> dFilter = new ArtifactDirectoryFilter(readMetadataRequest.getFilter());
- try (Stream<Path> stream = Files.list(dir)) {
+ final Predicate<StorageAsset> dFilter = new ArtifactDirectoryFilter(readMetadataRequest.getFilter());
// Returns a map TRUE -> (success values), FALSE -> (Exceptions)
- Map<Boolean, List<Try<ArtifactMetadata>>> result = stream.filter(dFilter).map(path -> {
+ Map<Boolean, List<Try<ArtifactMetadata>>> result = dir.list().stream().filter(dFilter).map(path -> {
try {
return Try.success(getArtifactFromFile(readMetadataRequest.getRepositoryId(), readMetadataRequest.getNamespace(),
readMetadataRequest.getProjectId(), readMetadataRequest.getProjectVersion(),
}
return result.get(Boolean.TRUE).stream().map(tr -> tr.get()).collect(Collectors.toList());
}
- } catch (IOException e) {
- LOGGER.error("Could not read directory {}: {}", dir, e.getMessage(), e);
- }
- return Collections.emptyList();
}
}
private ArtifactMetadata getArtifactFromFile(String repoId, String namespace, String projectId,
- String projectVersion, Path file) throws IOException {
+ String projectVersion, StorageAsset file) throws IOException {
ArtifactMetadata metadata =
- pathTranslator.getArtifactFromId(repoId, namespace, projectId, projectVersion, file.getFileName().toString());
+ pathTranslator.getArtifactFromId(repoId, namespace, projectId, projectVersion, file.getName());
populateArtifactMetadataFromFile(metadata, file);
proxyHandler.fetchFromProxies(managedRepository, pomReference);
// Open and read the POM from the managed repo
- Path pom = managedRepository.toFile(pomReference);
+ StorageAsset pom = managedRepository.toFile(pomReference);
- if (!Files.exists(pom)) {
+ if (!pom.exists()) {
return;
}
try {
// MavenXpp3Reader leaves the file open, so we need to close it ourselves.
- Model model = null;
- try (Reader reader = Files.newBufferedReader(pom, Charset.defaultCharset())) {
+ Model model;
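+ // bridge the asset's read channel to a Reader; Channels.newReader handles the charset decoding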
+ try (Reader reader = Channels.newReader(pom.getReadChannel(), Charset.defaultCharset().name())) {
model = MAVEN_XPP_3_READER.read(reader);
}
@Override
public String getFilePathWithVersion(final String requestPath, ManagedRepositoryContent managedRepositoryContent)
- throws XMLException, RelocationException {
+ throws RelocationException, XMLException, IOException {
if (StringUtils.endsWith(requestPath, METADATA_FILENAME)) {
return getFilePath(requestPath, managedRepositoryContent.getRepository());
if (StringUtils.endsWith(artifactReference.getVersion(), VersionUtil.SNAPSHOT)) {
// read maven metadata to get last timestamp
- Path metadataDir = Paths.get(managedRepositoryContent.getRepoRoot(), filePath).getParent();
- if (!Files.exists(metadataDir)) {
+ StorageAsset metadataDir = managedRepositoryContent.getRepository().getAsset( filePath).getParent();
+ if (!metadataDir.exists()) {
return filePath;
}
- Path metadataFile = metadataDir.resolve(METADATA_FILENAME);
- if (!Files.exists(metadataFile)) {
+ StorageAsset metadataFile = metadataDir.resolve(METADATA_FILENAME);
+ if (!metadataFile.exists()) {
return filePath;
}
ArchivaRepositoryMetadata archivaRepositoryMetadata = MavenMetadataReader.read(metadataFile);
return joinedString;
}
- private static void populateArtifactMetadataFromFile(ArtifactMetadata metadata, Path file) throws IOException {
+ private static void populateArtifactMetadataFromFile(ArtifactMetadata metadata, StorageAsset file) throws IOException {
metadata.setWhenGathered(new Date());
- metadata.setFileLastModified(Files.getLastModifiedTime(file).toMillis());
- ChecksummedFile checksummedFile = new ChecksummedFile(file);
+ metadata.setFileLastModified(file.getModificationTime().toEpochMilli());
+ ChecksummedFile checksummedFile = new ChecksummedFile(file.getFilePath());
try {
metadata.setMd5(checksummedFile.calculateChecksum(ChecksumAlgorithm.MD5));
} catch (IOException e) {
} catch (IOException e) {
LOGGER.error("Unable to checksum file {}: {},SHA1", file, e.getMessage());
}
- metadata.setSize(Files.size(file));
+ metadata.setSize(file.getSize());
}
- private boolean isProject(Path dir, Filter<String> filter) {
+ private boolean isProject(StorageAsset dir, Filter<String> filter) {
// scan directories for a valid project version subdirectory, meaning this must be a project directory
- final Predicate<Path> dFilter = new DirectoryFilter(filter);
- try (Stream<Path> stream = Files.list(dir)) {
- boolean projFound = stream.filter(dFilter)
+ final Predicate<StorageAsset> dFilter = new DirectoryFilter(filter);
+ boolean projFound = dir.list().stream().filter(dFilter)
.anyMatch(path -> isProjectVersion(path));
if (projFound) {
return true;
}
- } catch (IOException e) {
- LOGGER.error("Could not read directory list {}: {}", dir, e.getMessage(), e);
- }
// if a metadata file is present, check if this is the "artifactId" directory, marking it as a project
ArchivaRepositoryMetadata metadata = readMetadata(dir);
- if (metadata != null && dir.getFileName().toString().equals(metadata.getArtifactId())) {
+ if (metadata != null && dir.getName().equals(metadata.getArtifactId())) {
return true;
}
return false;
}
- private boolean isProjectVersion(Path dir) {
- final String artifactId = dir.getParent().getFileName().toString();
- final String projectVersion = dir.getFileName().toString();
+ private boolean isProjectVersion(StorageAsset dir) {
+ final String artifactId = dir.getParent().getName();
+ final String projectVersion = dir.getName();
// check if there is a POM artifact file to ensure it is a version directory
- Predicate<Path> filter;
+ Predicate<StorageAsset> filter;
if (VersionUtil.isSnapshot(projectVersion)) {
filter = new PomFilenameFilter(artifactId, projectVersion);
} else {
final String pomFile = artifactId + "-" + projectVersion + ".pom";
filter = new PomFileFilter(pomFile);
}
- try (Stream<Path> stream = Files.list(dir)) {
- if (stream.filter(Files::isRegularFile).anyMatch(filter)) {
+ if (dir.list().stream().filter(f -> !f.isContainer()).anyMatch(filter)) {
return true;
}
- } catch (IOException e) {
- LOGGER.error("Could not list directory {}: {}", dir, e.getMessage(), e);
- }
-
// if a metadata file is present, check if this is the "version" directory, marking it as a project version
ArchivaRepositoryMetadata metadata = readMetadata(dir);
if (metadata != null && projectVersion.equals(metadata.getVersion())) {
return false;
}
- private ArchivaRepositoryMetadata readMetadata(Path directory) {
+ private ArchivaRepositoryMetadata readMetadata(StorageAsset directory) {
ArchivaRepositoryMetadata metadata = null;
- Path metadataFile = directory.resolve(METADATA_FILENAME);
- if (Files.exists(metadataFile)) {
+ StorageAsset metadataFile = directory.resolve(METADATA_FILENAME);
+ if (metadataFile.exists()) {
try {
metadata = MavenMetadataReader.read(metadataFile);
- } catch (XMLException e) {
+ } catch (XMLException | IOException e) {
// ignore missing or invalid metadata
}
}
}
private static class DirectoryFilter
- implements Predicate<Path> {
+ implements Predicate<StorageAsset> {
private final Filter<String> filter;
public DirectoryFilter(Filter<String> filter) {
}
@Override
- public boolean test(Path dir) {
- final String name = dir.getFileName().toString();
+ public boolean test(StorageAsset dir) {
+ final String name = dir.getName();
if (!filter.accept(name)) {
return false;
} else if (name.startsWith(".")) {
return false;
- } else if (!Files.isDirectory(dir)) {
+ } else if (!dir.isContainer()) {
return false;
}
return true;
}
private static class ArtifactDirectoryFilter
- implements Predicate<Path> {
+ implements Predicate<StorageAsset> {
private final Filter<String> filter;
private ArtifactDirectoryFilter(Filter<String> filter) {
}
@Override
- public boolean test(Path dir) {
- final String name = dir.getFileName().toString();
+ public boolean test(StorageAsset dir) {
+ final String name = dir.getName();
// TODO compare to logic in maven-repository-layer
if (!filter.accept(name)) {
return false;
return false;
} else if (Arrays.binarySearch(IGNORED_FILES, name) >= 0) {
return false;
- } else if (Files.isDirectory(dir)) {
+ } else if (dir.isContainer()) {
return false;
}
// some files from remote repositories can have name like maven-metadata-archiva-vm-all-public.xml
private static final class PomFilenameFilter
- implements Predicate<Path> {
+ implements Predicate<StorageAsset> {
private final String artifactId, projectVersion;
}
@Override
- public boolean test(Path dir) {
- final String name = dir.getFileName().toString();
+ public boolean test(StorageAsset dir) {
+ final String name = dir.getName();
if (name.startsWith(artifactId + "-") && name.endsWith(".pom")) {
String v = name.substring(artifactId.length() + 1, name.length() - 4);
v = VersionUtil.getBaseVersion(v);
}
private static class PomFileFilter
- implements Predicate<Path> {
+ implements Predicate<StorageAsset> {
private final String pomFile;
private PomFileFilter(String pomFile) {
}
@Override
- public boolean test(Path dir) {
- return pomFile.equals(dir.getFileName().toString());
+ public boolean test(StorageAsset dir) {
+ return pomFile.equals(dir.getName());
}
}
import org.apache.archiva.repository.RemoteRepository;
import org.apache.archiva.repository.RepositoryCredentials;
import org.apache.archiva.repository.maven2.MavenSystemManager;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.commons.lang.StringUtils;
import org.apache.http.auth.UsernamePasswordCredentials;
private RepositorySystemSession session;
private VersionRangeResolver versionRangeResolver;
- private Path basedir;
+ private StorageAsset basedir;
private RepositoryPathTranslator pathTranslator;
private ManagedRepository managedRepository;
- public RepositoryModelResolver( Path basedir, RepositoryPathTranslator pathTranslator )
+ public RepositoryModelResolver(StorageAsset basedir, RepositoryPathTranslator pathTranslator )
{
this.basedir = basedir;
Map<String, NetworkProxy> networkProxiesMap, ManagedRepository targetRepository,
MavenSystemManager mavenSystemManager)
{
- this( Paths.get( managedRepository.getLocation() ), pathTranslator );
+ this( managedRepository.getAsset(""), pathTranslator );
this.managedRepository = managedRepository;
String filename = artifactId + "-" + version + ".pom";
// TODO: we need to convert 1.0-20091120.112233-1 type paths to baseVersion for the below call - add a test
- Path model = pathTranslator.toFile( basedir, groupId, artifactId, version, filename );
+ StorageAsset model = pathTranslator.toFile( basedir, groupId, artifactId, version, filename );
- if ( !Files.exists(model) )
+ if ( !model.exists() )
{
/**
*
try
{
boolean success = getModelFromProxy( remoteRepository, groupId, artifactId, version, filename );
- if ( success && Files.exists(model) )
+ if ( success && model.exists() )
{
log.info( "Model '{}' successfully retrieved from remote repository '{}'",
- model.toAbsolutePath(), remoteRepository.getId() );
+ model.getPath(), remoteRepository.getId() );
break;
}
}
{
log.info(
"An exception was caught while attempting to retrieve model '{}' from remote repository '{}'.Reason:{}",
- model.toAbsolutePath(), remoteRepository.getId(), e.getMessage() );
+ model.getPath(), remoteRepository.getId(), e.getMessage() );
}
catch ( Exception e )
{
log.warn(
"An exception was caught while attempting to retrieve model '{}' from remote repository '{}'.Reason:{}",
- model.toAbsolutePath(), remoteRepository.getId(), e.getMessage() );
+ model.getPath(), remoteRepository.getId(), e.getMessage() );
continue;
}
}
}
- return new FileModelSource( model.toFile() );
+ return new FileModelSource( model.getFilePath().toFile() );
}
public ModelSource resolveModel(Parent parent) throws UnresolvableModelException {
log.debug( "use snapshot path {} for maven coordinate {}:{}:{}", snapshotPath, groupId, artifactId,
version );
- Path model = basedir.resolve( snapshotPath );
+ StorageAsset model = basedir.resolve( snapshotPath );
//model = pathTranslator.toFile( basedir, groupId, artifactId, lastVersion, filename );
- if ( Files.exists(model) )
+ if ( model.exists() )
{
- return model;
+ return model.getFilePath();
}
}
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
log.warn( "fail to read {}, {}", mavenMetadata.toAbsolutePath(), e.getCause() );
}
import org.apache.archiva.repository.ManagedRepository;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.RepositoryException;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
import java.io.IOException;
import org.apache.archiva.common.filelock.FileLockManager;
import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.FilesystemStorage;
-import org.apache.archiva.repository.content.RepositoryStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.content.maven2.MavenRepositoryRequestInfo;
import org.apache.archiva.repository.features.ArtifactCleanupFeature;
import org.apache.archiva.repository.features.IndexCreationFeature;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Locale;
-import java.util.function.Function;
/**
* Maven2 managed repository implementation.
import org.apache.archiva.repository.RepositoryType;
import org.apache.archiva.repository.StandardCapabilities;
import org.apache.archiva.repository.UnsupportedFeatureException;
-import org.apache.archiva.repository.content.FilesystemStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
import org.apache.archiva.repository.features.RepositoryFeature;
import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.filelock.FileLockManager;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.FilesystemStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Locale;
import org.apache.archiva.common.filelock.FileLockManager;
import org.apache.archiva.configuration.*;
import org.apache.archiva.repository.*;
-import org.apache.archiva.repository.content.FilesystemAsset;
-import org.apache.archiva.repository.content.FilesystemStorage;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.repository.features.ArtifactCleanupFeature;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
IndexCreationFeature indexCreationFeature = repositoryGroup.getFeature( IndexCreationFeature.class ).get();
indexCreationFeature.setIndexPath( getURIFromString(configuration.getMergedIndexPath()) );
Path localPath = Paths.get(configuration.getMergedIndexPath());
- if (localPath.isAbsolute()) {
- indexCreationFeature.setLocalIndexPath( new FilesystemAsset(localPath.getFileName().toString(), localPath) );
+ Path repoGroupPath = repositoryGroup.getAsset("").getFilePath().toAbsolutePath();
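+ // three cases: absolute path outside the group, absolute path inside the group, relative path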
+ if (localPath.isAbsolute() && !localPath.startsWith(repoGroupPath)) {
+ try {
+ FilesystemStorage storage = new FilesystemStorage(localPath.getParent(), fileLockManager);
+ indexCreationFeature.setLocalIndexPath(storage.getAsset(localPath.getFileName().toString()));
+ } catch (IOException e) {
+ throw new RepositoryException("Could not initialize storage for index path "+localPath);
+ }
+ } else if (localPath.isAbsolute()) {
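+ // absolute path inside the repository group: address it relative to the group root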
+ indexCreationFeature.setLocalIndexPath(repositoryGroup.getAsset(repoGroupPath.relativize(localPath).toString()));
} else
{
- indexCreationFeature.setLocalIndexPath( new FilesystemAsset(localPath.toString(), archivaConfiguration.getRepositoryGroupBaseDir( ).resolve( localPath )));
+ indexCreationFeature.setLocalIndexPath(repositoryGroup.getAsset(localPath.toString()));
}
}
// References to other repositories are set filled by the registry
import org.junit.Test;
import org.junit.runner.RunWith;
+import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
@Test
public void testGroupMetadata()
- throws XMLException
- {
+ throws XMLException, IOException {
Path metadataFile = defaultRepoDir.resolve( "org/apache/maven/plugins/maven-metadata.xml" );
ArchivaRepositoryMetadata metadata = MavenMetadataReader.read( metadataFile );
@Test
public void testProjectMetadata()
- throws XMLException
- {
+ throws XMLException, IOException {
Path metadataFile = defaultRepoDir.resolve( "org/apache/maven/shared/maven-downloader/maven-metadata.xml" );
ArchivaRepositoryMetadata metadata = MavenMetadataReader.read( metadataFile);
@Test
public void testProjectVersionMetadata()
- throws XMLException
- {
+ throws XMLException, IOException {
Path metadataFile = defaultRepoDir.resolve( "org/apache/apache/5-SNAPSHOT/maven-metadata.xml" );
ArchivaRepositoryMetadata metadata = MavenMetadataReader.read( metadataFile );
* under the License.
*/
-import org.apache.archiva.admin.model.RepositoryAdminException;
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.FileUtils;
import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.configuration.ArchivaConfiguration;
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryType;
import org.apache.archiva.repository.UnsupportedRepositoryTypeException;
-import org.apache.archiva.repository.content.FilesystemAsset;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
import org.apache.commons.lang.StringUtils;
private Path getIndexPath( ArchivaIndexingContext ctx )
{
- return PathUtil.getPathFromUri( ctx.getPath( ) );
+ return ctx.getPath( ).getFilePath();
}
@FunctionalInterface
@Override
public void addArtifactsToIndex( final ArchivaIndexingContext context, final Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
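+ // resolve each artifact URI against the context's local index path before handing it to the indexer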
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.addArtifactsToIndex(artifacts, indexingContext);
} catch (IOException e) {
@Override
public void removeArtifactsFromIndex( ArchivaIndexingContext context, Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.deleteArtifactsFromIndex(artifacts, indexingContext);
} catch (IOException e) {
throw new IndexCreationFailedException( "Could not create index context for repository " + repository.getId( )
+ ( StringUtils.isNotEmpty( e.getMessage( ) ) ? ": " + e.getMessage( ) : "" ), e );
}
- MavenIndexContextMock context = new MavenIndexContextMock( repository, mvnCtx );
+ MavenIndexContextMock context = null;
+ try {
+ context = new MavenIndexContextMock( repository, mvnCtx );
+ } catch (IOException e) {
+ throw new IndexCreationFailedException(e);
+ }
return context;
}
log.warn("Index close failed");
}
try {
- FileUtils.deleteDirectory(Paths.get(context.getPath()));
+ FileUtils.deleteDirectory(context.getPath().getFilePath());
} catch (IOException e) {
throw new IndexUpdateFailedException("Could not delete index files");
}
}
+
private StorageAsset getIndexPath( Repository repo) throws IOException {
IndexCreationFeature icf = repo.getFeature(IndexCreationFeature.class).get();
Path repoDir = repo.getAsset( "" ).getFilePath();
URI indexDir = icf.getIndexPath();
String indexPath = indexDir.getPath();
Path indexDirectory = null;
+ FilesystemStorage fsStorage = (FilesystemStorage) repo.getAsset("").getStorage();
if ( ! StringUtils.isEmpty(indexDir.toString( ) ) )
{
if ( indexDirectory.isAbsolute( ) )
{
indexPath = indexDirectory.getFileName().toString();
+ fsStorage = new FilesystemStorage(indexDirectory.getParent(), new DefaultFileLockManager());
}
else
{
{
Files.createDirectories( indexDirectory );
}
- return new FilesystemAsset( indexPath, indexDirectory );
+ return new FilesystemAsset( fsStorage, indexPath, indexDirectory );
}
private IndexingContext createRemoteContext(RemoteRepository remoteRepository ) throws IOException
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.indexer.ArchivaIndexingContext;
import org.apache.archiva.repository.Repository;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.index.context.IndexingContext;
import java.io.IOException;
-import java.net.URI;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.sql.Date;
private IndexingContext delegate;
private Repository repository;
+ private FilesystemStorage indexStorage;
- MavenIndexContextMock(Repository repository, IndexingContext delegate) {
+ MavenIndexContextMock(Repository repository, IndexingContext delegate) throws IOException {
this.delegate = delegate;
this.repository = repository;
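+ // back the mock with a FilesystemStorage rooted at the index directory so getPath() can return a StorageAsset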
+ indexStorage = new FilesystemStorage(delegate.getIndexDirectoryFile().toPath(), new DefaultFileLockManager());
}
}
@Override
- public URI getPath() {
- return delegate.getIndexDirectoryFile().toURI();
+ public StorageAsset getPath() {
+ return indexStorage.getAsset("");
}
@Override
import org.junit.Test;
import org.junit.runner.RunWith;
+import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
{
@Test
public void testLoadSimple()
- throws XMLException
- {
+ throws XMLException, IOException {
Path defaultRepoDir = Paths.get( "src/test/repositories/default-repository" );
Path metadataFile = defaultRepoDir.resolve( "org/apache/maven/shared/maven-downloader/maven-metadata.xml" );
@Test
public void testLoadComplex()
- throws XMLException
- {
+ throws XMLException, IOException {
Path defaultRepoDir = Paths.get( "src/test/repositories/default-repository" );
Path metadataFile = defaultRepoDir.resolve( "org/apache/maven/samplejar/maven-metadata.xml" );
import org.apache.archiva.repository.ManagedRepository;
import org.apache.archiva.repository.ReleaseScheme;
import org.apache.archiva.repository.RepositoryRegistry;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.scheduler.indexing.ArtifactIndexingTask;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
package org.apache.archiva.scheduler.repository.model;
import org.apache.archiva.redback.components.taskqueue.Task;
-import org.apache.archiva.repository.content.StorageAsset;
-
-import java.nio.file.Path;
+import org.apache.archiva.repository.storage.StorageAsset;
/*
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.FileUtils;
import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.configuration.ArchivaConfiguration;
import org.apache.archiva.indexer.IndexCreationFailedException;
import org.apache.archiva.indexer.IndexUpdateFailedException;
import org.apache.archiva.indexer.UnsupportedBaseContextException;
-import org.apache.archiva.proxy.ProxyRegistry;
import org.apache.archiva.proxy.maven.WagonFactory;
import org.apache.archiva.proxy.maven.WagonFactoryException;
import org.apache.archiva.proxy.maven.WagonFactoryRequest;
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryType;
import org.apache.archiva.repository.UnsupportedRepositoryTypeException;
-import org.apache.archiva.repository.content.FilesystemAsset;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang.StringUtils;
import org.apache.maven.index.ArtifactContext;
import org.apache.maven.index.ArtifactContextProducer;
private Path getIndexPath( ArchivaIndexingContext ctx )
{
- return PathUtil.getPathFromUri( ctx.getPath( ) );
+ return ctx.getPath().getFilePath();
}
@FunctionalInterface
@Override
public void addArtifactsToIndex( final ArchivaIndexingContext context, final Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.addArtifactsToIndex(artifacts, indexingContext);
} catch (IOException e) {
@Override
public void removeArtifactsFromIndex( ArchivaIndexingContext context, Collection<URI> artifactReference ) throws IndexUpdateFailedException
{
- final URI ctxUri = context.getPath();
+ final StorageAsset ctxUri = context.getPath();
executeUpdateFunction(context, indexingContext -> {
- Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.resolve(r)).toFile())).collect(Collectors.toList());
+ Collection<ArtifactContext> artifacts = artifactReference.stream().map(r -> artifactContextProducer.getArtifactContext(indexingContext, Paths.get(ctxUri.getFilePath().toUri().resolve(r)).toFile())).collect(Collectors.toList());
try {
indexer.deleteArtifactsFromIndex(artifacts, indexingContext);
} catch (IOException e) {
log.warn("Index close failed");
}
try {
- FileUtils.deleteDirectory(Paths.get(context.getPath()));
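+ // delete the index directory through the storage API instead of the filesystem helper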
+ StorageUtil.deleteRecursively(context.getPath());
} catch (IOException e) {
throw new IndexUpdateFailedException("Could not delete index files");
}
URI indexDir = icf.getIndexPath();
String indexPath = indexDir.getPath();
Path indexDirectory = null;
+ FilesystemStorage filesystemStorage = (FilesystemStorage) repo.getAsset("").getStorage();
if ( ! StringUtils.isEmpty(indexDir.toString( ) ) )
{
if ( indexDirectory.isAbsolute( ) )
{
indexPath = indexDirectory.getFileName().toString();
+ filesystemStorage = new FilesystemStorage(indexDirectory.getParent(), new DefaultFileLockManager());
}
else
{
{
Files.createDirectories( indexDirectory );
}
- return new FilesystemAsset( indexPath, indexDirectory);
+ return new FilesystemAsset( filesystemStorage, indexPath, indexDirectory);
}
private IndexingContext createRemoteContext(RemoteRepository remoteRepository ) throws IOException
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.indexer.ArchivaIndexingContext;
import org.apache.archiva.repository.Repository;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.maven.index.context.IndexingContext;
import java.io.IOException;
private IndexingContext delegate;
private Repository repository;
+ private FilesystemStorage filesystemStorage;
MavenIndexContextMock( Repository repository, IndexingContext delegate) {
this.delegate = delegate;
this.repository = repository;
+ try {
+ filesystemStorage = new FilesystemStorage(delegate.getIndexDirectoryFile().toPath().getParent(), new DefaultFileLockManager());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
}
}
@Override
- public URI getPath() {
- return delegate.getIndexDirectoryFile().toURI();
+ public StorageAsset getPath() {
+ return new FilesystemAsset(filesystemStorage, delegate.getIndexDirectoryFile().toPath().getFileName().toString(), delegate.getIndexDirectoryFile().toPath());
+
}
@Override
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-security</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.archiva</groupId>
<artifactId>archiva-repository-admin-api</artifactId>
import org.apache.archiva.repository.RepositoryException;
import org.apache.archiva.repository.RepositoryNotFoundException;
import org.apache.archiva.repository.metadata.MetadataTools;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.archiva.rest.api.model.*;
import org.apache.archiva.rest.api.services.ArchivaRestServiceException;
import org.apache.archiva.rest.api.services.BrowseService;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.nio.file.StandardOpenOption;
import java.util.*;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
ArchivaArtifact archivaArtifact = new ArchivaArtifact( groupId, artifactId, version, classifier,
StringUtils.isEmpty( type ) ? "jar" : type,
repoId );
- Path file = managedRepositoryContent.toFile( archivaArtifact );
- if ( Files.exists(file) )
+ StorageAsset file = managedRepositoryContent.toFile( archivaArtifact );
+ if ( file.exists() )
{
return readFileEntries( file, path, repoId );
}
ArchivaArtifact archivaArtifact = new ArchivaArtifact( groupId, artifactId, version, classifier,
StringUtils.isEmpty( type ) ? "jar" : type,
repoId );
- Path file = managedRepositoryContent.toFile( archivaArtifact );
- if ( !Files.exists(file) )
+ StorageAsset file = managedRepositoryContent.toFile( archivaArtifact );
+ if ( !file.exists() )
{
log.debug( "file: {} not exists for repository: {} try next repository", file, repoId );
continue;
if ( StringUtils.isNotBlank( path ) )
{
// zip entry of the path -> path must a real file entry of the archive
- JarFile jarFile = new JarFile( file.toFile() );
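+ // JarFile needs a local file; getAssetDataAsPath may stage a non-filesystem asset in a temporary file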
+ StorageUtil.PathInformation pathInfo = StorageUtil.getAssetDataAsPath(file);
+ JarFile jarFile = new JarFile( pathInfo.getPath().toFile());
ZipEntry zipEntry = jarFile.getEntry( path );
try (InputStream inputStream = jarFile.getInputStream( zipEntry ))
{
finally
{
closeQuietly( jarFile );
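+ // remove the temporary copy if the asset had to be staged locally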
+ if (pathInfo.isTmpFile()) {
+ Files.deleteIfExists(pathInfo.getPath());
+ }
}
}
- return new ArtifactContent( new String(Files.readAllBytes( file ), ARTIFACT_CONTENT_ENCODING), repoId );
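+ // stream the artifact content through the storage API; IOUtils.toString reads the stream fully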
+ try(InputStream readStream = file.getReadStream()) {
+ return new ArtifactContent(IOUtils.toString(readStream, ARTIFACT_CONTENT_ENCODING), repoId);
+ }
}
}
catch ( IOException e )
StringUtils.isEmpty( classifier )
? ""
: classifier, "jar", repoId );
- Path file = managedRepositoryContent.toFile( archivaArtifact );
+ StorageAsset file = managedRepositoryContent.toFile( archivaArtifact );
- if ( file != null && Files.exists(file) )
+ if ( file != null && file.exists() )
{
return true;
}
// in case of SNAPSHOT we can have timestamped version locally !
if ( StringUtils.endsWith( version, VersionUtil.SNAPSHOT ) )
{
- Path metadataFile = file.getParent().resolve(MetadataTools.MAVEN_METADATA );
- if ( Files.exists(metadataFile) )
+ StorageAsset metadataFile = file.getParent().resolve( MetadataTools.MAVEN_METADATA );
+ if ( metadataFile.exists() )
{
try
{
.append( ( StringUtils.isEmpty( classifier ) ? "" : "-" + classifier ) ) //
.append( ".jar" ).toString();
- Path timeStampFile = file.getParent().resolve( timeStampFileName );
- log.debug( "try to find timestamped snapshot version file: {}", timeStampFile.toAbsolutePath() );
- if ( Files.exists(timeStampFile) )
+ StorageAsset timeStampFile = file.getParent().resolve( timeStampFileName );
+ log.debug( "try to find timestamped snapshot version file: {}", timeStampFile.getPath() );
+ if ( timeStampFile.exists() )
{
return true;
}
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
log.warn( "skip fail to find timestamped snapshot file: {}", e.getMessage() );
}
file = proxyHandler.fetchFromProxies( managedRepositoryContent, path );
- if ( file != null && Files.exists(file) )
+ if ( file != null && file.exists() )
{
// download pom now
String pomPath = StringUtils.substringBeforeLast( path, ".jar" ) + ".pom";
}
}
- protected List<ArtifactContentEntry> readFileEntries(final Path file, final String filterPath, final String repoId )
+ protected List<ArtifactContentEntry> readFileEntries(final StorageAsset file, final String filterPath, final String repoId )
throws IOException
{
String cleanedfilterPath = filterPath==null ? "" : (StringUtils.startsWith(filterPath, "/") ?
if (!StringUtils.endsWith(cleanedfilterPath,"/") && !StringUtils.isEmpty(cleanedfilterPath)) {
filterDepth++;
}
- JarFile jarFile = new JarFile( file.toFile() );
+
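+ // stage the asset as a local path so JarFile can open it (remote storage may use a temp copy, cleaned up below)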
+ StorageUtil.PathInformation pathInfo = StorageUtil.getAssetDataAsPath(file);
+ JarFile jarFile = new JarFile(pathInfo.getPath().toFile());
try
{
Enumeration<JarEntry> jarEntryEnumeration = jarFile.entries();
{
jarFile.close();
}
+ if (pathInfo.isTmpFile()) {
+ Files.deleteIfExists(pathInfo.getPath());
+ }
}
List<ArtifactContentEntry> sorted = new ArrayList<>( artifactContentEntryMap.values() );
Collections.sort( sorted, ArtifactContentEntryComparator.INSTANCE );
import org.apache.archiva.repository.RepositoryException;
import org.apache.archiva.repository.RepositoryNotFoundException;
import org.apache.archiva.repository.RepositoryRegistry;
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
-import org.apache.archiva.repository.content.StorageUtil;
+import org.apache.archiva.repository.storage.RepositoryStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.archiva.repository.events.RepositoryListener;
import org.apache.archiva.repository.metadata.MetadataTools;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
import javax.inject.Named;
import javax.ws.rs.core.Response;
import java.io.IOException;
-import java.nio.file.FileSystems;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.nio.file.StandardCopyOption;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
{
metadata = MavenMetadataReader.read( metadataFile.getFilePath() );
}
- catch ( XMLException e )
+ catch (XMLException | IOException e )
{
throw new RepositoryMetadataException( e.getMessage(), e );
}
throws IOException
{
- StorageUtil.copyAsset( sourceStorage, sourceFile, targetStorage, targetPath, true );
+ StorageUtil.copyAsset( sourceFile, targetPath, true );
if ( fixChecksums )
{
fixChecksums( targetPath );
projectMetadata.setReleasedVersion( latestVersion );
}
- RepositoryMetadataWriter.write( projectMetadata, projectMetadataFile.getFilePath());
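+ // write the metadata through the asset's write stream so non-filesystem storage works as well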
+ try(OutputStreamWriter writer = new OutputStreamWriter(projectMetadataFile.getWriteStream(true))) {
+ RepositoryMetadataWriter.write(projectMetadata, writer);
+ } catch (IOException e) {
+ throw new RepositoryMetadataException(e);
+ }
if ( fixChecksums )
{
metadata.setLastUpdatedTimestamp( lastUpdatedTimestamp );
metadata.setAvailableVersions( availableVersions );
- RepositoryMetadataWriter.write( metadata, metadataFile.getFilePath());
+ try (OutputStreamWriter writer = new OutputStreamWriter(metadataFile.getWriteStream(true))) {
+ RepositoryMetadataWriter.write(metadata, writer);
+ } catch (IOException e) {
+ throw new RepositoryMetadataException(e);
+ }
ChecksummedFile checksum = new ChecksummedFile( metadataFile.getFilePath() );
checksum.fixChecksums( algorithms );
}
import org.apache.archiva.metadata.model.maven2.MavenArtifactFacet;
import org.apache.archiva.model.ArtifactReference;
import org.apache.archiva.repository.ManagedRepositoryContent;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.io.FilenameUtils;
import java.nio.file.Path;
ref.setClassifier( classifier );
ref.setType( type );
- Path file = managedRepositoryContent.toFile( ref );
+ StorageAsset file = managedRepositoryContent.toFile( ref );
String extension = getExtensionFromFile(file);
/**
* Extract file extension
*/
- String getExtensionFromFile( Path file )
+ String getExtensionFromFile( StorageAsset file )
{
// we are just interested in the section after the last -
- String[] parts = file.getFileName().toString().split( "-" );
+ String[] parts = file.getName().split( "-" );
if ( parts.length > 0 )
{
// get anything after a dot followed by a letter a-z, including other dots
}
}
// just in case
- return FilenameUtils.getExtension( file.toFile().getName() );
+ return StorageUtil.getExtension( file );
}
}
*/
import junit.framework.TestCase;
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
import org.apache.archiva.rest.api.model.ArtifactContentEntry;
import org.apache.archiva.test.utils.ArchivaBlockJUnit4ClassRunner;
import org.junit.Test;
throws Exception
{
+ FilesystemStorage filesystemStorage = new FilesystemStorage(Paths.get(getBasedir()), new DefaultFileLockManager());
Path file = Paths.get( getBasedir(),
"src/test/repo-with-osgi/commons-logging/commons-logging/1.1/commons-logging-1.1.jar" );
- List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( file, null, "foo" );
+ List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( new FilesystemAsset(filesystemStorage, file.toString(), file), null, "foo" );
log.info( "artifactContentEntries: {}", artifactContentEntries );
throws Exception
{
+ FilesystemStorage filesystemStorage = new FilesystemStorage(Paths.get(getBasedir()), new DefaultFileLockManager());
Path file = Paths.get( getBasedir(),
"src/test/repo-with-osgi/commons-logging/commons-logging/1.1/commons-logging-1.1.jar" );
- List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( file, "", "foo" );
+ List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries(
+ new FilesystemAsset(filesystemStorage, file.toString(), file), "", "foo" );
log.info( "artifactContentEntries: {}", artifactContentEntries );
throws Exception
{
+ FilesystemStorage filesystemStorage = new FilesystemStorage(Paths.get(getBasedir()), new DefaultFileLockManager());
+
Path file = Paths.get( getBasedir(),
"src/test/repo-with-osgi/commons-logging/commons-logging/1.1/commons-logging-1.1.jar" );
- List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( file, "/", "foo" );
+ List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( new FilesystemAsset(filesystemStorage, file.toString(),file), "/", "foo" );
log.info( "artifactContentEntries: {}", artifactContentEntries );
throws Exception
{
+ FilesystemStorage filesystemStorage = new FilesystemStorage(Paths.get(getBasedir()), new DefaultFileLockManager());
+
Path file = Paths.get( getBasedir(),
"src/test/repo-with-osgi/commons-logging/commons-logging/1.1/commons-logging-1.1.jar" );
- List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( file, "org", "foo" );
+ List<ArtifactContentEntry> artifactContentEntries = browseService.readFileEntries( new FilesystemAsset(filesystemStorage, file.toString(), file), "org", "foo" );
log.info( "artifactContentEntries: {}", artifactContentEntries );
throws Exception
{
+ FilesystemStorage filesystemStorage = new FilesystemStorage(Paths.get(getBasedir()), new DefaultFileLockManager());
+
Path file = Paths.get( getBasedir(),
"src/test/repo-with-osgi/commons-logging/commons-logging/1.1/commons-logging-1.1.jar" );
List<ArtifactContentEntry> artifactContentEntries =
- browseService.readFileEntries( file, "org/apache/commons/logging/impl/", "foo" );
+ browseService.readFileEntries( new FilesystemAsset(filesystemStorage, file.toString(), file), "org/apache/commons/logging/impl/", "foo" );
log.info( "artifactContentEntries: {}", artifactContentEntries );
throws Exception
{
+ FilesystemStorage filesystemStorage = new FilesystemStorage(Paths.get(getBasedir()), new DefaultFileLockManager());
+
Path file = Paths.get( getBasedir(),
"src/test/repo-with-osgi/commons-logging/commons-logging/1.1/commons-logging-1.1.jar" );
List<ArtifactContentEntry> artifactContentEntries =
- browseService.readFileEntries( file, "org/apache/commons/logging/", "foo" );
+ browseService.readFileEntries( new FilesystemAsset(filesystemStorage, file.toString(), file), "org/apache/commons/logging/", "foo" );
log.info( "artifactContentEntries: {}", artifactContentEntries );
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.easymock.TestSubject;
import org.junit.Test;
+import java.io.IOException;
+import java.nio.file.Path;
import java.nio.file.Paths;
import static org.assertj.core.api.Assertions.assertThat;
@TestSubject
private ArtifactBuilder builder = new ArtifactBuilder();
+ StorageAsset getFile(String path) throws IOException {
+ Path filePath = Paths.get(path);
+ FilesystemStorage filesystemStorage = new FilesystemStorage(filePath.getParent(), new DefaultFileLockManager());
+ return new FilesystemAsset(filesystemStorage, filePath.getFileName().toString(), filePath);
+ }
+
@Test
- public void testBuildSnapshot()
- {
- assertThat( builder.getExtensionFromFile( Paths.get( "/tmp/foo-2.3-20141119.064321-40.jar" ) ) ).isEqualTo( "jar" );
+ public void testBuildSnapshot() throws IOException {
+ assertThat( builder.getExtensionFromFile( getFile( "/tmp/foo-2.3-20141119.064321-40.jar" ) ) ).isEqualTo( "jar" );
}
@Test
- public void testBuildPom()
- {
- assertThat( builder.getExtensionFromFile( Paths.get( "/tmp/foo-1.0.pom" ) ) ).isEqualTo( "pom" );
+ public void testBuildPom() throws IOException {
+ assertThat( builder.getExtensionFromFile( getFile( "/tmp/foo-1.0.pom" ) ) ).isEqualTo( "pom" );
}
@Test
- public void testBuildJar()
- {
- assertThat( builder.getExtensionFromFile( Paths.get( "/tmp/foo-1.0-sources.jar" ) ) ).isEqualTo( "jar" );
+ public void testBuildJar() throws IOException {
+ assertThat( builder.getExtensionFromFile( getFile( "/tmp/foo-1.0-sources.jar" ) ) ).isEqualTo( "jar" );
}
@Test
- public void testBuildTarGz()
- {
- assertThat( builder.getExtensionFromFile( Paths.get( "/tmp/foo-1.0.tar.gz" ) ) ).isEqualTo( "tar.gz" );
+ public void testBuildTarGz() throws IOException {
+ assertThat( builder.getExtensionFromFile( getFile( "/tmp/foo-1.0.tar.gz" ) ) ).isEqualTo( "tar.gz" );
}
@Test
- public void testBuildPomZip()
- {
- assertThat( builder.getExtensionFromFile( Paths.get( "/tmp/foo-1.0.pom.zip" ) ) ).isEqualTo( "pom.zip" );
+ public void testBuildPomZip() throws IOException {
+ assertThat( builder.getExtensionFromFile( getFile( "/tmp/foo-1.0.pom.zip" ) ) ).isEqualTo( "pom.zip" );
}
@Test
- public void testBuildR00()
- {
- assertThat( builder.getExtensionFromFile( Paths.get( "/tmp/foo-1.0.r00" ) ) ).isEqualTo( "r00" );
+ public void testBuildR00() throws IOException {
+ assertThat( builder.getExtensionFromFile( getFile( "/tmp/foo-1.0.r00" ) ) ).isEqualTo( "r00" );
}
}
import org.apache.archiva.metadata.repository.MetadataRepository;
import org.apache.archiva.metadata.repository.RepositorySession;
import org.apache.archiva.filter.Filter;
-import org.apache.archiva.metadata.repository.storage.ReadMetadataRequest;
-import org.apache.archiva.metadata.repository.storage.RepositoryStorage;
-import org.apache.archiva.metadata.repository.storage.RepositoryStorageMetadataException;
-import org.apache.archiva.metadata.repository.storage.RepositoryStorageMetadataInvalidException;
-import org.apache.archiva.metadata.repository.storage.RepositoryStorageMetadataNotFoundException;
-import org.apache.archiva.metadata.repository.storage.RepositoryStorageRuntimeException;
+import org.apache.archiva.metadata.repository.storage.*;
import org.apache.archiva.model.ArtifactReference;
import org.apache.archiva.policies.ProxyDownloadException;
import org.apache.archiva.redback.components.taskqueue.TaskQueueException;
import org.apache.archiva.repository.events.RepositoryListener;
import org.apache.archiva.scheduler.repository.model.RepositoryArchivaTaskScheduler;
import org.apache.archiva.scheduler.repository.model.RepositoryTask;
+import org.apache.archiva.xml.XMLException;
+import java.io.IOException;
import java.util.Collection;
/**
}
@Override
- public String getFilePathWithVersion( String requestPath, ManagedRepositoryContent managedRepositoryContent )
+ public String getFilePathWithVersion( String requestPath, ManagedRepositoryContent managedRepositoryContent ) throws RelocationException, XMLException, IOException
{
return null;
}
import org.apache.archiva.repository.metadata.MetadataTools;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
import org.apache.archiva.repository.metadata.RepositoryMetadataWriter;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.rest.api.services.ArchivaRestServiceException;
import org.apache.archiva.rest.services.AbstractRestService;
import org.apache.archiva.scheduler.ArchivaTaskScheduler;
import javax.servlet.http.HttpSession;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
+import java.io.*;
import java.net.URLDecoder;
import java.nio.file.*;
import java.text.DateFormat;
ArtifactReference artifactReference = createArtifactRef(fileMetadata, groupId, artifactId, version);
artifactReference.setType(packaging);
- Path pomPath = artifactUtil.getArtifactPath(repoConfig, artifactReference);
- Path targetPath = pomPath.getParent();
+ StorageAsset pomPath = artifactUtil.getArtifactAsset(repoConfig, artifactReference);
+ StorageAsset targetPath = pomPath.getParent();
- String pomFilename = pomPath.getFileName().toString();
+ String pomFilename = pomPath.getName();
if (StringUtils.isNotEmpty(fileMetadata.getClassifier())) {
pomFilename = StringUtils.remove(pomFilename, "-" + fileMetadata.getClassifier());
}
artifactReference.setType(
StringUtils.isEmpty(fileMetadata.getPackaging()) ? packaging : fileMetadata.getPackaging());
- Path artifactPath = artifactUtil.getArtifactPath(repoConfig, artifactReference);
- Path targetPath = artifactPath.getParent();
+ StorageAsset artifactPath = artifactUtil.getArtifactAsset(repoConfig, artifactReference);
+ StorageAsset targetPath = artifactPath.getParent();
log.debug("artifactPath: {} found targetPath: {}", artifactPath, targetPath);
int newBuildNumber = -1;
String timestamp = null;
- Path versionMetadataFile = targetPath.resolve(MetadataTools.MAVEN_METADATA);
+ StorageAsset versionMetadataFile = targetPath.resolve(MetadataTools.MAVEN_METADATA);
ArchivaRepositoryMetadata versionMetadata = getMetadata(versionMetadataFile);
if (VersionUtil.isSnapshot(version)) {
}
}
- if (!Files.exists(targetPath)) {
- Files.createDirectories(targetPath);
+ if (!targetPath.exists()) {
+ targetPath.create();
}
- String filename = artifactPath.getFileName().toString();
+ String filename = artifactPath.getName();
if (VersionUtil.isSnapshot(version)) {
filename = filename.replaceAll(VersionUtil.SNAPSHOT, timestamp + "-" + newBuildNumber);
}
// !(archivaAdministration.getKnownContentConsumers().contains("create-missing-checksums"));
try {
- Path targetFile = targetPath.resolve(filename);
- if (Files.exists(targetFile) && !VersionUtil.isSnapshot(version) && repoConfig.blocksRedeployments()) {
+ StorageAsset targetFile = targetPath.resolve(filename);
+ if (targetFile.exists() && !VersionUtil.isSnapshot(version) && repoConfig.blocksRedeployments()) {
throw new ArchivaRestServiceException(
"Overwriting released artifacts in repository '" + repoConfig.getId() + "' is not allowed.",
Response.Status.BAD_REQUEST.getStatusCode(), null);
pomFilename = FilenameUtils.removeExtension(pomFilename) + ".pom";
try {
- Path generatedPomFile =
+ StorageAsset generatedPomFile =
createPom(targetPath, pomFilename, fileMetadata, groupId, artifactId, version, packaging);
triggerAuditEvent(repoConfig.getId(), targetPath.resolve(pomFilename).toString(), AuditEvent.UPLOAD_FILE);
if (fixChecksums) {
// explicitly update only if metadata-updater consumer is not enabled!
if (!archivaAdministration.getKnownContentConsumers().contains("metadata-updater")) {
- updateProjectMetadata(targetPath.toAbsolutePath().toString(), lastUpdatedTimestamp, timestamp, newBuildNumber,
+ updateProjectMetadata(targetPath, lastUpdatedTimestamp, timestamp, newBuildNumber,
fixChecksums, fileMetadata, groupId, artifactId, version, packaging);
if (VersionUtil.isSnapshot(version)) {
return artifactReference;
}
- private ArchivaRepositoryMetadata getMetadata(Path metadataFile)
+ private ArchivaRepositoryMetadata getMetadata(StorageAsset metadataFile)
throws RepositoryMetadataException {
ArchivaRepositoryMetadata metadata = new ArchivaRepositoryMetadata();
- if (Files.exists(metadataFile)) {
+ if (metadataFile.exists()) {
try {
metadata = MavenMetadataReader.read(metadataFile);
- } catch (XMLException e) {
+ } catch (XMLException | IOException e) {
throw new RepositoryMetadataException(e.getMessage(), e);
}
}
return metadata;
}
- private Path createPom(Path targetPath, String filename, FileMetadata fileMetadata, String groupId,
+ private StorageAsset createPom(StorageAsset targetPath, String filename, FileMetadata fileMetadata, String groupId,
String artifactId, String version, String packaging)
throws IOException {
Model projectModel = new Model();
projectModel.setVersion(version);
projectModel.setPackaging(packaging);
- Path pomFile = targetPath.resolve(filename);
+ StorageAsset pomFile = targetPath.resolve(filename);
MavenXpp3Writer writer = new MavenXpp3Writer();
- try (FileWriter w = new FileWriter(pomFile.toFile())) {
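+ // getWriteStream(true) is assumed to replace any existing content, like the FileWriter it supersedes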
+ try (Writer w = new OutputStreamWriter(pomFile.getWriteStream(true))) {
writer.write(w, projectModel);
}
return pomFile;
}
- private void fixChecksums(Path file) {
- ChecksummedFile checksum = new ChecksummedFile(file);
+ private void fixChecksums(StorageAsset file) {
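+ // ChecksummedFile still operates on java.nio.file.Path, so this assumes the asset is filesystem-backed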
+ ChecksummedFile checksum = new ChecksummedFile(file.getFilePath());
checksum.fixChecksums(algorithms);
}
- private void queueRepositoryTask(String repositoryId, Path localFile) {
+ private void queueRepositoryTask(String repositoryId, StorageAsset localFile) {
RepositoryTask task = new RepositoryTask();
task.setRepositoryId(repositoryId);
task.setResourceFile(localFile);
scheduler.queueTask(task);
} catch (TaskQueueException e) {
log.error("Unable to queue repository task to execute consumers on resource file ['{}"
- + "'].", localFile.getFileName());
+ + "'].", localFile.getName());
}
}
- private void copyFile(Path sourceFile, Path targetPath, String targetFilename, boolean fixChecksums)
+ private void copyFile(Path sourceFile, StorageAsset targetPath, String targetFilename, boolean fixChecksums)
throws IOException {
- Files.copy(sourceFile, targetPath.resolve(targetFilename), StandardCopyOption.REPLACE_EXISTING,
- StandardCopyOption.COPY_ATTRIBUTES);
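+ // replaceDataFromFile is assumed to overwrite the target asset, mirroring REPLACE_EXISTING above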
+ targetPath.resolve(targetFilename).replaceDataFromFile(sourceFile);
if (fixChecksums) {
fixChecksums(targetPath.resolve(targetFilename));
/**
* Update artifact level metadata. If it does not exist, create the metadata and fix checksums if necessary.
*/
- private void updateProjectMetadata(String targetPath, Date lastUpdatedTimestamp, String timestamp, int buildNumber,
+ private void updateProjectMetadata(StorageAsset targetPath, Date lastUpdatedTimestamp, String timestamp, int buildNumber,
boolean fixChecksums, FileMetadata fileMetadata, String groupId,
String artifactId, String version, String packaging)
throws RepositoryMetadataException {
List<String> availableVersions = new ArrayList<>();
String latestVersion = version;
- Path projectDir = Paths.get(targetPath).getParent();
- Path projectMetadataFile = projectDir.resolve(MetadataTools.MAVEN_METADATA);
+ StorageAsset projectDir = targetPath.getParent();
+ StorageAsset projectMetadataFile = projectDir.resolve(MetadataTools.MAVEN_METADATA);
ArchivaRepositoryMetadata projectMetadata = getMetadata(projectMetadataFile);
- if (Files.exists(projectMetadataFile)) {
+ if (projectMetadataFile.exists()) {
availableVersions = projectMetadata.getAvailableVersions();
Collections.sort(availableVersions, VersionComparator.getInstance());
* Update version level metadata for snapshot artifacts. If it does not exist, create the metadata and fix checksums
* if necessary.
*/
- private void updateVersionMetadata(ArchivaRepositoryMetadata metadata, Path metadataFile,
+ private void updateVersionMetadata(ArchivaRepositoryMetadata metadata, StorageAsset metadataFile,
Date lastUpdatedTimestamp, String timestamp, int buildNumber,
boolean fixChecksums, FileMetadata fileMetadata, String groupId,
String artifactId, String version, String packaging)
throws RepositoryMetadataException {
- if (!Files.exists(metadataFile)) {
+ if (!metadataFile.exists()) {
metadata.setGroupId(groupId);
metadata.setArtifactId(artifactId);
metadata.setVersion(version);
<scope>runtime</scope>
-->
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ </dependency>
<dependency>
<groupId>org.apache.archiva.maven</groupId>
<artifactId>archiva-maven-repository</artifactId>
import edu.emory.mathcs.backport.java.util.Collections;
import org.apache.archiva.metadata.model.facets.AuditEvent;
import org.apache.archiva.repository.LayoutException;
-import org.apache.archiva.repository.content.RepositoryStorage;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.RepositoryStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.events.AuditListener;
import org.apache.archiva.scheduler.ArchivaTaskScheduler;
import org.apache.archiva.scheduler.repository.model.RepositoryArchivaTaskScheduler;
import org.apache.archiva.checksum.ChecksumAlgorithm;
import org.apache.archiva.checksum.ChecksumUtil;
import org.apache.archiva.checksum.StreamingChecksum;
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.filelock.FileLockManager;
import org.apache.archiva.common.plexusbridge.PlexusSisuBridgeException;
import org.apache.archiva.common.utils.PathUtil;
import org.apache.archiva.repository.RepositoryGroup;
import org.apache.archiva.repository.RepositoryRegistry;
import org.apache.archiva.repository.RepositoryRequestInfo;
-import org.apache.archiva.repository.content.FilesystemAsset;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.events.AuditListener;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.metadata.MetadataTools;
ArchivaRepositoryMetadata repoMetadata = MavenMetadataReader.read( metadataFile );
mergedMetadata = RepositoryMetadataMerge.merge( mergedMetadata, repoMetadata );
}
- catch ( XMLException e )
+ catch ( XMLException | IOException e )
{
throw new DavException( HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
"Error occurred while reading metadata file." );
{
// we are in the case of index file request
String requestedFileName = StringUtils.substringAfterLast( pathInfo, "/" );
- Path temporaryIndexDirectory =
+ StorageAsset temporaryIndexDirectory =
buildMergedIndexDirectory( activePrincipal, request, repoGroup );
- FilesystemAsset asset = new FilesystemAsset( pathInfo, temporaryIndexDirectory.resolve(requestedFileName) );
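+ // getAsset() resolves against the storage root; this assumes the merged index directory is that root (see buildMergedIndexDirectory)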
+ StorageAsset asset = temporaryIndexDirectory.getStorage().getAsset(requestedFileName);
- Path resourceFile = temporaryIndexDirectory.resolve( requestedFileName );
try {
resource = new ArchivaDavResource( asset, requestedFileName, repoGroup,
request.getRemoteAddr(), activePrincipal, request.getDavSession(),
throw new BrowserRedirectException( addHrefPrefix( contextPath, path ), e.getRelocationType() );
}
- catch ( XMLException e )
+ catch ( XMLException | IOException e )
{
log.error( e.getMessage(), e );
throw new DavException( HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e );
RepositoryProxyHandler proxyHandler = proxyRegistry.getHandler(managedRepository.getRepository().getType()).get(0);
if ( repositoryRequestInfo.isSupportFile( path ) )
{
- Path proxiedFile = proxyHandler.fetchFromProxies( managedRepository, path );
+ StorageAsset proxiedFile = proxyHandler.fetchFromProxies( managedRepository, path );
return ( proxiedFile != null );
}
if ( repositoryRequestInfo.isArchetypeCatalog( path ) )
{
// FIXME we must implement a merge of remote archetype catalog from remote servers.
- Path proxiedFile = proxyHandler.fetchFromProxies( managedRepository, path );
+ StorageAsset proxiedFile = proxyHandler.fetchFromProxies( managedRepository, path );
return ( proxiedFile != null );
}
this.applicationContext.getBean( "repositoryStorage#" + repositoryLayout, RepositoryStorage.class );
repositoryStorage.applyServerSideRelocation( managedRepository, artifact );
- Path proxiedFile = proxyHandler.fetchFromProxies( managedRepository, artifact );
+ StorageAsset proxiedFile = proxyHandler.fetchFromProxies( managedRepository, artifact );
resource.setPath( managedRepository.toPath( artifact ) );
if ( StringUtils.endsWith( pathInfo, mergedIndexPath ) )
{
- Path mergedRepoDirPath =
+ StorageAsset mergedRepoDirPath =
buildMergedIndexDirectory( activePrincipal, request, repositoryGroup );
- FilesystemAsset mergedRepoDir = new FilesystemAsset(pathInfo, mergedRepoDirPath);
- mergedRepositoryContents.add( mergedRepoDir );
+ mergedRepositoryContents.add( mergedRepoDirPath );
}
else
{
}
}
}
- FilesystemAsset parentDir = new FilesystemAsset(pathInfo, tmpDirectory.getParent());
- mergedRepositoryContents.add( parentDir );
+ try {
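+ // expose the parent temp dir as a FilesystemStorage; getAsset("") addresses its root as an asset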
+ FilesystemStorage storage = new FilesystemStorage(tmpDirectory.getParent(), new DefaultFileLockManager());
+ mergedRepositoryContents.add( storage.getAsset("") );
+ } catch (IOException e) {
+ throw new DavException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "Could not create storage for " + tmpDirectory);
+ }
}
for ( ManagedRepository repo : repositories )
{
}
}
- protected Path buildMergedIndexDirectory( String activePrincipal,
+ protected StorageAsset buildMergedIndexDirectory( String activePrincipal,
DavServletRequest request,
RepositoryGroup repositoryGroup )
throws DavException
final String id = repositoryGroup.getId();
TemporaryGroupIndex tmp = temporaryGroupIndexMap.get(id);
- if ( tmp != null && tmp.getDirectory() != null && Files.exists(tmp.getDirectory()))
+ if ( tmp != null && tmp.getDirectory() != null && tmp.getDirectory().exists())
{
if ( System.currentTimeMillis() - tmp.getCreationTime() > (
repositoryGroup.getMergedIndexTTL() * 60 * 1000 ) )
{
Path tempRepoFile = Files.createTempDirectory( "temp" );
tempRepoFile.toFile( ).deleteOnExit( );
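+ // wrap the temp directory in a storage so the index merger can receive a StorageAsset instead of a Path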
+ FilesystemStorage storage = new FilesystemStorage(tempRepoFile, new DefaultFileLockManager());
+ StorageAsset tmpAsset = storage.getAsset("");
IndexMergerRequest indexMergerRequest =
new IndexMergerRequest( authzRepos, true, id,
indexPath.toString( ),
repositoryGroup.getMergedIndexTTL( ) ).mergedIndexDirectory(
- tempRepoFile ).temporary( true );
+ tmpAsset ).temporary( true );
MergedRemoteIndexesTaskRequest taskRequest =
new MergedRemoteIndexesTaskRequest( indexMergerRequest, indexMerger );
ArchivaIndexingContext indexingContext = job.execute( ).getIndexingContext( );
- Path mergedRepoDir = Paths.get( indexingContext.getPath( ) );
+ StorageAsset mergedRepoDir = indexingContext.getPath( );
TemporaryGroupIndex temporaryGroupIndex =
new TemporaryGroupIndex( mergedRepoDir, indexingContext.getId( ), id,
repositoryGroup.getMergedIndexTTL( ) ) //
* under the License.
*/
-import org.apache.archiva.repository.ManagedRepositoryContent;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.webdav.util.IndexWriter;
import org.apache.archiva.webdav.util.MimeTypes;
import org.apache.jackrabbit.util.Text;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
import java.util.*;
import java.util.stream.Collectors;
import org.apache.archiva.redback.integration.filter.authentication.HttpAuthenticator;
import org.apache.archiva.repository.ManagedRepository;
import org.apache.archiva.repository.RepositoryRegistry;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.security.ServletAuthenticator;
import org.apache.jackrabbit.webdav.DavException;
import org.apache.jackrabbit.webdav.DavLocatorFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;
* under the License.
*/
-import org.apache.archiva.repository.ManagedRepositoryContent;
-import org.apache.archiva.repository.content.StorageAsset;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.lang.StringUtils;
-import org.apache.jackrabbit.webdav.DavResource;
import org.apache.jackrabbit.webdav.io.OutputContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
for ( TemporaryGroupIndex temporaryGroupIndex : tempFilesPerKey.values() )
{
log.info( "cleanup temporaryGroupIndex {} directory {}", temporaryGroupIndex.getIndexId(),
- temporaryGroupIndex.getDirectory().toAbsolutePath() );
+ temporaryGroupIndex.getDirectory().getPath() );
getIndexMerger( httpSessionEvent ).cleanTemporaryGroupIndex( temporaryGroupIndex );
}
}
import org.apache.archiva.common.utils.FileUtils;
import org.apache.archiva.repository.LayoutException;
import org.apache.archiva.repository.RepositoryRegistry;
-import org.apache.archiva.repository.content.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemAsset;
import org.apache.archiva.repository.events.AuditListener;
import org.apache.archiva.repository.maven2.MavenManagedRepository;
import org.apache.archiva.test.utils.ArchivaSpringJUnit4ClassRunner;
private DavResource getDavResource( String logicalPath, Path file ) throws LayoutException
{
- return new ArchivaDavResource( new FilesystemAsset( logicalPath, file.toAbsolutePath()) , logicalPath, repository, session, resourceLocator,
+ return new ArchivaDavResource( new FilesystemAsset( repository, logicalPath, file.toAbsolutePath()) , logicalPath, repository, session, resourceLocator,
resourceFactory, mimeTypes, Collections.<AuditListener> emptyList(), null);
}
{
try
{
- return new ArchivaDavResource( new FilesystemAsset( "/" , baseDir.toAbsolutePath()), "/", repository, session, resourceLocator,
+ return new ArchivaDavResource( new FilesystemAsset(repository, "/" , baseDir.toAbsolutePath()), "/", repository, session, resourceLocator,
resourceFactory, mimeTypes, Collections.<AuditListener> emptyList(),
null );
}
import org.apache.archiva.proxy.maven.MavenRepositoryProxyHandler;
import org.apache.archiva.proxy.model.ProxyFetchResult;
import org.apache.archiva.repository.ManagedRepositoryContent;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.commons.io.FileUtils;
import java.io.IOException;
@Override
public ProxyFetchResult fetchMetadataFromProxies(ManagedRepositoryContent repository, String logicalPath )
{
- Path target = Paths.get(repository.getRepoRoot(), logicalPath );
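+ // the repository now hands out a StorageAsset for the logical path instead of a composed filesystem Path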
+ StorageAsset target = repository.getRepository().getAsset( logicalPath );
try
{
- FileUtils.copyFile( archivaDavResourceFactoryTest.getProjectBase().resolve( "target/test-classes/maven-metadata.xml" ).toFile(), target.toFile() );
+ FileUtils.copyFile( archivaDavResourceFactoryTest.getProjectBase().resolve( "target/test-classes/maven-metadata.xml" ).toFile(), target.getFilePath().toFile() );
}
catch ( IOException e )
{
*/
import org.apache.archiva.metadata.model.ArtifactMetadata;
+import org.apache.archiva.repository.storage.StorageAsset;
import java.nio.file.Path;
String toPath( String namespace, String projectId );
- Path toFile( Path basedir, String namespace, String projectId, String projectVersion, String filename );
+ StorageAsset toFile( StorageAsset basedir, String namespace, String projectId, String projectVersion, String filename );
- Path toFile( Path basedir, String namespace, String projectId );
+ StorageAsset toFile( StorageAsset basedir, String namespace, String projectId );
- Path toFile(Path basedir, String namespace );
+ StorageAsset toFile( StorageAsset basedir, String namespace );
- Path toFile( Path basedir, String namespace, String projectId, String projectVersion );
+ StorageAsset toFile( StorageAsset basedir, String namespace, String projectId, String projectVersion );
ArtifactMetadata getArtifactForPath( String repoId, String relativePath );
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.xml.XMLException;
+import java.io.IOException;
import java.util.Collection;
// FIXME: we should drop the repositoryId parameters and attach this to an instance of a repository storage
String getFilePath( String requestPath, org.apache.archiva.repository.ManagedRepository managedRepository );
String getFilePathWithVersion( final String requestPath, ManagedRepositoryContent managedRepositoryContent )
- throws RelocationException, XMLException;
+ throws RelocationException, XMLException, IOException;
}
import org.apache.archiva.repository.events.RepositoryListener;
import org.apache.archiva.xml.XMLException;
+import java.io.IOException;
import java.util.Collection;
/**
@Override
public String getFilePathWithVersion( String requestPath, ManagedRepositoryContent managedRepositoryContent )
- throws RelocationException, XMLException
+ throws RelocationException, XMLException, IOException
{
return null;
}
* under the License.
*/
+import org.apache.archiva.common.filelock.DefaultFileLockManager;
import org.apache.archiva.common.utils.VersionComparator;
import org.apache.archiva.common.utils.VersionUtil;
import org.apache.archiva.configuration.ArchivaConfiguration;
import org.apache.archiva.repository.RepositoryException;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
import org.apache.archiva.repository.metadata.RepositoryMetadataWriter;
+import org.apache.archiva.repository.storage.FilesystemAsset;
+import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.xml.XMLException;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import javax.inject.Inject;
import javax.inject.Named;
+import java.io.BufferedWriter;
import java.io.IOException;
+import java.io.OutputStreamWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
{
// updating version metadata files
- Path versionMetaDataFileInSourceRepo =
- pathTranslator.toFile( Paths.get( sourceRepoPath ), artifactMetadata.getNamespace(),
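+ // wrap the source repository root in a FilesystemStorage so the path translator can resolve StorageAssets against it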
+ FilesystemStorage fsStorage = new FilesystemStorage(Paths.get(sourceRepoPath), new DefaultFileLockManager());
+
+ StorageAsset versionMetaDataFileInSourceRepo =
+ pathTranslator.toFile( new FilesystemAsset(fsStorage, "", Paths.get(sourceRepoPath)), artifactMetadata.getNamespace(),
artifactMetadata.getProject(), artifactMetadata.getVersion(),
METADATA_FILENAME );
- if ( Files.exists(versionMetaDataFileInSourceRepo) )
+ if ( versionMetaDataFileInSourceRepo.exists() )
{//Pattern quote for windows path
String relativePathToVersionMetadataFile =
- versionMetaDataFileInSourceRepo.toAbsolutePath().toString().split( Pattern.quote( sourceRepoPath ) )[1];
+ versionMetaDataFileInSourceRepo.getPath().split( Pattern.quote( sourceRepoPath ) )[1];
Path versionMetaDataFileInTargetRepo = Paths.get( targetRepoPath, relativePathToVersionMetadataFile );
if ( !Files.exists(versionMetaDataFileInTargetRepo) )
{
- copyFile( versionMetaDataFileInSourceRepo, versionMetaDataFileInTargetRepo );
+ copyFile( versionMetaDataFileInSourceRepo.getFilePath(), versionMetaDataFileInTargetRepo );
}
else
{
}
// updating project meta data file
- Path projectDirectoryInSourceRepo = versionMetaDataFileInSourceRepo.getParent().getParent();
- Path projectMetadataFileInSourceRepo = projectDirectoryInSourceRepo.resolve(METADATA_FILENAME );
+ StorageAsset projectDirectoryInSourceRepo = versionMetaDataFileInSourceRepo.getParent().getParent();
+ StorageAsset projectMetadataFileInSourceRepo = projectDirectoryInSourceRepo.resolve(METADATA_FILENAME );
- if ( Files.exists(projectMetadataFileInSourceRepo) )
+ if ( projectMetadataFileInSourceRepo.exists() )
{
String relativePathToProjectMetadataFile =
- projectMetadataFileInSourceRepo.toAbsolutePath().toString().split( Pattern.quote( sourceRepoPath ) )[1];
+ projectMetadataFileInSourceRepo.getPath().split( Pattern.quote( sourceRepoPath ) )[1];
Path projectMetadataFileInTargetRepo = Paths.get( targetRepoPath, relativePathToProjectMetadataFile );
if ( !Files.exists(projectMetadataFileInTargetRepo) )
{
- copyFile( projectMetadataFileInSourceRepo, projectMetadataFileInTargetRepo );
+ copyFile( projectMetadataFileInSourceRepo.getFilePath(), projectMetadataFileInTargetRepo );
}
else
{
projectMetadata.setReleasedVersion( latestVersion );
}
- RepositoryMetadataWriter.write( projectMetadata, projectMetaDataFileIntargetRepo );
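+ // RepositoryMetadataWriter now targets a Writer rather than a Path; IO failures are wrapped in RepositoryMetadataException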
+ try (BufferedWriter writer = Files.newBufferedWriter(projectMetaDataFileIntargetRepo)) {
+ RepositoryMetadataWriter.write( projectMetadata, writer );
+ } catch (IOException e) {
+ throw new RepositoryMetadataException(e);
+ }
}
}
versionMetadata.setLastUpdatedTimestamp( lastUpdatedTimestamp );
- RepositoryMetadataWriter.write( versionMetadata, versionMetaDataFileInTargetRepo );
+ try (BufferedWriter writer = Files.newBufferedWriter(versionMetaDataFileInTargetRepo)) {
+ RepositoryMetadataWriter.write( versionMetadata, writer);
+ } catch (IOException e) {
+ throw new RepositoryMetadataException(e);
+ }
}
private ArchivaRepositoryMetadata getMetadata( Path metadataFile )
{
metadata = MavenMetadataReader.read( metadataFile );
}
- catch ( XMLException e )
+ catch ( XMLException | IOException e )
{
throw new RepositoryMetadataException( e.getMessage(), e );
}
<artifactId>archiva-metadata-consumer</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.archiva</groupId>
+ <artifactId>archiva-storage-fs</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<dependency>
<groupId>org.apache.archiva</groupId>
<artifactId>test-repository</artifactId>