import org.apache.archiva.repository.ContentNotFoundException;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.metadata.audit.RepositoryListener;
+import org.apache.archiva.repository.storage.FsStorageUtil;
import org.apache.archiva.repository.storage.StorageAsset;
-import org.apache.archiva.repository.storage.StorageUtil;
+import org.apache.archiva.repository.storage.util.StorageUtil;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
final String artifactName = artifactFile.getName( );
- try
- {
-
- StorageUtil.recurse(parentDir, a -> {
- if (!a.isContainer() && a.getName().startsWith(artifactName)) deleteSilently(a);
- }, true, 3 );
- }
- catch ( IOException e )
- {
- log.error( "Purge of support files failed {}: {}", artifactFile, e.getMessage( ), e );
- }
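+ // StorageUtil.walk() consumes children before their parents (depth-first)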
+ StorageUtil.walk(parentDir, a -> {
+ if (!a.isContainer() && a.getName().startsWith(artifactName)) deleteSilently(a);
+ });
}
import org.apache.archiva.repository.metadata.base.MetadataTools;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
import org.apache.archiva.repository.storage.FilesystemStorage;
+import org.apache.archiva.repository.storage.FsStorageUtil;
import org.apache.archiva.repository.storage.StorageAsset;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.archiva.scheduler.ArchivaTaskScheduler;
import org.apache.archiva.scheduler.repository.model.RepositoryTask;
import org.apache.commons.collections4.CollectionUtils;
try
{
- StorageUtil.moveAsset( temp, target, true , StandardCopyOption.REPLACE_EXISTING);
+ org.apache.archiva.repository.storage.util.StorageUtil.moveAsset( temp, target, true , StandardCopyOption.REPLACE_EXISTING);
}
catch ( IOException e )
{
log.error( "Move failed from {} to {}, trying copy.", temp, target );
try
{
- StorageUtil.copyAsset( temp, target, true );
+ FsStorageUtil.copyAsset( temp, target, true );
if (temp.exists()) {
temp.getStorage( ).removeAsset( temp );
}
import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.maven.index.ArtifactContext;
import org.apache.maven.index.ArtifactContextProducer;
} catch (IOException e) {
log.warn("Index close failed");
}
- try {
- StorageUtil.deleteRecursively(context.getPath());
- } catch (IOException e) {
- throw new IndexUpdateFailedException("Could not delete index files");
- }
+ org.apache.archiva.repository.storage.util.StorageUtil.deleteRecursively(context.getPath());
});
try {
Repository repo = context.getRepository();
import org.apache.archiva.repository.Repository;
import org.apache.archiva.repository.RepositoryRegistry;
import org.apache.archiva.repository.storage.StorageAsset;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang3.time.StopWatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
StorageAsset directory = temporaryGroupIndex.getDirectory();
if ( directory != null && directory.exists() )
{
- StorageUtil.deleteRecursively( directory );
+ org.apache.archiva.repository.storage.util.StorageUtil.deleteRecursively( directory );
}
}
}
</properties>
+ <dependencies>
+
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
<build>
<plugins>
<plugin>
*
* @return The list of children. If there are no children, or if the asset is not a container, an empty list will be returned.
*/
- List<StorageAsset> list();
+ List<? extends StorageAsset> list();
/**
* The size in bytes of the asset. If the asset does not have a size, -1 should be returned.
}
// Assets are returned in reverse order
- List<StorageAsset> getChildContainers( StorageAsset parent) {
- final List<StorageAsset> children = parent.list( );
+ List<? extends StorageAsset> getChildContainers( StorageAsset parent) {
+ final List<? extends StorageAsset> children = parent.list( );
final int len = children.size( );
return IntStream.range( 0, children.size( ) ).mapToObj( i ->
children.get(len - i - 1)).filter( StorageAsset::isContainer ).collect( Collectors.toList( ) );
}
// Assets are returned in reverse order
- List<StorageAsset> getChildFiles(StorageAsset parent) {
- final List<StorageAsset> children = parent.list( );
+ List<? extends StorageAsset> getChildFiles(StorageAsset parent) {
+ final List<? extends StorageAsset> children = parent.list( );
final int len = children.size( );
return IntStream.range( 0, children.size( ) ).mapToObj( i ->
children.get(len - i - 1)).filter( StorageAsset::isLeaf ).collect( Collectors.toList( ) );
* under the License.
*/
+import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.CopyOption;
+import java.nio.file.Files;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;
*/
public class StorageUtil
{
+
+ private static final Logger LOG = LoggerFactory.getLogger( StorageUtil.class );
+
+ private static final int DEFAULT_BUFFER_SIZE = 4096;
+
/**
* Walk the tree starting at the given asset. The consumer is called for each asset found.
* It runs a depth-first search where children are consumed before their parents.
return newAssetStream( start, false );
}
+ /**
+ * Deletes the given asset and all child assets recursively.
+ * IOExceptions during deletion are logged and suppressed; the traversal continues.
+ *
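+ * A minimal usage sketch (the asset path is illustrative):
+ * <pre>{@code
+ * StorageAsset dir = storage.getAsset( "/org/example" );
+ * StorageUtil.deleteRecursively( dir );
+ * }</pre>
+ *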
+ * @param baseDir The base asset to remove.
+ *
+ */
+ public static final void deleteRecursively(StorageAsset baseDir) {
+ RepositoryStorage storage = baseDir.getStorage( );
+ walk( baseDir, a -> {
+ try {
+ storage.removeAsset(a);
+ } catch (IOException e) {
+ LOG.error( "Could not delete asset {}: {}", a.getPath( ), e.getMessage( ), e );
+ }
+ });
+ }
+
+ /**
+ * Deletes the given asset and all child assets recursively.
+ * @param baseDir The base asset to remove.
+ * @param stopOnError if <code>true</code>, the traversal stops as soon as an exception is encountered
+ * @return returns <code>true</code>, if every item was removed. If an IOException was encountered during
+ * traversal it returns <code>false</code>
+ */
+ public static final boolean deleteRecursively(final StorageAsset baseDir, final boolean stopOnError) {
+ final RepositoryStorage storage = baseDir.getStorage( );
+ try(Stream<StorageAsset> stream = newAssetStream( baseDir ))
+ {
+ if ( stopOnError )
+ {
+ // Return true, if no exception occurred
+ // anyMatch is short-circuiting, that means it stops if the condition matches
+ return !stream.map( a -> {
+ try
+ {
+ storage.removeAsset( a );
+ // Returning false, if OK
+ return Boolean.FALSE;
+ }
+ catch ( IOException e )
+ {
+ LOG.error( "Could not delete asset {}: {}", a.getPath( ), e.getMessage( ), e );
+ // Returning true, if exception
+ return Boolean.TRUE;
+ }
+ } ).anyMatch( r -> r );
+ } else {
+ // Return true, if all removals were OK
+ // We want to consume all, so we use allMatch
+ return stream.map( a -> {
+ try
+ {
+ storage.removeAsset( a );
+ // Returning true, if OK
+ return Boolean.TRUE;
+ }
+ catch ( IOException e )
+ {
+ LOG.error( "Could not delete asset {}: {}", a.getPath( ), e.getMessage( ), e );
+ // Returning false, if exception
+ return Boolean.FALSE;
+ }
+ } ).allMatch( r -> r );
+ }
+ }
+ }
+
+ /**
+ * Moves an asset between different storage instances.
+ * If you know that source and target are from the same storage instance, the move method of the storage
+ * instance may be faster.
+ *
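+ * A usage sketch (the asset variables are illustrative):
+ * <pre>{@code
+ * StorageUtil.moveAsset( temp, target, true, StandardCopyOption.REPLACE_EXISTING );
+ * }</pre>
+ *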
+ * @param source The source asset
+ * @param target The target asset
+ * @param locked If true, a lock is used for the move operation.
+ * @param copyOptions Options for copying
+ * @throws IOException If the move fails
+ */
+ public static final void moveAsset(StorageAsset source, StorageAsset target, boolean locked, CopyOption... copyOptions) throws IOException
+ {
+ if (source.isFileBased() && target.isFileBased()) {
+ // Short cut for FS operations
+ // Move is atomic operation
+ if (!Files.exists(target.getFilePath().getParent())) {
+ Files.createDirectories(target.getFilePath().getParent());
+ }
+ Files.move( source.getFilePath(), target.getFilePath(), copyOptions );
+ } else {
+ try {
+ final RepositoryStorage sourceStorage = source.getStorage();
+ final RepositoryStorage targetStorage = target.getStorage();
+ sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
+ sourceStorage.removeAsset( source );
+ } catch (IOException e) {
+ throw e;
+ } catch (Throwable e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException) {
+ throw (IOException)cause;
+ } else
+ {
+ throw new IOException( e );
+ }
+ }
+ }
+
+ }
+
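+ /**
+ * Writes the data from the given readable channel to the target asset on the target storage.
+ * Any exception is wrapped in a RuntimeException, so this method can be used inside the
+ * channel consumer lambdas, as in moveAsset().
+ */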
+ public static final void wrapWriteFunction( ReadableByteChannel is, RepositoryStorage targetStorage, StorageAsset target, boolean locked) {
+ try {
+ targetStorage.writeDataToChannel( target, os -> copy(is, os), locked );
+ } catch (Exception e) {
+ throw new RuntimeException( e );
+ }
+ }
+
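+ /**
+ * Copies all bytes from the input channel to the output channel. If either side is a
+ * FileChannel, the transferTo/transferFrom fast path is used; otherwise the data is moved
+ * through a buffer of DEFAULT_BUFFER_SIZE bytes. IOExceptions are wrapped in RuntimeException.
+ */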
+ public static final void copy( final ReadableByteChannel is, final WritableByteChannel os ) {
+ if (is instanceof FileChannel ) {
+ copy( (FileChannel) is, os );
+ } else if (os instanceof FileChannel) {
+ copy(is, (FileChannel)os);
+ } else
+ {
+ try
+ {
+ ByteBuffer buffer = ByteBuffer.allocate( DEFAULT_BUFFER_SIZE );
+ while ( is.read( buffer ) != -1 )
+ {
+ buffer.flip( );
+ while ( buffer.hasRemaining( ) )
+ {
+ os.write( buffer );
+ }
+ buffer.clear( );
+ }
+ }
+ catch ( IOException e )
+ {
+ throw new RuntimeException( e );
+ }
+ }
+ }
+
+ public static final void copy( final FileChannel is, final WritableByteChannel os ) {
+ try
+ {
+ is.transferTo( 0, is.size( ), os );
+ }
+ catch ( IOException e )
+ {
+ throw new RuntimeException( e );
+ }
+ }
+
+ public static final void copy( final ReadableByteChannel is, final FileChannel os ) {
+ try
+ {
+ os.transferFrom( is, 0, Long.MAX_VALUE );
+ }
+ catch ( IOException e )
+ {
+ throw new RuntimeException( e );
+ }
+ }
+
+ /**
+ * Returns the extension of the name of a given asset. The extension is the substring after the last occurrence of '.' in the
+ * string. If no '.' is found, the empty string is returned.
+ *
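+ * For example, an asset named {@code foo-1.0.jar} yields {@code "jar"}.
+ *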
+ * @param asset The asset from which to return the extension string.
+ * @return The extension.
+ */
+ public static final String getExtension(StorageAsset asset) {
+ return StringUtils.substringAfterLast(asset.getName(),".");
+ }
}
public class MockAsset implements StorageAsset
{
- private StorageAsset parent;
+ private MockAsset parent;
private String path;
private String name;
- private LinkedHashMap<String, StorageAsset> children = new LinkedHashMap<>( );
+ private LinkedHashMap<String, MockAsset> children = new LinkedHashMap<>( );
private boolean container = false;
+ private RepositoryStorage storage;
+
+
+
+ private boolean throwException;
public MockAsset( String name ) {
this.name = name;
this.parent = parent;
this.path = (parent.hasParent()?parent.getPath( ):"") + "/" + name;
this.name = name;
+ this.storage = parent.getStorage( );
parent.registerChild( this );
}
- public void registerChild(StorageAsset child) {
+ public void registerChild(MockAsset child) {
children.putIfAbsent( child.getName(), child );
this.container = true;
}
+ public void unregisterChild(MockAsset child) {
+ children.remove( child.getName( ) );
+ }
+
+
+ public void setStorage(RepositoryStorage storage) {
+ this.storage = storage;
+ }
+
+ public boolean isThrowException( )
+ {
+ return throwException;
+ }
+
+ public void setThrowException( boolean throwException )
+ {
+ this.throwException = throwException;
+ }
+
@Override
public RepositoryStorage getStorage( )
{
- return null;
+ return storage;
}
@Override
}
@Override
- public List<StorageAsset> list( )
+ public List<MockAsset> list( )
{
return new ArrayList<>( children.values( ) );
}
}
@Override
- public StorageAsset getParent( )
+ public MockAsset getParent( )
{
return this.parent;
}
--- /dev/null
+package org.apache.archiva.repository.storage.mock;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.archiva.repository.storage.RepositoryStorage;
+import org.apache.archiva.repository.storage.StorageAsset;
+import org.apache.archiva.repository.storage.util.VisitStatus;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.CopyOption;
+import java.util.LinkedHashMap;
+import java.util.Optional;
+import java.util.function.Consumer;
+
+/**
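+ * RepositoryStorage mock that builds a MockAsset tree on demand and records every
+ * ADD and REMOVE operation in a VisitStatus instance, so tests can assert on the
+ * storage operations that were performed.
+ *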
+ * @author Martin Stockhammer <martin_s@apache.org>
+ */
+public class MockStorage implements RepositoryStorage
+{
+ public static final String ADD = "ADD";
+ public static final String REMOVE = "REMOVE";
+ private MockAsset root;
+ private LinkedHashMap<String, MockAsset> assets = new LinkedHashMap<>( );
+
+ private VisitStatus status = new VisitStatus( );
+
+ public MockStorage( MockAsset root )
+ {
+ this.root = root;
+ root.setStorage( this );
+ }
+
+ public MockStorage() {
+ this.root = new MockAsset( "" );
+ this.root.setStorage( this );
+ }
+
+ public VisitStatus getStatus() {
+ return status;
+ }
+
+ @Override
+ public URI getLocation( )
+ {
+ return null;
+ }
+
+ @Override
+ public void updateLocation( URI newLocation ) throws IOException
+ {
+
+ }
+
+ private String[] splitPath(String path) {
+ if (path.equals("/")) {
+ return new String[0];
+ } else
+ {
+ if (path.startsWith( "/" )) {
+ return path.substring( 1 ).split( "/" );
+ }
+ return path.split( "/" );
+ }
+ }
+
+ @Override
+ public StorageAsset getAsset( String path )
+ {
+ if (assets.containsKey( path )) {
+ return assets.get( path );
+ }
+ String[] pathArr = splitPath( path );
+ StorageAsset parent = root;
+ for (String pathElement : pathArr) {
+ Optional<? extends StorageAsset> next = parent.list( ).stream( ).filter( a -> a.getName( ).equals( pathElement ) ).findFirst( );
+ if (next.isPresent()) {
+ parent = next.get( );
+ } else {
+ MockAsset asset = new MockAsset( (MockAsset)parent, pathElement );
+ assets.put( asset.getPath( ), asset );
+ parent = asset;
+ }
+ }
+ return parent;
+ }
+
+ @Override
+ public void consumeData( StorageAsset asset, Consumer<InputStream> consumerFunction, boolean readLock ) throws IOException
+ {
+
+ }
+
+ @Override
+ public void consumeDataFromChannel( StorageAsset asset, Consumer<ReadableByteChannel> consumerFunction, boolean readLock ) throws IOException
+ {
+
+ }
+
+ @Override
+ public void writeData( StorageAsset asset, Consumer<OutputStream> consumerFunction, boolean writeLock ) throws IOException
+ {
+
+ }
+
+ @Override
+ public void writeDataToChannel( StorageAsset asset, Consumer<WritableByteChannel> consumerFunction, boolean writeLock ) throws IOException
+ {
+
+ }
+
+ @Override
+ public StorageAsset addAsset( String path, boolean container )
+ {
+ String[] pathArr = splitPath( path );
+ StorageAsset parent = root;
+ for (String pathElement : pathArr) {
+ Optional<? extends StorageAsset> next = parent.list( ).stream( ).filter( a -> a.getName( ).equals( pathElement ) ).findFirst( );
+ if (next.isPresent()) {
+ parent = next.get( );
+ } else {
+ MockAsset asset = new MockAsset( (MockAsset)parent, pathElement );
+ assets.put( asset.getPath( ), asset );
+ parent = asset;
+ }
+ }
+ status.add( ADD, parent );
+ return parent;
+ }
+
+ @Override
+ public void removeAsset( StorageAsset assetArg ) throws IOException
+ {
+ MockAsset asset = (MockAsset) assetArg;
+ if (asset.hasParent())
+ {
+ asset.getParent( ).unregisterChild( asset );
+ }
+ assets.remove( asset.getPath( ) );
+ status.add( REMOVE, asset );
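+ // The removal is recorded before the exception is thrown, so tests can count attempted removals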
+ if (asset.isThrowException()) {
+ throw new IOException( "Mocked IOException for " + asset.getPath( ) );
+ }
+ }
+
+ @Override
+ public StorageAsset moveAsset( StorageAsset origin, String destination, CopyOption... copyOptions ) throws IOException
+ {
+ return null;
+ }
+
+ @Override
+ public void moveAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions ) throws IOException
+ {
+
+ }
+
+ @Override
+ public StorageAsset copyAsset( StorageAsset origin, String destination, CopyOption... copyOptions ) throws IOException
+ {
+ return null;
+ }
+
+ @Override
+ public void copyAsset( StorageAsset origin, StorageAsset destination, CopyOption... copyOptions ) throws IOException
+ {
+
+ }
+}
import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.storage.mock.MockAsset;
+import org.apache.archiva.repository.storage.mock.MockStorage;
import org.junit.jupiter.api.Test;
+import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
- private StorageAsset createTree() {
+ private MockAsset createTree() {
return createTree( LEVEL1, LEVEL2, LEVEL3 );
}
- private StorageAsset createTree(int... levelElements) {
+ private MockAsset createTree(int... levelElements) {
MockAsset root = new MockAsset( "" );
recurseSubTree( root, 0, levelElements );
return root;
int expected = LEVEL2 * LEVEL3 + LEVEL2 + 1;
assertEquals( expected, result.size( ) );
}
+
+
+ @Test
+ void testDelete() throws IOException
+ {
+ MockAsset root = createTree( );
+ MockStorage storage = new MockStorage( root );
+
+ StorageUtil.deleteRecursively( root );
+ int expected = LEVEL1 * LEVEL2 * LEVEL3 + LEVEL1 * LEVEL2 + LEVEL1 + 1;
+ assertEquals( expected, storage.getStatus( ).size( MockStorage.REMOVE ) );
+
+ }
+
+ @Test
+ void testDeleteWithException() throws IOException
+ {
+ MockAsset root = createTree( );
+ MockStorage storage = new MockStorage( root );
+ root.list( ).get( 1 ).list( ).get( 2 ).setThrowException( true );
+
+ StorageUtil.deleteRecursively( root );
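+ // Errors are logged and suppressed, and MockStorage records the REMOVE attempt even for
+ // the throwing asset, so all assets are counted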
+ int expected = LEVEL1 * LEVEL2 * LEVEL3 + LEVEL1 * LEVEL2 + LEVEL1 + 1;
+ assertEquals( expected, storage.getStatus( ).size( MockStorage.REMOVE ) );
+
+ }
+
+ @Test
+ void testDeleteWithExceptionFailFast() throws IOException
+ {
+ MockAsset root = createTree( );
+ MockStorage storage = new MockStorage( root );
+ root.list( ).get( 1 ).list( ).get( 2 ).setThrowException( true );
+
+ StorageUtil.deleteRecursively( root, true );
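+ // Fail-fast traversal short-circuits on the first IOException, so only the assets
+ // visited up to and including the failing one are counted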
+ int expected = 113;
+ assertEquals( expected, storage.getStatus( ).size( MockStorage.REMOVE ) );
+
+ }
}
\ No newline at end of file
import org.apache.archiva.repository.storage.StorageAsset;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.function.Consumer;
/**
* @author Martin Stockhammer <martin_s@apache.org>
*/
-class VisitStatus
+public class VisitStatus
{
+
+ LinkedHashMap<String, LinkedList<StorageAsset>> applied = new LinkedHashMap<>( );
LinkedList<StorageAsset> visited = new LinkedList<>( );
- VisitStatus( )
+ public VisitStatus( )
{
}
visited.addLast( asset );
}
+ public void add(String type, StorageAsset asset) {
+ if (!applied.containsKey( type )) {
+ applied.put( type, new LinkedList<>( ) );
+ }
+ applied.get( type ).add( asset );
+ }
+
public StorageAsset getLast( )
{
return visited.getLast( );
return visited.size( );
}
+ public int size(String type) {
+ return applied.containsKey( type ) ? applied.get( type ).size( ) : 0;
+ }
+
}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing,
+ ~ software distributed under the License is distributed on an
+ ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ ~ KIND, either express or implied. See the License for the
+ ~ specific language governing permissions and limitations
+ ~ under the License.
+ -->
+
+
+<configuration status="error">
+ <appenders>
+ <Console name="console" target="SYSTEM_OUT">
+ <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
+ </Console>
+ </appenders>
+ <loggers>
+
+ <logger name="org.apache.archiva.repository" level="info"/>
+ <logger name="org.springframework" level="error"/>
+
+ <root level="info">
+ <appender-ref ref="console"/>
+ </root>
+ </loggers>
+</configuration>
+
+
--- /dev/null
+package org.apache.archiva.repository.storage;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.archiva.common.filelock.FileLockException;
+import org.apache.archiva.common.filelock.FileLockManager;
+import org.apache.archiva.common.filelock.Lock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.*;
+import java.util.HashSet;
+
+/**
+ *
+ * Utility class for file system based assets. Provides methods to copy assets between
+ * different storage instances and to copy asset data to local files.
+ *
+ * @author Martin Stockhammer <martin_s@apache.org>
+ */
+public class FsStorageUtil
+{
+ private static final Logger log = LoggerFactory.getLogger( FsStorageUtil.class);
+
+ /**
+ * Copies the source asset to the target. The assets may be from different RepositoryStorage instances.
+ * If you know that source and target are from the same storage instance, the copy method of the storage
+ * instance may be faster.
+ *
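+ * A usage sketch (the asset variables are illustrative):
+ * <pre>{@code
+ * FsStorageUtil.copyAsset( sourceFile, targetPath, true );
+ * }</pre>
+ *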
+ * @param source The source asset
+ * @param target The target asset
+ * @param locked If true, a read lock is set on the source and a write lock is set on the target.
+ * @param copyOptions Copy options
+ * @throws IOException If the copy fails
+ */
+ public static final void copyAsset( final StorageAsset source,
+ final StorageAsset target,
+ boolean locked,
+ final CopyOption... copyOptions ) throws IOException
+ {
+ if (source.isFileBased() && target.isFileBased()) {
+ // Short cut for FS operations
+ final Path sourcePath = source.getFilePath();
+ final Path targetPath = target.getFilePath( );
+ if (locked) {
+ final FileLockManager lmSource = ((FilesystemStorage)source.getStorage()).getFileLockManager();
+ final FileLockManager lmTarget = ((FilesystemStorage)target.getStorage()).getFileLockManager();
+ Lock lockRead = null;
+ Lock lockWrite = null;
+ try {
+ lockRead = lmSource.readFileLock(sourcePath);
+ } catch (Exception e) {
+ log.error("Could not create read lock on {}", sourcePath);
+ throw new IOException(e);
+ }
+ try {
+ lockWrite = lmTarget.writeFileLock(targetPath);
+ } catch (Exception e) {
+ log.error("Could not create write lock on {}", targetPath);
+ throw new IOException(e);
+ }
+ try {
+ Files.copy(sourcePath, targetPath, copyOptions);
+ } finally {
+ if (lockRead!=null) {
+ try {
+ lmSource.release(lockRead);
+ } catch (FileLockException e) {
+ log.error("Error during lock release of read lock {}", lockRead.getFile());
+ }
+ }
+ if (lockWrite!=null) {
+ try {
+ lmTarget.release(lockWrite);
+ } catch (FileLockException e) {
+ log.error("Error during lock release of write lock {}", lockWrite.getFile());
+ }
+ }
+ }
+ } else
+ {
+ Files.copy( sourcePath, targetPath, copyOptions );
+ }
+ } else {
+ try {
+ final RepositoryStorage sourceStorage = source.getStorage();
+ final RepositoryStorage targetStorage = target.getStorage();
+ sourceStorage.consumeDataFromChannel( source, is -> org.apache.archiva.repository.storage.util.StorageUtil.wrapWriteFunction( is, targetStorage, target, locked ), locked);
+ } catch (IOException e) {
+ throw e;
+ } catch (Throwable e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof IOException) {
+ throw (IOException)cause;
+ } else
+ {
+ throw new IOException( e );
+ }
+ }
+ }
+ }
+
+
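+ /**
+ * Copies the data of the given asset to the local destination file. For file based assets
+ * a direct file copy is used; otherwise the data is transferred through a file channel.
+ *
+ * @param asset The asset whose data is copied.
+ * @param destination The local destination path.
+ * @param copyOptions Copy options, e.g. StandardCopyOption.REPLACE_EXISTING.
+ * @throws IOException If the copy fails.
+ */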
+ public static final void copyToLocalFile(StorageAsset asset, Path destination, CopyOption... copyOptions) throws IOException {
+ if (asset.isFileBased()) {
+ Files.copy(asset.getFilePath(), destination, copyOptions);
+ } else {
+ try {
+
+ HashSet<OpenOption> openOptions = new HashSet<>();
+ for (CopyOption option : copyOptions) {
+ if (option == StandardCopyOption.REPLACE_EXISTING) {
+ openOptions.add(StandardOpenOption.CREATE);
+ openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
+ openOptions.add(StandardOpenOption.WRITE);
+ } else {
+ openOptions.add(StandardOpenOption.WRITE);
+ openOptions.add(StandardOpenOption.CREATE_NEW);
+ }
+ }
+ asset.getStorage().consumeDataFromChannel(asset, channel -> {
+ try {
+ FileChannel.open(destination, openOptions).transferFrom(channel, 0, Long.MAX_VALUE);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }, false);
+ } catch (Throwable e) {
+ if (e.getCause() instanceof IOException) {
+ throw (IOException)e.getCause();
+ } else {
+ throw new IOException(e);
+ }
+ }
+ }
+ }
+
+ public static class PathInformation {
+ final Path path ;
+ final boolean tmpFile;
+
+ PathInformation(Path path, boolean tmpFile) {
+ this.path = path;
+ this.tmpFile = tmpFile;
+ }
+
+ public Path getPath() {
+ return path;
+ }
+
+ public boolean isTmpFile() {
+ return tmpFile;
+ }
+
+ }
+
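+ /**
+ * Returns path information for the asset data. For file based assets the backing file path
+ * is returned directly. Otherwise the data is copied to a temporary file and isTmpFile()
+ * returns true; in that case the caller should delete the temporary file after use.
+ */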
+ public static final PathInformation getAssetDataAsPath(StorageAsset asset) throws IOException {
+ if (!asset.exists()) {
+ throw new IOException("Asset does not exist");
+ }
+ if (asset.isFileBased()) {
+ return new PathInformation(asset.getFilePath(), false);
+ } else {
+ Path tmpFile = Files.createTempFile(asset.getName(), org.apache.archiva.repository.storage.util.StorageUtil.getExtension(asset));
+ copyToLocalFile(asset, tmpFile, StandardCopyOption.REPLACE_EXISTING);
+ return new PathInformation(tmpFile, true);
+ }
+ }
+
+}
+++ /dev/null
-package org.apache.archiva.repository.storage;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.archiva.common.filelock.FileLockException;
-import org.apache.archiva.common.filelock.FileLockManager;
-import org.apache.archiva.common.filelock.FileLockTimeoutException;
-import org.apache.archiva.common.filelock.Lock;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.WritableByteChannel;
-import java.nio.file.*;
-import java.util.HashSet;
-import java.util.function.Consumer;
-
-/**
- *
- * Utility class for assets. Allows to copy, move between different storage instances and
- * recursively consume the tree.
- *
- * @author Martin Stockhammer <martin_s@apache.org>
- */
-public class StorageUtil
-{
- private static final int DEFAULT_BUFFER_SIZE = 4096;
- private static final Logger log = LoggerFactory.getLogger(StorageUtil.class);
-
- /**
- * Copies the source asset to the target. The assets may be from different RepositoryStorage instances.
- * If you know that source and asset are from the same storage instance, the copy method of the storage
- * instance may be faster.
- *
- * @param source The source asset
- * @param target The target asset
- * @param locked If true, a readlock is set on the source and a write lock is set on the target.
- * @param copyOptions Copy options
- * @throws IOException
- */
- public static final void copyAsset( final StorageAsset source,
- final StorageAsset target,
- boolean locked,
- final CopyOption... copyOptions ) throws IOException
- {
- if (source.isFileBased() && target.isFileBased()) {
- // Short cut for FS operations
- final Path sourcePath = source.getFilePath();
- final Path targetPath = target.getFilePath( );
- if (locked) {
- final FileLockManager lmSource = ((FilesystemStorage)source.getStorage()).getFileLockManager();
- final FileLockManager lmTarget = ((FilesystemStorage)target.getStorage()).getFileLockManager();
- Lock lockRead = null;
- Lock lockWrite = null;
- try {
- lockRead = lmSource.readFileLock(sourcePath);
- } catch (Exception e) {
- log.error("Could not create read lock on {}", sourcePath);
- throw new IOException(e);
- }
- try {
- lockWrite = lmTarget.writeFileLock(targetPath);
- } catch (Exception e) {
- log.error("Could not create write lock on {}", targetPath);
- throw new IOException(e);
- }
- try {
- Files.copy(sourcePath, targetPath, copyOptions);
- } finally {
- if (lockRead!=null) {
- try {
- lmSource.release(lockRead);
- } catch (FileLockException e) {
- log.error("Error during lock release of read lock {}", lockRead.getFile());
- }
- }
- if (lockWrite!=null) {
- try {
- lmTarget.release(lockWrite);
- } catch (FileLockException e) {
- log.error("Error during lock release of write lock {}", lockWrite.getFile());
- }
- }
- }
- } else
- {
- Files.copy( sourcePath, targetPath, copyOptions );
- }
- } else {
- try {
- final RepositoryStorage sourceStorage = source.getStorage();
- final RepositoryStorage targetStorage = target.getStorage();
- sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
- } catch (IOException e) {
- throw e;
- } catch (Throwable e) {
- Throwable cause = e.getCause();
- if (cause instanceof IOException) {
- throw (IOException)cause;
- } else
- {
- throw new IOException( e );
- }
- }
- }
- }
-
- /**
- * Moves a asset between different storage instances.
- * If you know that source and asset are from the same storage instance, the move method of the storage
- * instance may be faster.
- *
- * @param source The source asset
- * @param target The target asset
- * @param locked If true, a lock is used for the move operation.
- * @param copyOptions Options for copying
- * @throws IOException If the move fails
- */
- public static final void moveAsset(StorageAsset source, StorageAsset target, boolean locked, CopyOption... copyOptions) throws IOException
- {
- if (source.isFileBased() && target.isFileBased()) {
- // Short cut for FS operations
- // Move is atomic operation
- if (!Files.exists(target.getFilePath().getParent())) {
- Files.createDirectories(target.getFilePath().getParent());
- }
- Files.move( source.getFilePath(), target.getFilePath(), copyOptions );
- } else {
- try {
- final RepositoryStorage sourceStorage = source.getStorage();
- final RepositoryStorage targetStorage = target.getStorage();
- sourceStorage.consumeDataFromChannel( source, is -> wrapWriteFunction( is, targetStorage, target, locked ), locked);
- sourceStorage.removeAsset( source );
- } catch (IOException e) {
- throw e;
- } catch (Throwable e) {
- Throwable cause = e.getCause();
- if (cause instanceof IOException) {
- throw (IOException)cause;
- } else
- {
- throw new IOException( e );
- }
- }
- }
-
- }
-
- private static final void wrapWriteFunction(ReadableByteChannel is, RepositoryStorage targetStorage, StorageAsset target, boolean locked) {
- try {
- targetStorage.writeDataToChannel( target, os -> copy(is, os), locked );
- } catch (Exception e) {
- throw new RuntimeException( e );
- }
- }
-
-
- private static final void copy( final ReadableByteChannel is, final WritableByteChannel os ) {
- if (is instanceof FileChannel) {
- copy( (FileChannel) is, os );
- } else if (os instanceof FileChannel) {
- copy(is, (FileChannel)os);
- } else
- {
- try
- {
- ByteBuffer buffer = ByteBuffer.allocate( DEFAULT_BUFFER_SIZE );
- while ( is.read( buffer ) != -1 )
- {
- buffer.flip( );
- while ( buffer.hasRemaining( ) )
- {
- os.write( buffer );
- }
- buffer.clear( );
- }
- }
- catch ( IOException e )
- {
- throw new RuntimeException( e );
- }
- }
- }
-
- private static final void copy( final FileChannel is, final WritableByteChannel os ) {
- try
- {
- is.transferTo( 0, is.size( ), os );
- }
- catch ( IOException e )
- {
- throw new RuntimeException( e );
- }
- }
-
- private static final void copy( final ReadableByteChannel is, final FileChannel os ) {
- try
- {
- os.transferFrom( is, 0, Long.MAX_VALUE );
- }
- catch ( IOException e )
- {
- throw new RuntimeException( e );
- }
- }
-
- /**
- * Runs the consumer function recursively on each asset found starting at the base path
- * @param baseAsset The base path where to start search
- * @param consumer The consumer function applied to each found asset
- * @param depthFirst If true, the deepest elements are consumed first.
- * @param maxDepth The maximum depth to recurse into. 0 means, only the baseAsset is consumed, 1 the base asset and its children and so forth.
- */
- public static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer, final boolean depthFirst, final int maxDepth) throws IOException {
- recurse(baseAsset, consumer, depthFirst, maxDepth, 0);
- }
-
- /**
- * Runs the consumer function recursively on each asset found starting at the base path. The function descends into
- * maximum depth.
- *
- * @param baseAsset The base path where to start search
- * @param consumer The consumer function applied to each found asset
- * @param depthFirst If true, the deepest elements are consumed first.
- */
- public static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer, final boolean depthFirst) throws IOException {
- recurse(baseAsset, consumer, depthFirst, Integer.MAX_VALUE, 0);
- }
-
- /**
- * Runs the consumer function recursively on each asset found starting at the base path. It does not recurse with
- * depth first and stops only if there are no more children available.
- *
- * @param baseAsset The base path where to start search
- * @param consumer The consumer function applied to each found asset
- */
- public static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer) throws IOException {
- recurse(baseAsset, consumer, false, Integer.MAX_VALUE, 0);
- }
-
- private static final void recurse(final StorageAsset baseAsset, final Consumer<StorageAsset> consumer, final boolean depthFirst, final int maxDepth, final int currentDepth)
- throws IOException {
- if (!depthFirst) {
- consumer.accept(baseAsset);
- }
- if (currentDepth<maxDepth && baseAsset.isContainer()) {
- for(StorageAsset asset : baseAsset.list() ) {
- recurse(asset, consumer, depthFirst, maxDepth, currentDepth+1);
- }
- }
- if (depthFirst) {
- consumer.accept(baseAsset);
- }
- }
-
- /**
- * Deletes the given asset and all child assets recursively.
- * @param baseDir The base asset to remove.
- * @throws IOException
- */
- public static final void deleteRecursively(StorageAsset baseDir) throws IOException {
- recurse(baseDir, a -> {
- try {
- a.getStorage().removeAsset(a);
- } catch (IOException e) {
- log.error("Could not delete asset {}", a.getPath());
- }
- },true);
- }
-
- /**
- * Returns the extension of the name of a given asset. Extension is the substring after the last occurence of '.' in the
- * string. If no '.' is found, the empty string is returned.
- *
- * @param asset The asset from which to return the extension string.
- * @return The extension.
- */
- public static final String getExtension(StorageAsset asset) {
- return StringUtils.substringAfterLast(asset.getName(),".");
- }
-
- public static final void copyToLocalFile(StorageAsset asset, Path destination, CopyOption... copyOptions) throws IOException {
- if (asset.isFileBased()) {
- Files.copy(asset.getFilePath(), destination, copyOptions);
- } else {
- try {
-
- HashSet<OpenOption> openOptions = new HashSet<>();
- for (CopyOption option : copyOptions) {
- if (option == StandardCopyOption.REPLACE_EXISTING) {
- openOptions.add(StandardOpenOption.CREATE);
- openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
- openOptions.add(StandardOpenOption.WRITE);
- } else {
- openOptions.add(StandardOpenOption.WRITE);
- openOptions.add(StandardOpenOption.CREATE_NEW);
- }
- }
- asset.getStorage().consumeDataFromChannel(asset, channel -> {
- try {
- FileChannel.open(destination, openOptions).transferFrom(channel, 0, Long.MAX_VALUE);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }, false);
- } catch (Throwable e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException)e.getCause();
- } else {
- throw new IOException(e);
- }
- }
- }
- }
-
- public static class PathInformation {
- final Path path ;
- final boolean tmpFile;
-
- PathInformation(Path path, boolean tmpFile) {
- this.path = path;
- this.tmpFile = tmpFile;
- }
-
- public Path getPath() {
- return path;
- }
-
- public boolean isTmpFile() {
- return tmpFile;
- }
-
- }
-
- public static final PathInformation getAssetDataAsPath(StorageAsset asset) throws IOException {
- if (!asset.exists()) {
- throw new IOException("Asset does not exist");
- }
- if (asset.isFileBased()) {
- return new PathInformation(asset.getFilePath(), false);
- } else {
- Path tmpFile = Files.createTempFile(asset.getName(), getExtension(asset));
- copyToLocalFile(asset, tmpFile, StandardCopyOption.REPLACE_EXISTING);
- return new PathInformation(tmpFile, true);
- }
- }
-
-}
import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.maven.index.ArtifactContext;
import org.apache.maven.index.ArtifactContextProducer;
} catch (IOException e) {
log.warn("Index close failed");
}
- try {
- StorageUtil.deleteRecursively(context.getPath());
- } catch (IOException e) {
- throw new IndexUpdateFailedException("Could not delete index files");
- }
+ org.apache.archiva.repository.storage.util.StorageUtil.deleteRecursively(context.getPath());
});
try {
Repository repo = context.getRepository();
// First gather up the versions found as artifacts in the managed repository.
- try (Stream<StorageAsset> stream = artifactDir.list().stream() ) {
+ try (Stream<? extends StorageAsset> stream = artifactDir.list().stream() ) {
return stream.filter(asset -> !asset.isContainer()).map(path -> {
try {
ArtifactReference artifact = toArtifactReference(path.getPath());
// First gather up the versions found as artifacts in the managed repository.
- try (Stream<StorageAsset> stream = repoDir.list().stream() ) {
+ try (Stream<? extends StorageAsset> stream = repoDir.list().stream() ) {
return stream.filter(
asset -> !asset.isContainer())
.map(path -> {
import org.apache.archiva.repository.storage.StorageAsset;
import org.apache.archiva.repository.features.IndexCreationFeature;
import org.apache.archiva.repository.features.RemoteIndexFeature;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.maven.index.ArtifactContext;
import org.apache.maven.index.ArtifactContextProducer;
} catch (IOException e) {
log.warn("Index close failed");
}
- try {
- StorageUtil.deleteRecursively(context.getPath());
- } catch (IOException e) {
- throw new IndexUpdateFailedException("Could not delete index files");
- }
+ org.apache.archiva.repository.storage.util.StorageUtil.deleteRecursively(context.getPath());
});
try {
Repository repo = context.getRepository();
import org.apache.archiva.common.utils.VersionComparator;
import org.apache.archiva.common.utils.VersionUtil;
import org.apache.archiva.dependency.tree.maven2.DependencyTreeBuilder;
-import org.apache.archiva.maven2.metadata.MavenMetadataReader;
import org.apache.archiva.maven2.model.Artifact;
import org.apache.archiva.maven2.model.TreeEntry;
import org.apache.archiva.metadata.generic.GenericMetadataFacet;
import org.apache.archiva.repository.RepositoryRegistry;
import org.apache.archiva.repository.metadata.MetadataReader;
import org.apache.archiva.repository.metadata.base.MetadataTools;
+import org.apache.archiva.repository.storage.FsStorageUtil;
import org.apache.archiva.repository.storage.StorageAsset;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.archiva.rest.api.model.*;
import org.apache.archiva.rest.api.services.ArchivaRestServiceException;
import org.apache.archiva.rest.api.services.BrowseService;
import org.apache.archiva.rest.services.utils.ArtifactContentEntryComparator;
import org.apache.archiva.security.ArchivaSecurityException;
-import org.apache.archiva.xml.XMLException;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
if ( StringUtils.isNotBlank( path ) )
{
// zip entry of the path -> path must a real file entry of the archive
- StorageUtil.PathInformation pathInfo = StorageUtil.getAssetDataAsPath(file);
+ FsStorageUtil.PathInformation pathInfo = FsStorageUtil.getAssetDataAsPath(file);
JarFile jarFile = new JarFile( pathInfo.getPath().toFile());
ZipEntry zipEntry = jarFile.getEntry( path );
try (InputStream inputStream = jarFile.getInputStream( zipEntry ))
filterDepth++;
}
- StorageUtil.PathInformation pathInfo = StorageUtil.getAssetDataAsPath(file);
+ FsStorageUtil.PathInformation pathInfo = FsStorageUtil.getAssetDataAsPath(file);
JarFile jarFile = new JarFile(pathInfo.getPath().toFile());
try
{
import org.apache.archiva.repository.RepositoryNotFoundException;
import org.apache.archiva.repository.RepositoryRegistry;
import org.apache.archiva.repository.RepositoryType;
+import org.apache.archiva.repository.storage.FsStorageUtil;
import org.apache.archiva.repository.storage.RepositoryStorage;
import org.apache.archiva.repository.storage.StorageAsset;
-import org.apache.archiva.repository.storage.StorageUtil;
import org.apache.archiva.metadata.audit.RepositoryListener;
import org.apache.archiva.repository.metadata.base.MetadataTools;
import org.apache.archiva.repository.metadata.RepositoryMetadataException;
throws IOException
{
- StorageUtil.copyAsset( sourceFile, targetPath, true );
+ FsStorageUtil.copyAsset( sourceFile, targetPath, true );
if ( fixChecksums )
{
fixChecksums( targetPath );
import org.apache.archiva.model.ArtifactReference;
import org.apache.archiva.repository.ManagedRepositoryContent;
import org.apache.archiva.repository.storage.StorageAsset;
-import org.apache.archiva.repository.storage.StorageUtil;
-import org.apache.commons.io.FilenameUtils;
+import org.apache.archiva.repository.storage.util.StorageUtil;
-import java.nio.file.Path;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
SortedMap<String, StorageAsset> uniqueChildFiles = new TreeMap<>();
for ( StorageAsset resource : repositoryAssets )
{
- List<StorageAsset> files = resource.list();
+ List<? extends StorageAsset> files = resource.list();
for ( StorageAsset file : files )
{
// the first entry wins