
Merge changes I39bfefee,I47795987,I70d120fb,I58cc5e01,I96bee7b9

* changes:
  Enable configuration of non-standard pack settings
  Pass PackConfig down to PackWriter when packing
  Simplify UploadPack use of options during writing
  Move PackWriter configuration to PackConfig
  Allow PackWriter callers to manage the thread pool
Shawn O. Pearce (tags/v0.9.1)
commit 8e9cc826e9
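Taken together, the five changes let an application build one PackConfig, tune it, and hand it to every PackWriter it creates; the writer no longer carries its own copies of these settings. A minimal sketch of the resulting call pattern (the repository, object set, thread count, and output stream are the caller's; this is not code from the commit):

import java.io.OutputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;

class PackWithSharedConfig {
    static void pack(Repository db, Collection<ObjectId> want, OutputStream out)
            throws Exception {
        PackConfig pc = new PackConfig(db); // seeded from repository settings
        pc.setThreads(4);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pc.setExecutor(pool); // caller-managed pool, reusable across writers

        ProgressMonitor pm = NullProgressMonitor.INSTANCE;
        PackWriter pw = new PackWriter(pc, db.newObjectReader());
        try {
            pw.preparePack(pm, want, Collections.<ObjectId> emptySet());
            pw.writePack(pm, pm, out);
        } finally {
            pw.release();
            pool.shutdown();
        }
    }
}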

org.eclipse.jgit.pgm/resources/org/eclipse/jgit/pgm/CLIText.properties (+3, -0)

@@ -14,6 +14,7 @@ branchCreatedFrom =branch: Created from {0}
branchIsNotAnAncestorOfYourCurrentHEAD=The branch '{0}' is not an ancestor of your current HEAD.\nIf you are sure you want to delete it, run 'jgit branch -D {0}'.
branchNotFound=branch '{0}' not found.
cacheTreePathInfo="{0}": {1} entries, {2} children
configFileNotFound=configuration file {0} not found
cannotBeRenamed={0} cannot be renamed
cannotChekoutNoHeadsAdvertisedByRemote=cannot checkout; no HEAD advertised by remote
cannotCreateCommand=Cannot create command {0}
@@ -61,6 +62,7 @@ metaVar_bucket=BUCKET
metaVar_command=command
metaVar_commitOrTag=COMMIT|TAG
metaVar_commitish=commit-ish
metaVar_configFile=FILE
metaVar_connProp=conn.prop
metaVar_directory=DIRECTORY
metaVar_file=FILE
@@ -138,6 +140,7 @@ usage_approveDestructionOfRepository=approve destruction of repository
usage_beMoreVerbose=be more verbose
usage_beVerbose=be verbose
usage_cloneRepositoryIntoNewDir=Clone a repository into a new directory
usage_configFile=configuration file
usage_configureTheServiceInDaemonServicename=configure the service in daemon.servicename
usage_deleteBranchEvenIfNotMerged=delete branch (even if not merged)
usage_deleteFullyMergedBranch=delete fully merged branch

org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/CLIText.java (+1, -0)

@@ -67,6 +67,7 @@ public class CLIText extends TranslationBundle {
/***/ public String branchIsNotAnAncestorOfYourCurrentHEAD;
/***/ public String branchNotFound;
/***/ public String cacheTreePathInfo;
/***/ public String configFileNotFound;
/***/ public String cannotBeRenamed;
/***/ public String cannotChekoutNoHeadsAdvertisedByRemote;
/***/ public String cannotCreateCommand;

org.eclipse.jgit.pgm/src/org/eclipse/jgit/pgm/Daemon.java (+36, -1)

@@ -48,13 +48,22 @@ import java.net.InetSocketAddress;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

import org.eclipse.jgit.storage.file.FileBasedConfig;
import org.eclipse.jgit.storage.file.WindowCache;
import org.eclipse.jgit.storage.file.WindowCacheConfig;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.DaemonService;
import org.eclipse.jgit.util.FS;
import org.kohsuke.args4j.Argument;
import org.kohsuke.args4j.Option;

@Command(common = true, usage = "usage_exportRepositoriesOverGit")
class Daemon extends TextBuiltin {
@Option(name = "--config-file", metaVar = "metaVar_configFile", usage = "usage_configFile")
File configFile;

@Option(name = "--port", metaVar = "metaVar_port", usage = "usage_portNumberToListenOn")
int port = org.eclipse.jgit.transport.Daemon.DEFAULT_PORT;

@@ -89,12 +98,38 @@ class Daemon extends TextBuiltin {

@Override
protected void run() throws Exception {
PackConfig packConfig = new PackConfig();

if (configFile != null) {
if (!configFile.exists()) {
throw die(MessageFormat.format(
CLIText.get().configFileNotFound, //
configFile.getAbsolutePath()));
}

FileBasedConfig cfg = new FileBasedConfig(configFile, FS.DETECTED);
cfg.load();

WindowCacheConfig wcc = new WindowCacheConfig();
wcc.fromConfig(cfg);
WindowCache.reconfigure(wcc);

packConfig.fromConfig(cfg);
}

int threads = packConfig.getThreads();
if (threads <= 0)
threads = Runtime.getRuntime().availableProcessors();
if (1 < threads)
packConfig.setExecutor(Executors.newFixedThreadPool(threads));

final org.eclipse.jgit.transport.Daemon d;

d = new org.eclipse.jgit.transport.Daemon(
host != null ? new InetSocketAddress(host, port)
: new InetSocketAddress(port));
d.setExportAll(exportAll);
d.setPackConfig(packConfig);
if (0 <= timeout)
d.setTimeout(timeout);
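The keys read through this path are the ones PackConfig.fromConfig() understands: pack.window, pack.depth, pack.windowmemory, pack.deltacachesize, pack.deltacachelimit, pack.threads, pack.indexversion, pack.compression (falling back to core.compression), core.bigfilethreshold, and the non-standard pack.reusedeltas, pack.reuseobjects, and pack.deltacompression. A hedged sketch of applying such settings without a file on disk, assuming Config.fromText() is available in this revision:

import org.eclipse.jgit.errors.ConfigInvalidException;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.storage.pack.PackConfig;

class ParsePackSection {
    static PackConfig parse() throws ConfigInvalidException {
        Config cfg = new Config();
        cfg.fromText("[pack]\n"
                + "\tthreads = 0\n" // 0 = auto-detect processor count
                + "\twindow = 10\n"
                + "\tdepth = 50\n"
                + "\treusedeltas = true\n"); // non-standard key, still honored
        PackConfig pc = new PackConfig();
        pc.fromConfig(cfg); // keys absent from cfg keep their defaults
        return pc;
    }
}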


org.eclipse.jgit.test/tst/org/eclipse/jgit/storage/file/PackWriterTest.java (+31, -15)

@@ -66,6 +66,7 @@ import org.eclipse.jgit.lib.TextProgressMonitor;
import org.eclipse.jgit.revwalk.RevObject;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.file.PackIndex.MutableEntry;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;
import org.eclipse.jgit.transport.IndexPack;
import org.eclipse.jgit.util.JGitTestUtil;
@@ -78,6 +79,8 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
private static final List<RevObject> EMPTY_LIST_REVS = Collections
.<RevObject> emptyList();

private PackConfig config;

private PackWriter writer;

private ByteArrayOutputStream os;
@@ -96,16 +99,23 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
packBase = new File(trash, "tmp_pack");
packFile = new File(trash, "tmp_pack.pack");
indexFile = new File(trash, "tmp_pack.idx");
writer = new PackWriter(db);
config = new PackConfig(db);
}

public void tearDown() throws Exception {
if (writer != null)
writer.release();
super.tearDown();
}

/**
* Test constructor for exceptions, default settings, initialization.
*/
public void testContructor() {
writer = new PackWriter(config, db.newObjectReader());
assertEquals(false, writer.isDeltaBaseAsOffset());
assertEquals(true, writer.isReuseDeltas());
assertEquals(true, writer.isReuseObjects());
assertEquals(true, config.isReuseDeltas());
assertEquals(true, config.isReuseObjects());
assertEquals(0, writer.getObjectsNumber());
}

@@ -113,13 +123,17 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
* Change default settings and verify them.
*/
public void testModifySettings() {
config.setReuseDeltas(false);
config.setReuseObjects(false);
config.setDeltaBaseAsOffset(false);
assertEquals(false, config.isReuseDeltas());
assertEquals(false, config.isReuseObjects());
assertEquals(false, config.isDeltaBaseAsOffset());

writer = new PackWriter(config, db.newObjectReader());
writer.setDeltaBaseAsOffset(true);
writer.setReuseDeltas(false);
writer.setReuseObjects(false);

assertEquals(true, writer.isDeltaBaseAsOffset());
assertEquals(false, writer.isReuseDeltas());
assertEquals(false, writer.isReuseObjects());
assertEquals(false, config.isDeltaBaseAsOffset());
}

/**
@@ -188,7 +202,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
* @throws IOException
*/
public void testWritePack1() throws IOException {
writer.setReuseDeltas(false);
config.setReuseDeltas(false);
writeVerifyPack1();
}

@@ -199,8 +213,8 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
* @throws IOException
*/
public void testWritePack1NoObjectReuse() throws IOException {
writer.setReuseDeltas(false);
writer.setReuseObjects(false);
config.setReuseDeltas(false);
config.setReuseObjects(false);
writeVerifyPack1();
}

@@ -231,7 +245,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
* @throws IOException
*/
public void testWritePack2DeltasReuseOffsets() throws IOException {
writer.setDeltaBaseAsOffset(true);
config.setDeltaBaseAsOffset(true);
writeVerifyPack2(true);
}

@@ -265,7 +279,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
*
*/
public void testWritePack3() throws MissingObjectException, IOException {
writer.setReuseDeltas(false);
config.setReuseDeltas(false);
final ObjectId forcedOrder[] = new ObjectId[] {
ObjectId.fromString("82c6b885ff600be425b4ea96dee75dca255b69e7"),
ObjectId.fromString("c59759f143fb1fe21c197981df75a7ee00290799"),
@@ -363,7 +377,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
}

public void testWriteIndex() throws Exception {
writer.setIndexVersion(2);
config.setIndexVersion(2);
writeVerifyPack4(false);

// Validate that IndexPack came up with the right CRC32 value.
@@ -419,7 +433,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
}

private void writeVerifyPack2(boolean deltaReuse) throws IOException {
writer.setReuseDeltas(deltaReuse);
config.setReuseDeltas(deltaReuse);
final LinkedList<ObjectId> interestings = new LinkedList<ObjectId>();
interestings.add(ObjectId
.fromString("82c6b885ff600be425b4ea96dee75dca255b69e7"));
@@ -482,6 +496,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
final boolean ignoreMissingUninteresting)
throws MissingObjectException, IOException {
NullProgressMonitor m = NullProgressMonitor.INSTANCE;
writer = new PackWriter(config, db.newObjectReader());
writer.setThin(thin);
writer.setIgnoreMissingUninteresting(ignoreMissingUninteresting);
writer.preparePack(m, interestings, uninterestings);
@@ -493,6 +508,7 @@ public class PackWriterTest extends SampleDataRepositoryTestCase {
private void createVerifyOpenPack(final Iterator<RevObject> objectSource)
throws MissingObjectException, IOException {
NullProgressMonitor m = NullProgressMonitor.INSTANCE;
writer = new PackWriter(config, db.newObjectReader());
writer.preparePack(objectSource);
writer.writePack(m, m, os);
writer.release();

org.eclipse.jgit/src/org/eclipse/jgit/lib/Config.java (+15, -0)

@@ -216,6 +216,21 @@ public class Config {
, section, name));
}

/**
* Obtain an integer value from the configuration.
*
* @param section
* section the key is grouped within.
* @param name
* name of the key to get.
* @param defaultValue
* default value to return if no value was present.
* @return an integer value from the configuration, or defaultValue.
*/
public long getLong(String section, String name, long defaultValue) {
return getLong(section, null, name, defaultValue);
}

/**
* Obtain an integer value from the configuration.
*

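This overload is what PackConfig.fromConfig(), added later in this commit, relies on for long-valued keys such as pack.windowmemory. A small usage sketch, assuming an open Repository:

import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.Repository;

class ReadLongValues {
    static long windowMemory(Repository db) {
        Config rc = db.getConfig();
        // Returns 0 when pack.windowmemory is not set in the config.
        return rc.getLong("pack", "windowmemory", 0);
    }
}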
org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaCache.java (+3, -3)

@@ -55,9 +55,9 @@ class DeltaCache {

private long used;

DeltaCache(PackWriter pw) {
size = pw.getDeltaCacheSize();
entryLimit = pw.getDeltaCacheLimit();
DeltaCache(PackConfig pc) {
size = pc.getDeltaCacheSize();
entryLimit = pc.getDeltaCacheLimit();
queue = new ReferenceQueue<byte[]>();
}


org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaTask.java (+88, -0)

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2010, Google Inc.
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
* under the terms of the Eclipse Distribution License v1.0 which
* accompanies this distribution, is reproduced below, and is
* available at http://www.eclipse.org/org/documents/edl-v10.php
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* - Neither the name of the Eclipse Foundation, Inc. nor the
* names of its contributors may be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

package org.eclipse.jgit.storage.pack;

import java.util.concurrent.Callable;

import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.ProgressMonitor;

final class DeltaTask implements Callable<Object> {
private final PackConfig config;

private final ObjectReader templateReader;

private final DeltaCache dc;

private final ProgressMonitor pm;

private final int batchSize;

private final int start;

private final ObjectToPack[] list;

DeltaTask(PackConfig config, ObjectReader reader, DeltaCache dc,
ProgressMonitor pm, int batchSize, int start, ObjectToPack[] list) {
this.config = config;
this.templateReader = reader;
this.dc = dc;
this.pm = pm;
this.batchSize = batchSize;
this.start = start;
this.list = list;
}

public Object call() throws Exception {
final ObjectReader or = templateReader.newReader();
try {
DeltaWindow dw;
dw = new DeltaWindow(config, dc, or);
dw.search(pm, list, start, batchSize);
} finally {
or.release();
}
return null;
}
}
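DeltaTask is package-private, so applications never submit it directly; it simply packages one DeltaWindow search as a Callable over a slice of the object list. The shape it enables is ordinary Callable fan-out, which the runTasks() helper added to PackWriter below implements. A generic sketch of that pattern with hypothetical tasks:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class FanOut {
    static void runAll(List<? extends Callable<Object>> tasks)
            throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(
                Runtime.getRuntime().availableProcessors());
        try {
            List<Future<Object>> futures = new ArrayList<Future<Object>>();
            for (Callable<Object> t : tasks)
                futures.add(pool.submit(t));
            for (Future<Object> f : futures)
                f.get(); // rethrows a task failure as ExecutionException
        } finally {
            pool.shutdown();
        }
    }
}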

org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/DeltaWindow.java (+8, -8)

@@ -60,7 +60,7 @@ class DeltaWindow {

private static final int NEXT_SRC = 1;

private final PackWriter writer;
private final PackConfig config;

private final DeltaCache deltaCache;

@@ -101,8 +101,8 @@ class DeltaWindow {
/** Used to compress cached deltas. */
private Deflater deflater;

DeltaWindow(PackWriter pw, DeltaCache dc, ObjectReader or) {
writer = pw;
DeltaWindow(PackConfig pc, DeltaCache dc, ObjectReader or) {
config = pc;
deltaCache = dc;
reader = or;

@@ -117,12 +117,12 @@ class DeltaWindow {
// PackWriter has a minimum of 2 for the window size, but then
// users might complain that JGit is creating a bigger pack file.
//
window = new DeltaWindowEntry[pw.getDeltaSearchWindowSize() + 1];
window = new DeltaWindowEntry[config.getDeltaSearchWindowSize() + 1];
for (int i = 0; i < window.length; i++)
window[i] = new DeltaWindowEntry();

maxMemory = pw.getDeltaSearchMemoryLimit();
maxDepth = pw.getMaxDeltaDepth();
maxMemory = config.getDeltaSearchMemoryLimit();
maxDepth = config.getMaxDeltaDepth();
}

void search(ProgressMonitor monitor, ObjectToPack[] toSearch, int off,
@@ -442,7 +442,7 @@ class DeltaWindow {
IncorrectObjectTypeException, IOException, LargeObjectException {
byte[] buf = ent.buffer;
if (buf == null) {
buf = writer.buffer(reader, ent.object);
buf = PackWriter.buffer(config, reader, ent.object);
if (0 < maxMemory)
loaded += buf.length;
ent.buffer = buf;
@@ -452,7 +452,7 @@ class DeltaWindow {

private Deflater deflater() {
if (deflater == null)
deflater = new Deflater(writer.getCompressionLevel());
deflater = new Deflater(config.getCompressionLevel());
else
deflater.reset();
return deflater;

org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackConfig.java (+561, -33)

@@ -1,5 +1,6 @@
/*
* Copyright (C) 2010, Google Inc.
* Copyright (C) 2008-2010, Google Inc.
* Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
* and other copyright owners as documented in the project's IP log.
*
* This program and the accompanying materials are made available
@@ -43,51 +44,578 @@

package org.eclipse.jgit.storage.pack;

import java.util.concurrent.Executor;
import java.util.zip.Deflater;

import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.file.PackIndexWriter;

/**
* Configuration used by a {@link PackWriter} when constructing the stream.
*
* A configuration may be modified once created, but should not be modified
* while it is being used by a PackWriter. If a configuration is not modified it
* is safe to share the same configuration instance between multiple concurrent
* threads executing different PackWriters.
*/
public class PackConfig {
/**
* Default value of deltas reuse option: {@value}
*
* @see #setReuseDeltas(boolean)
*/
public static final boolean DEFAULT_REUSE_DELTAS = true;

/**
* Default value of objects reuse option: {@value}
*
* @see #setReuseObjects(boolean)
*/
public static final boolean DEFAULT_REUSE_OBJECTS = true;

/**
* Default value of delta compress option: {@value}
*
* @see #setDeltaCompress(boolean)
*/
public static final boolean DEFAULT_DELTA_COMPRESS = true;

/**
* Default value of delta base as offset option: {@value}
*
* @see #setDeltaBaseAsOffset(boolean)
*/
public static final boolean DEFAULT_DELTA_BASE_AS_OFFSET = false;

/**
* Default value of maximum delta chain depth: {@value}
*
* @see #setMaxDeltaDepth(int)
*/
public static final int DEFAULT_MAX_DELTA_DEPTH = 50;

/**
* Default window size during packing: {@value}
*
* @see #setDeltaSearchWindowSize(int)
*/
public static final int DEFAULT_DELTA_SEARCH_WINDOW_SIZE = 10;

/**
* Default big file threshold: {@value}
*
* @see #setBigFileThreshold(long)
*/
public static final long DEFAULT_BIG_FILE_THRESHOLD = 50 * 1024 * 1024;

/**
* Default delta cache size: {@value}
*
* @see #setDeltaCacheSize(long)
*/
public static final long DEFAULT_DELTA_CACHE_SIZE = 50 * 1024 * 1024;

/**
* Default delta cache limit: {@value}
*
* @see #setDeltaCacheLimit(int)
*/
public static final int DEFAULT_DELTA_CACHE_LIMIT = 100;

/**
* Default index version: {@value}
*
* @see #setIndexVersion(int)
*/
public static final int DEFAULT_INDEX_VERSION = 2;


private int compressionLevel = Deflater.DEFAULT_COMPRESSION;

private boolean reuseDeltas = DEFAULT_REUSE_DELTAS;

private boolean reuseObjects = DEFAULT_REUSE_OBJECTS;

private boolean deltaBaseAsOffset = DEFAULT_DELTA_BASE_AS_OFFSET;

private boolean deltaCompress = DEFAULT_DELTA_COMPRESS;

private int maxDeltaDepth = DEFAULT_MAX_DELTA_DEPTH;

private int deltaSearchWindowSize = DEFAULT_DELTA_SEARCH_WINDOW_SIZE;

private long deltaSearchMemoryLimit;

private long deltaCacheSize = DEFAULT_DELTA_CACHE_SIZE;

private int deltaCacheLimit = DEFAULT_DELTA_CACHE_LIMIT;

private long bigFileThreshold = DEFAULT_BIG_FILE_THRESHOLD;

private int threads;

private Executor executor;

private int indexVersion = DEFAULT_INDEX_VERSION;


/** Create a default configuration. */
public PackConfig() {
// Fields are initialized to defaults.
}

/**
* Create a configuration honoring the repository's settings.
*
* @param db
* the repository to read settings from. The repository is not
* retained by the new configuration, instead its settings are
* copied during the constructor.
*/
public PackConfig(Repository db) {
fromConfig(db.getConfig());
}

/**
* Create a configuration honoring settings in a {@link Config}.
*
* @param cfg
* the source to read settings from. The source is not retained
* by the new configuration, instead its settings are copied
* during the constructor.
*/
public PackConfig(Config cfg) {
fromConfig(cfg);
}

/**
* Check whether to reuse deltas existing in repository.
*
* Default setting: {@value #DEFAULT_REUSE_DELTAS}
*
* @return true if object is configured to reuse deltas; false otherwise.
*/
public boolean isReuseDeltas() {
return reuseDeltas;
}

/**
* Set reuse deltas configuration option for the writer.
*
* When enabled, the writer will search the repository for an existing
* delta representation of each object and use it if possible. Normally
* only deltas whose base is another object in the set of objects being
* packed are used; the exception is thin packs, where the base object
* may exist on the other side of the connection.
*
* When raw delta data is directly copied from a pack file, its checksum is
* computed to verify the data is not corrupt.
*
* Default setting: {@value #DEFAULT_REUSE_DELTAS}
*
* @param reuseDeltas
* boolean indicating whether or not try to reuse deltas.
*/
public void setReuseDeltas(boolean reuseDeltas) {
this.reuseDeltas = reuseDeltas;
}

/**
* Check whether to reuse existing object representations from the
* repository.
*
* Default setting: {@value #DEFAULT_REUSE_OBJECTS}
*
* @return true if the writer is configured to reuse object representations
*         from existing packs; false otherwise.
*/
public boolean isReuseObjects() {
return reuseObjects;
}

/**
* Set reuse objects configuration option for the writer.
*
* If enabled, the writer searches for a compressed representation of each
* object in an existing pack file. Where possible, the compressed data is
* copied directly from that pack file, and its checksum is verified.
*
* Default setting: {@value #DEFAULT_REUSE_OBJECTS}
*
* @param reuseObjects
* boolean indicating whether or not writer should reuse existing
* objects representation.
*/
public void setReuseObjects(boolean reuseObjects) {
this.reuseObjects = reuseObjects;
}

/**
* True if writer can use offsets to point to a delta base.
*
* If true the writer may choose to use an offset to point to a delta base
* in the same pack, this is a newer style of reference that saves space.
* False if the writer has to use the older (and more compatible style) of
* storing the full ObjectId of the delta base.
*
* Default setting: {@value #DEFAULT_DELTA_BASE_AS_OFFSET}
*
* @return true if delta base is stored as an offset; false if it is stored
* as an ObjectId.
*/
public boolean isDeltaBaseAsOffset() {
return deltaBaseAsOffset;
}

/**
* Set writer delta base format.
*
* Delta base can be written as an offset in a pack file (new approach
* reducing file size) or as an object id (legacy approach, compatible with
* old readers).
*
* Default setting: {@value #DEFAULT_DELTA_BASE_AS_OFFSET}
*
* @param deltaBaseAsOffset
* boolean indicating whether delta base can be stored as an
* offset.
*/
public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
this.deltaBaseAsOffset = deltaBaseAsOffset;
}

/**
* Check whether the writer will create new deltas on the fly.
*
* Default setting: {@value #DEFAULT_DELTA_COMPRESS}
*
* @return true if the writer will create a new delta when either
* {@link #isReuseDeltas()} is false, or no suitable delta is
* available for reuse.
*/
public boolean isDeltaCompress() {
return deltaCompress;
}

/**
* Set whether or not the writer will create new deltas on the fly.
*
* Default setting: {@value #DEFAULT_DELTA_COMPRESS}
*
* @param deltaCompress
* true to create deltas when {@link #isReuseDeltas()} is false,
* or when a suitable delta isn't available for reuse. Set to
* false to write whole objects instead.
*/
public void setDeltaCompress(boolean deltaCompress) {
this.deltaCompress = deltaCompress;
}

/**
* Get maximum depth of delta chain set up for the writer.
*
* Generated chains are not longer than this value.
*
* Default setting: {@value #DEFAULT_MAX_DELTA_DEPTH}
*
* @return maximum delta chain depth.
*/
public int getMaxDeltaDepth() {
return maxDeltaDepth;
}

/**
* Set up maximum depth of delta chain for the writer.
*
* Generated chains are not longer than this value. Too low a value hurts
* compression, while too high a value makes unpacking (reading) slower.
*
* Default setting: {@value #DEFAULT_MAX_DELTA_DEPTH}
*
* @param maxDeltaDepth
* maximum delta chain depth.
*/
public void setMaxDeltaDepth(int maxDeltaDepth) {
this.maxDeltaDepth = maxDeltaDepth;
}

/**
* Get the number of objects to try when looking for a delta base.
*
* This limit applies per thread; if 4 threads are used, the actual memory
* used can be 4 times this value.
*
* Default setting: {@value #DEFAULT_DELTA_SEARCH_WINDOW_SIZE}
*
* @return the object count to be searched.
*/
public int getDeltaSearchWindowSize() {
return deltaSearchWindowSize;
}

/**
* Set the number of objects considered when searching for a delta base.
*
* Default setting: {@value #DEFAULT_DELTA_SEARCH_WINDOW_SIZE}
*
* @param objectCount
* number of objects to search at once. Must be at least 2.
*/
public void setDeltaSearchWindowSize(int objectCount) {
if (objectCount <= 2)
setDeltaCompress(false);
else
deltaSearchWindowSize = objectCount;
}

/**
* Get maximum number of bytes to put into the delta search window.
*
* Default setting is 0, for an unlimited amount of memory usage. The
* actual memory used is the smaller of this limit and the space consumed
* by at most {@link #getDeltaSearchWindowSize()} objects.
*
* This limit applies per thread; if 4 threads are used, the actual memory
* limit can be 4 times this value.
*
* @return the memory limit.
*/
public long getDeltaSearchMemoryLimit() {
return deltaSearchMemoryLimit;
}

/**
* Set the maximum number of bytes to put into the delta search window.
*
* Default setting is 0, for an unlimited amount of memory usage. If the
* memory limit is reached before {@link #getDeltaSearchWindowSize()} the
* window size is temporarily lowered.
*
* @param memoryLimit
* Maximum number of bytes to load at once, 0 for unlimited.
*/
public void setDeltaSearchMemoryLimit(long memoryLimit) {
deltaSearchMemoryLimit = memoryLimit;
}

/**
* Get the size of the in-memory delta cache.
*
* This limit is for the entire writer, even if multiple threads are used.
*
* Default setting: {@value #DEFAULT_DELTA_CACHE_SIZE}
*
* @return maximum number of bytes worth of delta data to cache in memory.
* If 0 the cache is infinite in size (up to the JVM heap limit
* anyway). A very tiny size such as 1 indicates the cache is
* effectively disabled.
*/
public long getDeltaCacheSize() {
return deltaCacheSize;
}

/**
* Set the maximum number of bytes of delta data to cache.
*
* During delta search, up to this many bytes worth of small or hard to
* compute deltas will be stored in memory. This cache speeds up writing by
* allowing the cached entry to simply be dumped to the output stream.
*
* Default setting: {@value #DEFAULT_DELTA_CACHE_SIZE}
*
* @param size
* number of bytes to cache. Set to 0 to enable an infinite
* cache, set to 1 (an impossible size for any delta) to disable
* the cache.
*/
public void setDeltaCacheSize(long size) {
deltaCacheSize = size;
}

/**
* Maximum size in bytes of a delta to cache.
*
* Default setting: {@value #DEFAULT_DELTA_CACHE_LIMIT}
*
* @return maximum size (in bytes) of a delta that should be cached.
*/
public int getDeltaCacheLimit() {
return deltaCacheLimit;
}

/**
* Set the maximum size of a delta that should be cached.
*
* During delta search, any delta smaller than this size will be cached, up
* to the {@link #getDeltaCacheSize()} maximum limit. This speeds up writing
* by allowing these cached deltas to be output as-is.
*
* Default setting: {@value #DEFAULT_DELTA_CACHE_LIMIT}
*
* @param size
* maximum size (in bytes) of a delta to be cached.
*/
public void setDeltaCacheLimit(int size) {
deltaCacheLimit = size;
}

/**
* Get the maximum file size that will be delta compressed.
*
* Files bigger than this setting will not be delta compressed, as they are
* more than likely already highly compressed binary data files that do not
* delta compress well, such as MPEG videos.
*
* Default setting: {@value #DEFAULT_BIG_FILE_THRESHOLD}
*
* @return the configured big file threshold.
*/
public long getBigFileThreshold() {
return bigFileThreshold;
}

/**
* Set the maximum file size that should be considered for deltas.
*
* Default setting: {@value #DEFAULT_BIG_FILE_THRESHOLD}
*
* @param bigFileThreshold
* the limit, in bytes.
*/
public void setBigFileThreshold(long bigFileThreshold) {
this.bigFileThreshold = bigFileThreshold;
}

/**
* Get the compression level applied to objects in the pack.
*
* Default setting: {@value java.util.zip.Deflater#DEFAULT_COMPRESSION}
*
* @return current compression level, see {@link java.util.zip.Deflater}.
*/
public int getCompressionLevel() {
return compressionLevel;
}

/**
* Set the compression level applied to objects in the pack.
*
* Default setting: {@value java.util.zip.Deflater#DEFAULT_COMPRESSION}
*
* @param level
* compression level, must be a valid level recognized by the
* {@link java.util.zip.Deflater} class.
*/
public void setCompressionLevel(int level) {
compressionLevel = level;
}

/**
* Get the number of threads used during delta compression.
*
* Default setting: 0 (auto-detect processors)
*
* @return number of threads used for delta compression. 0 will auto-detect
* the threads to the number of available processors.
*/
public int getThreads() {
return threads;
}

/**
* Set the number of threads to use for delta compression.
*
* During delta compression, if there are enough objects to be considered
* the writer will start up concurrent threads and allow them to compress
* different sections of the repository concurrently.
*
* An application thread pool can be set by {@link #setExecutor(Executor)}.
* If not set a temporary pool will be created by the writer, and torn down
* automatically when compression is over.
*
* Default setting: 0 (auto-detect processors)
*
* @param threads
* number of threads to use. If <= 0 the number of available
* processors for this JVM is used.
*/
public void setThreads(int threads) {
this.threads = threads;
}

/** @return the preferred thread pool to execute delta search on. */
public Executor getExecutor() {
return executor;
}

/**
* Set the executor to use when using threads.
*
* During delta compression if the executor is non-null jobs will be queued
* up on it to perform delta compression in parallel. Aside from setting the
* executor, the caller must set {@link #setThreads(int)} to enable threaded
* delta search.
*
* @param executor
* executor to use for threads. Set to null to create a temporary
* executor just for the writer.
*/
public void setExecutor(Executor executor) {
this.executor = executor;
}

/**
* Get the pack index file format version this instance creates.
*
* Default setting: {@value #DEFAULT_INDEX_VERSION}
*
* @return the index version, the special version 0 designates the oldest
* (most compatible) format available for the objects.
* @see PackIndexWriter
*/
public int getIndexVersion() {
return indexVersion;
}

/**
* Set the pack index file format version this instance will create.
*
* Default setting: {@value #DEFAULT_INDEX_VERSION}
*
* @param version
* the version to write. The special version 0 designates the
* oldest (most compatible) format available for the objects.
* @see PackIndexWriter
*/
public void setIndexVersion(final int version) {
indexVersion = version;
}

/**
* Update properties by setting fields from the configuration.
*
* If a property's corresponding variable is not defined in the supplied
* configuration, then it is left unmodified.
*
* @param rc
* configuration to read properties from.
*/
public void fromConfig(final Config rc) {
setMaxDeltaDepth(rc.getInt("pack", "depth", getMaxDeltaDepth()));
setDeltaSearchWindowSize(rc.getInt("pack", "window", getDeltaSearchWindowSize()));
setDeltaSearchMemoryLimit(rc.getLong("pack", "windowmemory", getDeltaSearchMemoryLimit()));
setDeltaCacheSize(rc.getLong("pack", "deltacachesize", getDeltaCacheSize()));
setDeltaCacheLimit(rc.getInt("pack", "deltacachelimit", getDeltaCacheLimit()));
setCompressionLevel(rc.getInt("pack", "compression",
rc.getInt("core", "compression", getCompressionLevel())));
setIndexVersion(rc.getInt("pack", "indexversion", getIndexVersion()));
setBigFileThreshold(rc.getLong("core", "bigfilethreshold", getBigFileThreshold()));
setThreads(rc.getInt("pack", "threads", getThreads()));

// These variables aren't standardized
//
setReuseDeltas(rc.getBoolean("pack", "reusedeltas", isReuseDeltas()));
setReuseObjects(rc.getBoolean("pack", "reuseobjects", isReuseObjects()));
setDeltaCompress(rc.getBoolean("pack", "deltacompression", isDeltaCompress()));
}
}
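As the class javadoc above states, an unmodified PackConfig may be shared between concurrent PackWriters. A sketch of that discipline (tune first, then treat as read-only):

import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;

class SharedConfig {
    static PackConfig configure(Repository db) {
        PackConfig pc = new PackConfig(db);
        pc.setDeltaSearchWindowSize(20); // tune before sharing...
        pc.setCompressionLevel(1);
        return pc; // ...then treat as read-only across threads
    }

    static PackWriter newWriter(Repository db, PackConfig pc) {
        // Each concurrent writer gets its own reader; the config is shared.
        return new PackWriter(pc, db.newObjectReader());
    }
}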

org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackWriter.java (+124, -436)

@@ -58,8 +58,12 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
@@ -71,7 +75,6 @@ import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
@@ -123,47 +126,6 @@ import org.eclipse.jgit.util.TemporaryBuffer;
* </p>
*/
public class PackWriter {
/**
* Default value of deltas reuse option.
*
* @see #setReuseDeltas(boolean)
*/
public static final boolean DEFAULT_REUSE_DELTAS = true;

/**
* Default value of objects reuse option.
*
* @see #setReuseObjects(boolean)
*/
public static final boolean DEFAULT_REUSE_OBJECTS = true;

/**
* Default value of delta base as offset option.
*
* @see #setDeltaBaseAsOffset(boolean)
*/
public static final boolean DEFAULT_DELTA_BASE_AS_OFFSET = false;

/**
* Default value of maximum delta chain depth.
*
* @see #setMaxDeltaDepth(int)
*/
public static final int DEFAULT_MAX_DELTA_DEPTH = 50;

/**
* Default window size during packing.
*
* @see #setDeltaSearchWindowSize(int)
*/
public static final int DEFAULT_DELTA_SEARCH_WINDOW_SIZE = 10;

static final long DEFAULT_BIG_FILE_THRESHOLD = 50 * 1024 * 1024;

static final long DEFAULT_DELTA_CACHE_SIZE = 50 * 1024 * 1024;

static final int DEFAULT_DELTA_CACHE_LIMIT = 100;

private static final int PACK_VERSION_GENERATED = 2;

@SuppressWarnings("unchecked")
@@ -181,8 +143,6 @@ public class PackWriter {
// edge objects for thin packs
private final ObjectIdSubclassMap<ObjectToPack> edgeObjects = new ObjectIdSubclassMap<ObjectToPack>();

private int compressionLevel;

private Deflater myDeflater;

private final ObjectReader reader;
@@ -190,33 +150,15 @@ public class PackWriter {
/** {@link #reader} recast to the reuse interface, if it supports it. */
private final ObjectReuseAsIs reuseSupport;

private final PackConfig config;

private List<ObjectToPack> sortedByName;

private byte packcsum[];

private boolean reuseDeltas = DEFAULT_REUSE_DELTAS;

private boolean reuseObjects = DEFAULT_REUSE_OBJECTS;

private boolean deltaBaseAsOffset = DEFAULT_DELTA_BASE_AS_OFFSET;

private boolean deltaCompress = true;
private boolean deltaBaseAsOffset;

private int maxDeltaDepth = DEFAULT_MAX_DELTA_DEPTH;

private int deltaSearchWindowSize = DEFAULT_DELTA_SEARCH_WINDOW_SIZE;

private long deltaSearchMemoryLimit;

private long deltaCacheSize = DEFAULT_DELTA_CACHE_SIZE;

private int deltaCacheLimit = DEFAULT_DELTA_CACHE_LIMIT;

private int indexVersion;

private long bigFileThreshold = DEFAULT_BIG_FILE_THRESHOLD;

private int threads = 1;
private boolean reuseDeltas;

private boolean thin;

@@ -245,7 +187,7 @@ public class PackWriter {
* reader to read from the repository with.
*/
public PackWriter(final ObjectReader reader) {
this(null, reader);
this(new PackConfig(), reader);
}

/**
@@ -260,105 +202,38 @@ public class PackWriter {
* reader to read from the repository with.
*/
public PackWriter(final Repository repo, final ObjectReader reader) {
    this(new PackConfig(repo), reader);
}

/**
 * Create writer with a specified configuration.
 * <p>
 * Objects for packing are specified in {@link #preparePack(Iterator)} or
 * {@link #preparePack(ProgressMonitor, Collection, Collection)}.
 *
 * @param config
 *            configuration for the pack writer.
 * @param reader
 *            reader to read from the repository with.
 */
public PackWriter(final PackConfig config, final ObjectReader reader) {
    this.config = config;
    this.reader = reader;
    if (reader instanceof ObjectReuseAsIs)
        reuseSupport = ((ObjectReuseAsIs) reader);
    else
        reuseSupport = null;

    deltaBaseAsOffset = config.isDeltaBaseAsOffset();
    reuseDeltas = config.isReuseDeltas();
}

/**
* Check whether writer can store delta base as an offset (new style
* reducing pack size) or should store it as an object id (legacy style,
* compatible with old readers).
* <p>
* Default setting: {@link #DEFAULT_DELTA_BASE_AS_OFFSET}
* </p>
*
* Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
*
* @return true if delta base is stored as an offset; false if it is stored
* as an object id.
@@ -371,9 +246,8 @@ public class PackWriter {
* Set writer delta base format. Delta base can be written as an offset in a
* pack file (new approach reducing file size) or as an object id (legacy
* approach, compatible with old readers).
* <p>
* Default setting: {@link #DEFAULT_DELTA_BASE_AS_OFFSET}
* </p>
*
* Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
*
* @param deltaBaseAsOffset
* boolean indicating whether delta base can be stored as an
@@ -383,235 +257,6 @@ public class PackWriter {
this.deltaBaseAsOffset = deltaBaseAsOffset;
}

/**
* Check whether the writer will create new deltas on the fly.
* <p>
* Default setting: true
* </p>
*
* @return true if the writer will create a new delta when either
* {@link #isReuseDeltas()} is false, or no suitable delta is
* available for reuse.
*/
public boolean isDeltaCompress() {
return deltaCompress;
}

/**
* Set whether or not the writer will create new deltas on the fly.
*
* @param deltaCompress
* true to create deltas when {@link #isReuseDeltas()} is false,
* or when a suitable delta isn't available for reuse. Set to
* false to write whole objects instead.
*/
public void setDeltaCompress(boolean deltaCompress) {
this.deltaCompress = deltaCompress;
}

/**
* Get maximum depth of delta chain set up for this writer. Generated chains
* are not longer than this value.
* <p>
* Default setting: {@link #DEFAULT_MAX_DELTA_DEPTH}
* </p>
*
* @return maximum delta chain depth.
*/
public int getMaxDeltaDepth() {
return maxDeltaDepth;
}

/**
* Set up maximum depth of delta chain for this writer. Generated chains are
* not longer than this value. Too low value causes low compression level,
* while too big makes unpacking (reading) longer.
* <p>
* Default setting: {@link #DEFAULT_MAX_DELTA_DEPTH}
* </p>
*
* @param maxDeltaDepth
* maximum delta chain depth.
*/
public void setMaxDeltaDepth(int maxDeltaDepth) {
this.maxDeltaDepth = maxDeltaDepth;
}

/**
* Get the number of objects to try when looking for a delta base.
* <p>
* This limit is per thread, if 4 threads are used the actual memory
* used will be 4 times this value.
*
* @return the object count to be searched.
*/
public int getDeltaSearchWindowSize() {
return deltaSearchWindowSize;
}

/**
* Set the number of objects considered when searching for a delta base.
* <p>
* Default setting: {@link #DEFAULT_DELTA_SEARCH_WINDOW_SIZE}
* </p>
*
* @param objectCount
* number of objects to search at once. Must be at least 2.
*/
public void setDeltaSearchWindowSize(int objectCount) {
if (objectCount <= 2)
setDeltaCompress(false);
else
deltaSearchWindowSize = objectCount;
}

/**
* Get maximum number of bytes to put into the delta search window.
* <p>
* Default setting is 0, for an unlimited amount of memory usage. Actual
* memory used is the lower limit of either this setting, or the sum of
* space used by at most {@link #getDeltaSearchWindowSize()} objects.
* <p>
* This limit is per thread, if 4 threads are used the actual memory
* limit will be 4 times this value.
*
* @return the memory limit.
*/
public long getDeltaSearchMemoryLimit() {
return deltaSearchMemoryLimit;
}

/**
* Set the maximum number of bytes to put into the delta search window.
* <p>
* Default setting is 0, for an unlimited amount of memory usage. If the
* memory limit is reached before {@link #getDeltaSearchWindowSize()} the
* window size is temporarily lowered.
*
* @param memoryLimit
* Maximum number of bytes to load at once, 0 for unlimited.
*/
public void setDeltaSearchMemoryLimit(long memoryLimit) {
deltaSearchMemoryLimit = memoryLimit;
}

/**
* Get the size of the in-memory delta cache.
* <p>
* This limit is for the entire writer, even if multiple threads are used.
*
* @return maximum number of bytes worth of delta data to cache in memory.
* If 0 the cache is infinite in size (up to the JVM heap limit
* anyway). A very tiny size such as 1 indicates the cache is
* effectively disabled.
*/
public long getDeltaCacheSize() {
return deltaCacheSize;
}

/**
* Set the maximum number of bytes of delta data to cache.
* <p>
* During delta search, up to this many bytes worth of small or hard to
* compute deltas will be stored in memory. This cache speeds up writing by
* allowing the cached entry to simply be dumped to the output stream.
*
* @param size
* number of bytes to cache. Set to 0 to enable an infinite
* cache, set to 1 (an impossible size for any delta) to disable
* the cache.
*/
public void setDeltaCacheSize(long size) {
deltaCacheSize = size;
}

/**
* Maximum size in bytes of a delta to cache.
*
* @return maximum size (in bytes) of a delta that should be cached.
*/
public int getDeltaCacheLimit() {
return deltaCacheLimit;
}

/**
* Set the maximum size of a delta that should be cached.
* <p>
* During delta search, any delta smaller than this size will be cached, up
* to the {@link #getDeltaCacheSize()} maximum limit. This speeds up writing
* by allowing these cached deltas to be output as-is.
*
* @param size
* maximum size (in bytes) of a delta to be cached.
*/
public void setDeltaCacheLimit(int size) {
deltaCacheLimit = size;
}

/**
* Get the maximum file size that will be delta compressed.
* <p>
* Files bigger than this setting will not be delta compressed, as they are
* more than likely already highly compressed binary data files that do not
* delta compress well, such as MPEG videos.
*
* @return the configured big file threshold.
*/
public long getBigFileThreshold() {
return bigFileThreshold;
}

/**
* Set the maximum file size that should be considered for deltas.
*
* @param bigFileThreshold
* the limit, in bytes.
*/
public void setBigFileThreshold(long bigFileThreshold) {
this.bigFileThreshold = bigFileThreshold;
}

/**
* Get the compression level applied to objects in the pack.
*
* @return current compression level, see {@link java.util.zip.Deflater}.
*/
public int getCompressionLevel() {
return compressionLevel;
}

/**
* Set the compression level applied to objects in the pack.
*
* @param level
* compression level, must be a valid level recognized by the
* {@link java.util.zip.Deflater} class. Typically this setting
* is {@link java.util.zip.Deflater#BEST_SPEED}.
*/
public void setCompressionLevel(int level) {
compressionLevel = level;
}

/** @return number of threads used for delta compression. */
public int getThreads() {
return threads;
}

/**
* Set the number of threads to use for delta compression.
* <p>
* During delta compression, if there are enough objects to be considered
* the writer will start up concurrent threads and allow them to compress
* different sections of the repository concurrently.
*
* @param threads
* number of threads to use. If <= 0 the number of available
* processors for this JVM is used.
*/
public void setThread(int threads) {
this.threads = threads;
}

/** @return true if this writer is producing a thin pack. */
public boolean isThin() {
return thin;
@@ -651,18 +296,6 @@ public class PackWriter {
ignoreMissingUninteresting = ignore;
}

/**
* Set the pack index file format version this instance will create.
*
* @param version
* the version to write. The special version 0 designates the
* oldest (most compatible) format available for the objects.
* @see PackIndexWriter
*/
public void setIndexVersion(final int version) {
indexVersion = version;
}

/**
* Returns objects number in a pack file that was created by this writer.
*
@@ -791,6 +424,7 @@ public class PackWriter {
public void writeIndex(final OutputStream indexStream) throws IOException {
final List<ObjectToPack> list = sortByName();
final PackIndexWriter iw;
int indexVersion = config.getIndexVersion();
if (indexVersion <= 0)
iw = PackIndexWriter.createOldestPossible(indexStream, list);
else
@@ -842,9 +476,9 @@ public class PackWriter {
if (writeMonitor == null)
writeMonitor = NullProgressMonitor.INSTANCE;

if ((reuseDeltas || reuseObjects) && reuseSupport != null)
if ((reuseDeltas || config.isReuseObjects()) && reuseSupport != null)
searchForReuse();
if (deltaCompress)
if (config.isDeltaCompress())
searchForDeltas(compressMonitor);

final PackOutputStream out = new PackOutputStream(writeMonitor,
@@ -954,7 +588,7 @@ public class PackWriter {

// If its too big for us to handle, skip over it.
//
if (bigFileThreshold <= sz || Integer.MAX_VALUE <= sz)
if (config.getBigFileThreshold() <= sz || Integer.MAX_VALUE <= sz)
return false;

// If its too tiny for the delta compression to work, skip it.
@@ -970,21 +604,19 @@ public class PackWriter {
final ObjectToPack[] list, final int cnt)
throws MissingObjectException, IncorrectObjectTypeException,
LargeObjectException, IOException {
int threads = config.getThreads();
if (threads == 0)
threads = Runtime.getRuntime().availableProcessors();

if (threads <= 1 || cnt <= 2 * getDeltaSearchWindowSize()) {
DeltaCache dc = new DeltaCache(this);
DeltaWindow dw = new DeltaWindow(this, dc, reader);
if (threads <= 1 || cnt <= 2 * config.getDeltaSearchWindowSize()) {
DeltaCache dc = new DeltaCache(config);
DeltaWindow dw = new DeltaWindow(config, dc, reader);
dw.search(monitor, list, 0, cnt);
return;
}

final DeltaCache dc = new ThreadSafeDeltaCache(this);
final DeltaCache dc = new ThreadSafeDeltaCache(config);
final ProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);

// Guess at the size of batch we want. Because we don't really
// have a way for a thread to steal work from another thread if
@@ -992,9 +624,10 @@ public class PackWriter {
// are a bit smaller.
//
int estSize = cnt / (threads * 2);
if (estSize < 2 * getDeltaSearchWindowSize())
estSize = 2 * getDeltaSearchWindowSize();
if (estSize < 2 * config.getDeltaSearchWindowSize())
estSize = 2 * config.getDeltaSearchWindowSize();

final List<DeltaTask> myTasks = new ArrayList<DeltaTask>(threads * 2);
for (int i = 0; i < cnt;) {
final int start = i;
final int batchSize;
@@ -1019,39 +652,67 @@ public class PackWriter {
batchSize = end - start;
}
i += batchSize;
myTasks.add(new DeltaTask(config, reader, dc, pm, batchSize, start, list));
}

final Executor executor = config.getExecutor();
final List<Throwable> errors = Collections
        .synchronizedList(new ArrayList<Throwable>());
if (executor instanceof ExecutorService) {
    // Caller supplied us a service, use it directly.
    //
    runTasks((ExecutorService) executor, myTasks, errors);

} else if (executor == null) {
    // Caller didn't give us a way to run the tasks, spawn up a
    // temporary thread pool and make sure it tears down cleanly.
    //
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    try {
        runTasks(pool, myTasks, errors);
    } finally {
        pool.shutdown();
        for (;;) {
            try {
                if (pool.awaitTermination(60, TimeUnit.SECONDS))
                    break;
            } catch (InterruptedException e) {
                throw new IOException(
                        JGitText.get().packingCancelledDuringObjectsWriting);
            }
        }
    }
} else {
    // The caller gave us an executor, but it might not do
    // asynchronous execution. Wrap everything and hope it
    // can schedule these for us.
    //
    final CountDownLatch done = new CountDownLatch(myTasks.size());
    for (final DeltaTask task : myTasks) {
        executor.execute(new Runnable() {
            public void run() {
                try {
                    task.call();
                } catch (Throwable failure) {
                    errors.add(failure);
                } finally {
                    done.countDown();
                }
            }
        });
    }
    try {
        done.await();
    } catch (InterruptedException ie) {
        // We can't abort the other tasks as we have no handle.
        // Cross our fingers and just break out anyway.
        //
        throw new IOException(
                JGitText.get().packingCancelledDuringObjectsWriting);
    }
}

// If any task threw an error, try to report it back as
// though we weren't using a threaded search algorithm.
//
if (!errors.isEmpty()) {
@@ -1069,6 +730,28 @@ public class PackWriter {
}
}

private void runTasks(ExecutorService pool, List<DeltaTask> tasks,
List<Throwable> errors) throws IOException {
List<Future<?>> futures = new ArrayList<Future<?>>(tasks.size());
for (DeltaTask task : tasks)
futures.add(pool.submit(task));

try {
for (Future<?> f : futures) {
try {
f.get();
} catch (ExecutionException failed) {
errors.add(failed.getCause());
}
}
} catch (InterruptedException ie) {
for (Future<?> f : futures)
f.cancel(true);
throw new IOException(
JGitText.get().packingCancelledDuringObjectsWriting);
}
}

private void writeObjects(ProgressMonitor writeMonitor, PackOutputStream out)
throws IOException {
for (List<ObjectToPack> list : objectsLists) {
@@ -1196,8 +879,8 @@ public class PackWriter {

private TemporaryBuffer.Heap delta(final ObjectToPack otp)
throws IOException {
DeltaIndex index = new DeltaIndex(buffer(reader, otp.getDeltaBaseId()));
byte[] res = buffer(reader, otp);
DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
byte[] res = buffer(otp);

// We never would have proposed this pair if the delta would be
// larger than the unpacked version of the object. So using it
@@ -1208,7 +891,12 @@ public class PackWriter {
return delta;
}

byte[] buffer(ObjectReader or, AnyObjectId objId) throws IOException {
private byte[] buffer(AnyObjectId objId) throws IOException {
return buffer(config, reader, objId);
}

static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
throws IOException {
ObjectLoader ldr = or.open(objId);
if (!ldr.isLarge())
return ldr.getCachedBytes();
@@ -1221,7 +909,7 @@ public class PackWriter {
// If it really is too big to work with, abort out now.
//
long sz = ldr.getSize();
if (getBigFileThreshold() <= sz || Integer.MAX_VALUE < sz)
if (config.getBigFileThreshold() <= sz || Integer.MAX_VALUE < sz)
throw new LargeObjectException(objId.copy());

// Its considered to be large by the loader, but we really
@@ -1248,7 +936,7 @@ public class PackWriter {

private Deflater deflater() {
if (myDeflater == null)
myDeflater = new Deflater(compressionLevel);
myDeflater = new Deflater(config.getCompressionLevel());
return myDeflater;
}

@@ -1404,7 +1092,7 @@ public class PackWriter {
otp.clearDeltaBase();
otp.clearReuseAsIs();
}
} else if (nFmt == PACK_WHOLE && reuseObjects) {
} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
otp.clearDeltaBase();
otp.setReuseAsIs();
otp.setWeight(nWeight);
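The threaded delta search above now handles three executor cases: an ExecutorService is used directly, null gets a temporary pool that is shut down afterwards, and any other Executor is wrapped with a CountDownLatch. A sketch of a caller exercising the first case, where pool lifetime stays with the caller:

import java.util.concurrent.ExecutorService;

import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;

class CallerOwnedPool {
    static PackConfig withPool(Repository db, ExecutorService pool) {
        PackConfig pc = new PackConfig(db);
        pc.setThreads(4);     // per the javadoc, set alongside the executor
        pc.setExecutor(pool); // PackWriter will not shut this pool down
        return pc;
    }
}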

org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/ThreadSafeDeltaCache.java (+2, -2)

@@ -48,8 +48,8 @@ import java.util.concurrent.locks.ReentrantLock;
class ThreadSafeDeltaCache extends DeltaCache {
private final ReentrantLock lock;

ThreadSafeDeltaCache(PackWriter pw) {
super(pw);
ThreadSafeDeltaCache(PackConfig pc) {
super(pc);
lock = new ReentrantLock();
}


org.eclipse.jgit/src/org/eclipse/jgit/transport/BasePackPushConnection.java (+2, -1)

@@ -231,7 +231,8 @@ class BasePackPushConnection extends BasePackConnection implements
List<ObjectId> newObjects = new ArrayList<ObjectId>(refUpdates.size());

final long start;
final PackWriter writer = new PackWriter(local);
final PackWriter writer = new PackWriter(transport.getPackConfig(),
local.newObjectReader());
try {

for (final Ref r : getRefs())

org.eclipse.jgit/src/org/eclipse/jgit/transport/BundleWriter.java (+20, -2)

@@ -61,6 +61,7 @@ import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.revwalk.RevCommit;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;

/**
@@ -81,12 +82,14 @@ import org.eclipse.jgit.storage.pack.PackWriter;
* overall bundle size.
*/
public class BundleWriter {
private final PackWriter packWriter;
private final Repository db;

private final Map<String, ObjectId> include;

private final Set<RevCommit> assume;

private PackConfig packConfig;

/**
* Create a writer for a bundle.
*
@@ -94,11 +97,22 @@ public class BundleWriter {
* repository where objects are stored.
*/
public BundleWriter(final Repository repo) {
packWriter = new PackWriter(repo);
db = repo;
include = new TreeMap<String, ObjectId>();
assume = new HashSet<RevCommit>();
}

/**
* Set the configuration used by the pack generator.
*
* @param pc
* configuration controlling packing parameters. If null the
* source repository's settings will be used.
*/
public void setPackConfig(PackConfig pc) {
this.packConfig = pc;
}

/**
* Include an object (and everything reachable from it) in the bundle.
*
@@ -166,6 +180,10 @@ public class BundleWriter {
*/
public void writeBundle(ProgressMonitor monitor, OutputStream os)
throws IOException {
PackConfig pc = packConfig;
if (pc == null)
pc = new PackConfig(db);
PackWriter packWriter = new PackWriter(pc, db.newObjectReader());
try {
final HashSet<ObjectId> inc = new HashSet<ObjectId>();
final HashSet<ObjectId> exc = new HashSet<ObjectId>();
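
BundleWriter now creates its PackWriter inside writeBundle(), using the caller-supplied PackConfig or, when none was set, a fresh PackConfig(db). A hedged usage sketch; the ref name, the disabled-delta override, and the helper itself are made up for illustration:

import java.io.IOException;
import java.io.OutputStream;

import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.BundleWriter;

class BundleSketch {
	static void writeBundle(Repository repo, OutputStream out)
			throws IOException {
		BundleWriter bw = new BundleWriter(repo);
		PackConfig pc = new PackConfig(repo);
		pc.setDeltaCompress(false); // illustrative override
		bw.setPackConfig(pc); // null would restore repo-derived defaults
		bw.include("refs/heads/master", repo.resolve("refs/heads/master"));
		bw.writeBundle(NullProgressMonitor.INSTANCE, out);
	}
}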

+ 15 - 0
org.eclipse.jgit/src/org/eclipse/jgit/transport/Daemon.java View File

@@ -63,6 +63,7 @@ import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.RepositoryCache;
import org.eclipse.jgit.lib.RepositoryCache.FileKey;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.util.FS;

/** Basic daemon for the anonymous <code>git://</code> transport protocol. */
@@ -90,6 +91,8 @@ public class Daemon {

private int timeout;

private PackConfig packConfig;

/** Configure a daemon to listen on any available network port. */
public Daemon() {
this(null);
@@ -120,6 +123,7 @@ public class Daemon {
final UploadPack rp = new UploadPack(db);
final InputStream in = dc.getInputStream();
rp.setTimeout(Daemon.this.getTimeout());
rp.setPackConfig(Daemon.this.packConfig);
rp.upload(in, dc.getOutputStream(), null);
}
}, new DaemonService("receive-pack", "receivepack") {
@@ -242,6 +246,17 @@ public class Daemon {
timeout = seconds;
}

/**
* Set the configuration used by the pack generator.
*
* @param pc
* configuration controlling packing parameters. If null, the
* source repository's settings will be used.
*/
public void setPackConfig(PackConfig pc) {
this.packConfig = pc;
}

/**
* Start this daemon on a background thread.
*
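
Since the daemon injects its PackConfig into each UploadPack it spawns (rp.setPackConfig above), one configuration now governs every fetch the process serves. A sketch of an embedding server; the export directory and thread cap shown are hypothetical deployer choices:

import java.io.File;
import java.io.IOException;

import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.Daemon;

class DaemonSketch {
	public static void main(String[] args) throws IOException {
		Daemon d = new Daemon(); // listen on any available port
		PackConfig pc = new PackConfig();
		pc.setThreads(4); // cap delta-search threads for all clients
		d.setPackConfig(pc);
		d.exportDirectory(new File("/srv/git")); // hypothetical path
		d.start();
	}
}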

+ 30 - 0
org.eclipse.jgit/src/org/eclipse/jgit/transport/Transport.java View File

@@ -66,6 +66,7 @@ import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.lib.TransferConfig;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.util.FS;

/**
@@ -554,6 +555,9 @@ public abstract class Transport {
/** Timeout in seconds to wait before aborting an IO read or write. */
private int timeout;

/** Pack configuration used by this transport to make pack file. */
private PackConfig packConfig;

/**
* Create a new transport instance.
*
@@ -791,6 +795,32 @@ public abstract class Transport {
timeout = seconds;
}

/**
* Get the configuration used by the pack generator to make packs.
*
* If {@link #setPackConfig(PackConfig)} was previously given null, a new
* PackConfig is created on demand by this method using the source
* repository's settings.
*
* @return the pack configuration. Never null.
*/
public PackConfig getPackConfig() {
if (packConfig == null)
packConfig = new PackConfig(local);
return packConfig;
}

/**
* Set the configuration used by the pack generator.
*
* @param pc
* configuration controlling packing parameters. If null, the
* source repository's settings will be used.
*/
public void setPackConfig(PackConfig pc) {
packConfig = pc;
}

/**
* Fetch objects and refs from the remote repository to the local one.
* <p>
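
Because getPackConfig() builds a repository-derived PackConfig on first use, callers may treat it as never null; setPackConfig(null) simply re-arms that lazy fallback. A sketch of tuning a push through the public Transport API; the remote name and delta-depth override are illustrative:

import java.net.URISyntaxException;

import org.eclipse.jgit.errors.NotSupportedException;
import org.eclipse.jgit.errors.TransportException;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.Transport;

class TransportSketch {
	static void push(Repository repo) throws NotSupportedException,
			TransportException, URISyntaxException {
		Transport tn = Transport.open(repo, "origin");
		try {
			PackConfig pc = new PackConfig(repo);
			pc.setMaxDeltaDepth(10);
			tn.setPackConfig(pc);
			// getPackConfig() now returns pc; without the setter it
			// would lazily create one from the repository's settings.
			tn.push(NullProgressMonitor.INSTANCE, null);
		} finally {
			tn.close();
		}
	}
}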

+ 21 - 5
org.eclipse.jgit/src/org/eclipse/jgit/transport/UploadPack.java View File

@@ -69,6 +69,7 @@ import org.eclipse.jgit.revwalk.RevFlagSet;
import org.eclipse.jgit.revwalk.RevObject;
import org.eclipse.jgit.revwalk.RevTag;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackWriter;
import org.eclipse.jgit.transport.BasePackFetchConnection.MultiAck;
import org.eclipse.jgit.transport.RefAdvertiser.PacketLineOutRefAdvertiser;
@@ -102,6 +103,9 @@ public class UploadPack {
/** Revision traversal support over {@link #db}. */
private final RevWalk walk;

/** Configuration to pass into the PackWriter. */
private PackConfig packConfig;

/** Timeout in seconds to wait for client interaction. */
private int timeout;

@@ -258,6 +262,17 @@ public class UploadPack {
this.refFilter = refFilter != null ? refFilter : RefFilter.DEFAULT;
}

/**
* Set the configuration used by the pack generator.
*
* @param pc
* configuration controlling packing parameters. If null, the
* source repository's settings will be used.
*/
public void setPackConfig(PackConfig pc) {
this.packConfig = pc;
}

/**
* Execute the upload task on the socket.
*
@@ -548,8 +563,6 @@ public class UploadPack {
}

private void sendPack() throws IOException {
final boolean thin = options.contains(OPTION_THIN_PACK);
final boolean progress = !options.contains(OPTION_NO_PROGRESS);
final boolean sideband = options.contains(OPTION_SIDE_BAND)
|| options.contains(OPTION_SIDE_BAND_64K);

@@ -563,15 +576,18 @@ public class UploadPack {

packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA,
bufsz, rawOut);
if (progress)
if (!options.contains(OPTION_NO_PROGRESS))
pm = new SideBandProgressMonitor(new SideBandOutputStream(
SideBandOutputStream.CH_PROGRESS, bufsz, rawOut));
}

final PackWriter pw = new PackWriter(db, walk.getObjectReader());
PackConfig cfg = packConfig;
if (cfg == null)
cfg = new PackConfig(db);
final PackWriter pw = new PackWriter(cfg, walk.getObjectReader());
try {
pw.setDeltaBaseAsOffset(options.contains(OPTION_OFS_DELTA));
pw.setThin(thin);
pw.setThin(options.contains(OPTION_THIN_PACK));
pw.preparePack(pm, wantAll, commonBase);
if (options.contains(OPTION_INCLUDE_TAG)) {
for (final Ref r : refs.values()) {
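
UploadPack applies the same null-means-default rule in sendPack(), so a server only sets a config when it wants to deviate from the repository's own pack.* settings. A sketch of serving one fetch over caller-provided streams; the window-size override and timeout are illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.UploadPack;

class UploadSketch {
	static void serve(Repository repo, InputStream in, OutputStream out)
			throws IOException {
		UploadPack up = new UploadPack(repo);
		PackConfig pc = new PackConfig(repo);
		pc.setDeltaSearchWindowSize(20);
		up.setPackConfig(pc); // null falls back to new PackConfig(db)
		up.setTimeout(30); // seconds to wait on client interaction
		up.upload(in, out, null); // third stream: optional messages channel
	}
}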

+ 14 - 10
org.eclipse.jgit/src/org/eclipse/jgit/transport/WalkPushConnection.java View File

@@ -103,6 +103,9 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
/** Database connection to the remote repository. */
private final WalkRemoteObjectDatabase dest;

/** The configured transport we were constructed by. */
private final Transport transport;

/**
* Packs already known to reside in the remote repository.
* <p>
@@ -123,9 +126,9 @@ class WalkPushConnection extends BaseConnection implements PushConnection {

WalkPushConnection(final WalkTransport walkTransport,
final WalkRemoteObjectDatabase w) {
Transport t = (Transport)walkTransport;
local = t.local;
uri = t.getURI();
transport = (Transport) walkTransport;
local = transport.local;
uri = transport.getURI();
dest = w;
}

@@ -209,7 +212,8 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
String pathPack = null;
String pathIdx = null;

final PackWriter pw = new PackWriter(local);
final PackWriter writer = new PackWriter(transport.getPackConfig(),
local.newObjectReader());
try {
final List<ObjectId> need = new ArrayList<ObjectId>();
final List<ObjectId> have = new ArrayList<ObjectId>();
@@ -220,20 +224,20 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
if (r.getPeeledObjectId() != null)
have.add(r.getPeeledObjectId());
}
pw.preparePack(monitor, need, have);
writer.preparePack(monitor, need, have);

// We don't have to continue further if the pack will
// be an empty pack, as the remote has all objects it
// needs to complete this change.
//
if (pw.getObjectsNumber() == 0)
if (writer.getObjectsNumber() == 0)
return;

packNames = new LinkedHashMap<String, String>();
for (final String n : dest.getPackNames())
packNames.put(n, n);

final String base = "pack-" + pw.computeName().name();
final String base = "pack-" + writer.computeName().name();
final String packName = base + ".pack";
pathPack = "pack/" + packName;
pathIdx = "pack/" + base + ".idx";
@@ -254,7 +258,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
OutputStream os = dest.writeFile(pathPack, monitor, wt + "..pack");
try {
os = new BufferedOutputStream(os);
pw.writePack(monitor, monitor, os);
writer.writePack(monitor, monitor, os);
} finally {
os.close();
}
@@ -262,7 +266,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection {
os = dest.writeFile(pathIdx, monitor, wt + "..idx");
try {
os = new BufferedOutputStream(os);
pw.writeIndex(os);
writer.writeIndex(os);
} finally {
os.close();
}
@@ -282,7 +286,7 @@ class WalkPushConnection extends BaseConnection implements PushConnection {

throw new TransportException(uri, JGitText.get().cannotStoreObjects, err);
} finally {
pw.release();
writer.release();
}
}

