Instead of spooling large delta bases into temporary files and then immediately deleting them afterwards, spool the large delta out to a normal loose object. Later, any request for that large delta can be answered by reading from the loose object, which is much easier to stream efficiently for readers.

Since the object is now duplicated — once in the pack as a delta and again as a loose object — any future prune-packed will automatically delete the loose object variant, releasing the wasted disk space. As prune-packed is run automatically during either repack or gc, and gc --auto triggers automatically based on the number of loose objects, we get automatic cache management for free: large objects that were unpacked will be periodically cleared out, and will simply be restored later if they are needed again.

After a short offline discussion with Junio Hamano today, we may want to propose a change to prune-packed to hold onto larger loose objects which also exist in pack files as deltas, if the loose object was recently accessed or modified in the last 2 days.

Change-Id: I3668a3967c807010f48cd69f994dcbaaf582337c
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>

(tag: v0.9.1)
@@ -54,7 +54,6 @@ import org.eclipse.jgit.lib.Constants; | |||
import org.eclipse.jgit.lib.ObjectDatabase; | |||
import org.eclipse.jgit.lib.ObjectId; | |||
import org.eclipse.jgit.lib.ObjectIdSubclassMap; | |||
import org.eclipse.jgit.lib.ObjectInserter; | |||
import org.eclipse.jgit.lib.ObjectLoader; | |||
import org.eclipse.jgit.storage.pack.ObjectToPack; | |||
import org.eclipse.jgit.storage.pack.PackWriter; | |||
@@ -113,7 +112,7 @@ class CachedObjectDirectory extends FileObjectDatabase { | |||
} | |||
@Override | |||
public ObjectInserter newInserter() { | |||
public ObjectDirectoryInserter newInserter() { | |||
return wrapped.newInserter(); | |||
} | |||
@@ -213,6 +212,11 @@ class CachedObjectDirectory extends FileObjectDatabase { | |||
throw new UnsupportedOperationException(); | |||
} | |||
@Override | |||
boolean insertUnpackedObject(File tmp, ObjectId objectId, boolean force) { | |||
return wrapped.insertUnpackedObject(tmp, objectId, force); | |||
} | |||
@Override | |||
void selectObjectRepresentation(PackWriter packer, ObjectToPack otp, | |||
WindowCursor curs) throws IOException { |
@@ -62,6 +62,9 @@ abstract class FileObjectDatabase extends ObjectDatabase { | |||
return new WindowCursor(this); | |||
} | |||
@Override | |||
public abstract ObjectDirectoryInserter newInserter(); | |||
/** | |||
* Does the requested object exist in this database? | |||
* <p> | |||
@@ -246,6 +249,8 @@ abstract class FileObjectDatabase extends ObjectDatabase { | |||
abstract long getObjectSize2(WindowCursor curs, String objectName, | |||
AnyObjectId objectId) throws IOException; | |||
abstract boolean insertUnpackedObject(File tmp, ObjectId id, boolean force); | |||
abstract FileObjectDatabase newCachedFileObjectDatabase(); | |||
static class AlternateHandle { |
@@ -44,9 +44,12 @@ | |||
package org.eclipse.jgit.storage.file; | |||
import java.io.BufferedInputStream; | |||
import java.io.File; | |||
import java.io.FileOutputStream; | |||
import java.io.IOException; | |||
import java.io.InputStream; | |||
import java.util.zip.DataFormatException; | |||
import java.util.zip.DeflaterOutputStream; | |||
import java.util.zip.InflaterInputStream; | |||
import org.eclipse.jgit.errors.IncorrectObjectTypeException; | |||
@@ -58,7 +61,6 @@ import org.eclipse.jgit.lib.ObjectLoader; | |||
import org.eclipse.jgit.lib.ObjectStream; | |||
import org.eclipse.jgit.storage.pack.BinaryDelta; | |||
import org.eclipse.jgit.storage.pack.DeltaStream; | |||
import org.eclipse.jgit.util.TemporaryBuffer; | |||
import org.eclipse.jgit.util.io.TeeInputStream; | |||
class LargePackedDeltaObject extends ObjectLoader { | |||
@@ -165,14 +167,39 @@ class LargePackedDeltaObject extends ObjectLoader { | |||
@Override | |||
public ObjectStream openStream() throws MissingObjectException, IOException { | |||
// If the object was recently unpacked, its available loose. | |||
// The loose format is going to be faster to access than a | |||
// delta applied on top of a base. Use that whenever we can. | |||
// | |||
final ObjectId myId = getObjectId(); | |||
final WindowCursor wc = new WindowCursor(db); | |||
ObjectLoader ldr = db.openObject2(wc, myId.name(), myId); | |||
if (ldr != null) | |||
return ldr.openStream(); | |||
InputStream in = open(wc); | |||
in = new BufferedInputStream(in, 8192); | |||
return new ObjectStream.Filter(getType(), size, in) { | |||
// While we inflate the object, also deflate it back as a loose | |||
// object. This will later be cleaned up by a gc pass, but until | |||
// then we will reuse the loose form by the above code path. | |||
// | |||
int myType = getType(); | |||
long mySize = getSize(); | |||
final ObjectDirectoryInserter odi = db.newInserter(); | |||
final File tmp = odi.newTempFile(); | |||
DeflaterOutputStream dOut = odi.compress(new FileOutputStream(tmp)); | |||
odi.writeHeader(dOut, myType, mySize); | |||
in = new TeeInputStream(in, dOut); | |||
return new ObjectStream.Filter(myType, mySize, in) { | |||
@Override | |||
public void close() throws IOException { | |||
wc.release(); | |||
super.close(); | |||
odi.release(); | |||
wc.release(); | |||
db.insertUnpackedObject(tmp, myId, true /* force creation */); | |||
} | |||
}; | |||
} | |||
@@ -195,13 +222,9 @@ class LargePackedDeltaObject extends ObjectLoader { | |||
final ObjectLoader base = pack.load(wc, baseOffset); | |||
DeltaStream ds = new DeltaStream(delta) { | |||
private long baseSize = SIZE_UNKNOWN; | |||
private TemporaryBuffer.LocalFile buffer; | |||
@Override | |||
protected InputStream openBase() throws IOException { | |||
if (buffer != null) | |||
return buffer.openInputStream(); | |||
InputStream in; | |||
if (base instanceof LargePackedDeltaObject) | |||
in = ((LargePackedDeltaObject) base).open(wc); | |||
@@ -213,9 +236,7 @@ class LargePackedDeltaObject extends ObjectLoader { | |||
else if (in instanceof ObjectStream) | |||
baseSize = ((ObjectStream) in).getSize(); | |||
} | |||
buffer = new TemporaryBuffer.LocalFile(db.getDirectory()); | |||
return new TeeInputStream(in, buffer); | |||
return in; | |||
} | |||
@Override | |||
@@ -228,14 +249,11 @@ class LargePackedDeltaObject extends ObjectLoader { | |||
} | |||
return baseSize; | |||
} | |||
@Override | |||
public void close() throws IOException { | |||
super.close(); | |||
if (buffer != null) | |||
buffer.destroy(); | |||
} | |||
}; | |||
if (type == Constants.OBJ_BAD) { | |||
if (!(base instanceof LargePackedDeltaObject)) | |||
type = base.getType(); | |||
} | |||
if (size == SIZE_UNKNOWN) | |||
size = ds.getSize(); | |||
return ds; |
@@ -69,7 +69,6 @@ import org.eclipse.jgit.lib.Config; | |||
import org.eclipse.jgit.lib.Constants; | |||
import org.eclipse.jgit.lib.ObjectDatabase; | |||
import org.eclipse.jgit.lib.ObjectId; | |||
import org.eclipse.jgit.lib.ObjectInserter; | |||
import org.eclipse.jgit.lib.ObjectLoader; | |||
import org.eclipse.jgit.lib.RepositoryCache; | |||
import org.eclipse.jgit.lib.RepositoryCache.FileKey; | |||
@@ -176,7 +175,7 @@ public class ObjectDirectory extends FileObjectDatabase { | |||
} | |||
@Override | |||
public ObjectInserter newInserter() { | |||
public ObjectDirectoryInserter newInserter() { | |||
return new ObjectDirectoryInserter(this, config); | |||
} | |||
@@ -455,8 +454,48 @@ public class ObjectDirectory extends FileObjectDatabase { | |||
} | |||
} | |||
void addUnpackedObject(ObjectId id) { | |||
unpackedObjectCache.add(id); | |||
@Override | |||
boolean insertUnpackedObject(File tmp, ObjectId id, boolean force) { | |||
if (!force && has(id)) { | |||
// Object is already in the repository, remove temporary file. | |||
// | |||
tmp.delete(); | |||
return true; | |||
} | |||
tmp.setReadOnly(); | |||
final File dst = fileFor(id); | |||
if (force && dst.exists()) { | |||
tmp.delete(); | |||
return true; | |||
} | |||
if (tmp.renameTo(dst)) { | |||
unpackedObjectCache.add(id); | |||
return true; | |||
} | |||
// Maybe the directory doesn't exist yet as the object | |||
// directories are always lazily created. Note that we | |||
// try the rename first as the directory likely does exist. | |||
// | |||
dst.getParentFile().mkdir(); | |||
if (tmp.renameTo(dst)) { | |||
unpackedObjectCache.add(id); | |||
return true; | |||
} | |||
if (!force && has(id)) { | |||
tmp.delete(); | |||
return true; | |||
} | |||
// The object failed to be renamed into its proper | |||
// location and it doesn't exist in the repository | |||
// either. We really don't know what went wrong, so | |||
// fail. | |||
// | |||
tmp.delete(); | |||
return false; | |||
} | |||
boolean tryAgain1() { |
@@ -83,40 +83,10 @@ class ObjectDirectoryInserter extends ObjectInserter { | |||
final MessageDigest md = digest(); | |||
final File tmp = toTemp(md, type, len, is); | |||
final ObjectId id = ObjectId.fromRaw(md.digest()); | |||
if (db.has(id)) { | |||
// Object is already in the repository, remove temporary file. | |||
// | |||
tmp.delete(); | |||
if (db.insertUnpackedObject(tmp, id, false /* no duplicate */)) | |||
return id; | |||
} | |||
final File dst = db.fileFor(id); | |||
if (tmp.renameTo(dst)) { | |||
db.addUnpackedObject(id); | |||
return id; | |||
} | |||
// Maybe the directory doesn't exist yet as the object | |||
// directories are always lazily created. Note that we | |||
// try the rename first as the directory likely does exist. | |||
// | |||
dst.getParentFile().mkdir(); | |||
if (tmp.renameTo(dst)) { | |||
db.addUnpackedObject(id); | |||
return id; | |||
} | |||
if (db.has(id)) { | |||
tmp.delete(); | |||
return id; | |||
} | |||
// The object failed to be renamed into its proper | |||
// location and it doesn't exist in the repository | |||
// either. We really don't know what went wrong, so | |||
// fail. | |||
// | |||
tmp.delete(); | |||
throw new ObjectWritingException("Unable to create new object: " + dst); | |||
} | |||
@@ -140,15 +110,12 @@ class ObjectDirectoryInserter extends ObjectInserter { | |||
final InputStream is) throws IOException, FileNotFoundException, | |||
Error { | |||
boolean delete = true; | |||
File tmp = File.createTempFile("noz", null, db.getDirectory()); | |||
File tmp = newTempFile(); | |||
try { | |||
DigestOutputStream dOut = new DigestOutputStream( | |||
compress(new FileOutputStream(tmp)), md); | |||
try { | |||
dOut.write(Constants.encodedTypeString(type)); | |||
dOut.write((byte) ' '); | |||
dOut.write(Constants.encodeASCII(len)); | |||
dOut.write((byte) 0); | |||
writeHeader(dOut, type, len); | |||
final byte[] buf = buffer(); | |||
while (len > 0) { | |||
@@ -162,7 +129,6 @@ class ObjectDirectoryInserter extends ObjectInserter { | |||
dOut.close(); | |||
} | |||
tmp.setReadOnly(); | |||
delete = false; | |||
return tmp; | |||
} finally { | |||
@@ -171,7 +137,19 @@ class ObjectDirectoryInserter extends ObjectInserter { | |||
} | |||
} | |||
private DeflaterOutputStream compress(final OutputStream out) { | |||
void writeHeader(OutputStream out, final int type, long len) | |||
throws IOException { | |||
out.write(Constants.encodedTypeString(type)); | |||
out.write((byte) ' '); | |||
out.write(Constants.encodeASCII(len)); | |||
out.write((byte) 0); | |||
} | |||
File newTempFile() throws IOException { | |||
return File.createTempFile("noz", null, db.getDirectory()); | |||
} | |||
DeflaterOutputStream compress(final OutputStream out) { | |||
if (deflate == null) | |||
deflate = new Deflater(config.get(CoreConfig.KEY).getCompression()); | |||
else |