When running IndexPack we use a CachedObjectDirectory, which knows what objects are loose and tries to avoid stat(2) calls for objects that do not exist in the repository, as stat(2) on Win32 is very slow. However, large delta objects found in a pack file are expanded into a loose object in order to avoid costly delta chain processing when that object is used as a base for another delta. If this expansion occurs while working with the CachedObjectDirectory, we need to update the cached directory data to include this new object; otherwise it won't be available when we try to open it during the object verify phase. Bug: 324868 Change-Id: Idf0c76d4849d69aa415ead32e46a435622395d68 Signed-off-by: Shawn O. Pearce <spearce@spearce.org> (tag: v0.9.3)
} | } | ||||
@Override | @Override | ||||
boolean insertUnpackedObject(File tmp, ObjectId objectId, boolean force) { | |||||
return wrapped.insertUnpackedObject(tmp, objectId, force); | |||||
InsertLooseObjectResult insertUnpackedObject(File tmp, ObjectId objectId, | |||||
boolean createDuplicate) { | |||||
InsertLooseObjectResult result = wrapped.insertUnpackedObject(tmp, | |||||
objectId, createDuplicate); | |||||
switch (result) { | |||||
case INSERTED: | |||||
case EXISTS_LOOSE: | |||||
if (!unpackedObjects.contains(objectId)) | |||||
unpackedObjects.add(objectId); | |||||
break; | |||||
case EXISTS_PACKED: | |||||
case FAILURE: | |||||
break; | |||||
} | |||||
return result; | |||||
} | } | ||||
@Override | @Override |
import org.eclipse.jgit.storage.pack.PackWriter; | import org.eclipse.jgit.storage.pack.PackWriter; | ||||
abstract class FileObjectDatabase extends ObjectDatabase { | abstract class FileObjectDatabase extends ObjectDatabase { | ||||
static enum InsertLooseObjectResult { | |||||
INSERTED, EXISTS_PACKED, EXISTS_LOOSE, FAILURE; | |||||
} | |||||
@Override | @Override | ||||
public ObjectReader newReader() { | public ObjectReader newReader() { | ||||
return new WindowCursor(this); | return new WindowCursor(this); | ||||
abstract long getObjectSize2(WindowCursor curs, String objectName, | abstract long getObjectSize2(WindowCursor curs, String objectName, | ||||
AnyObjectId objectId) throws IOException; | AnyObjectId objectId) throws IOException; | ||||
abstract boolean insertUnpackedObject(File tmp, ObjectId id, boolean force); | |||||
abstract InsertLooseObjectResult insertUnpackedObject(File tmp, | |||||
ObjectId id, boolean createDuplicate); | |||||
abstract FileObjectDatabase newCachedFileObjectDatabase(); | abstract FileObjectDatabase newCachedFileObjectDatabase(); | ||||
} | } | ||||
@Override | @Override | ||||
boolean insertUnpackedObject(File tmp, ObjectId id, boolean force) { | |||||
if (!force && has(id)) { | |||||
// Object is already in the repository, remove temporary file. | |||||
// | |||||
InsertLooseObjectResult insertUnpackedObject(File tmp, ObjectId id, | |||||
boolean createDuplicate) { | |||||
// If the object is already in the repository, remove temporary file. | |||||
// | |||||
if (unpackedObjectCache.isUnpacked(id)) { | |||||
tmp.delete(); | tmp.delete(); | ||||
return true; | |||||
return InsertLooseObjectResult.EXISTS_LOOSE; | |||||
} | } | ||||
if (!createDuplicate && has(id)) { | |||||
tmp.delete(); | |||||
return InsertLooseObjectResult.EXISTS_PACKED; | |||||
} | |||||
tmp.setReadOnly(); | tmp.setReadOnly(); | ||||
final File dst = fileFor(id); | final File dst = fileFor(id); | ||||
if (force && dst.exists()) { | |||||
if (dst.exists()) { | |||||
// We want to be extra careful and avoid replacing an object | |||||
// that already exists. We can't be sure renameTo() would | |||||
// fail on all platforms if dst exists, so we check first. | |||||
// | |||||
tmp.delete(); | tmp.delete(); | ||||
return true; | |||||
return InsertLooseObjectResult.EXISTS_LOOSE; | |||||
} | } | ||||
if (tmp.renameTo(dst)) { | if (tmp.renameTo(dst)) { | ||||
unpackedObjectCache.add(id); | unpackedObjectCache.add(id); | ||||
return true; | |||||
return InsertLooseObjectResult.INSERTED; | |||||
} | } | ||||
// Maybe the directory doesn't exist yet as the object | // Maybe the directory doesn't exist yet as the object | ||||
dst.getParentFile().mkdir(); | dst.getParentFile().mkdir(); | ||||
if (tmp.renameTo(dst)) { | if (tmp.renameTo(dst)) { | ||||
unpackedObjectCache.add(id); | unpackedObjectCache.add(id); | ||||
return true; | |||||
return InsertLooseObjectResult.INSERTED; | |||||
} | } | ||||
if (!force && has(id)) { | |||||
if (!createDuplicate && has(id)) { | |||||
tmp.delete(); | tmp.delete(); | ||||
return true; | |||||
return InsertLooseObjectResult.EXISTS_PACKED; | |||||
} | } | ||||
// The object failed to be renamed into its proper | // The object failed to be renamed into its proper | ||||
// fail. | // fail. | ||||
// | // | ||||
tmp.delete(); | tmp.delete(); | ||||
return false; | |||||
return InsertLooseObjectResult.FAILURE; | |||||
} | } | ||||
boolean tryAgain1() { | boolean tryAgain1() { |
// NOTE(review): method signature elided by the diff view; this is the
// body of the inserter's loose-object write path — confirm against
// ObjectDirectoryInserter before applying.
final MessageDigest md = digest();
final File tmp = toTemp(md, type, len, is);
final ObjectId id = ObjectId.fromRaw(md.digest());
// Any outcome where the object is present afterwards (freshly
// inserted, or already packed/loose) counts as success.
switch (db.insertUnpackedObject(tmp, id, false /* no duplicate */)) {
case INSERTED:
case EXISTS_PACKED:
case EXISTS_LOOSE:
	return id;

case FAILURE:
default:
	break;
}

final File dst = db.fileFor(id);
throw new ObjectWritingException("Unable to create new object: " + dst);
}