hasOnlyObjectIds("4624442d68ee402a94364191085b77137618633e",
"f900c8326a43303685c46b279b9f70411bff1a4b"));
assertEquals(13000, request.getFilterSpec().getBlobLimit());
+ assertEquals(-1, request.getFilterSpec().getTreeDepthLimit());
}
}
ConfigBuilder.start().allowFilter().done());
FetchV2Request request = parser.parseFetchRequest(pckIn);
assertEquals(0, request.getFilterSpec().getBlobLimit());
+ assertEquals(-1, request.getFilterSpec().getTreeDepthLimit());
}
@Test
ConfigBuilder.start().allowFilter().done());
FetchV2Request request = parser.parseFetchRequest(pckIn);
assertEquals(15, request.getFilterSpec().getBlobLimit());
+ assertEquals(-1, request.getFilterSpec().getTreeDepthLimit());
+ }
+
+ @Test
+ public void testFetchWithTreeDepthFilter() throws IOException {
+ PacketLineIn pckIn = formatAsPacketLine(PacketLineIn.DELIM,
+ "filter tree:3",
+ PacketLineIn.END);
+ ProtocolV2Parser parser = new ProtocolV2Parser(
+ ConfigBuilder.start().allowFilter().done());
+ FetchV2Request request = parser.parseFetchRequest(pckIn);
+ assertEquals(-1, request.getFilterSpec().getBlobLimit());
+ assertEquals(3, request.getFilterSpec().getTreeDepthLimit());
}
@Test
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.StringWriter;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import org.eclipse.jgit.dircache.DirCache;
+import org.eclipse.jgit.dircache.DirCacheBuilder;
+import org.eclipse.jgit.dircache.DirCacheEntry;
import org.eclipse.jgit.errors.PackProtocolException;
import org.eclipse.jgit.errors.TransportException;
import org.eclipse.jgit.internal.storage.dfs.DfsGarbageCollector;
import org.eclipse.jgit.internal.storage.dfs.InMemoryRepository;
import org.eclipse.jgit.junit.TestRepository;
import org.eclipse.jgit.lib.NullProgressMonitor;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.PersonIdent;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
assertTrue(client.getObjectDatabase().has(small.toObjectId()));
}
+ /**
+  * Helper for constructing a {@link RevTree} from arbitrary
+  * {@link DirCacheBuilder} entries. Subclasses supply the entries via
+  * {@link #addElements(DirCacheBuilder)}; this allows adding whole
+  * subtrees with {@code DirCacheBuilder#addTree}, not only file entries.
+  */
+ abstract class TreeBuilder {
+ /** Adds this tree's entries (files and/or subtrees) to the builder. */
+ abstract void addElements(DirCacheBuilder dcBuilder) throws Exception;
+
+ /** Writes the assembled tree into the remote repository and parses it. */
+ RevTree build() throws Exception {
+ DirCache dc = DirCache.newInCore();
+ DirCacheBuilder dcBuilder = dc.builder();
+ addElements(dcBuilder);
+ dcBuilder.finish();
+ ObjectId id;
+ // Persist the tree so subsequent fetches from "remote" can serve it.
+ try (ObjectInserter ins =
+ remote.getRepository().newObjectInserter()) {
+ id = dc.writeTree(ins);
+ ins.flush();
+ }
+ return remote.getRevWalk().parseTree(id);
+ }
+ }
+
+ /**
+  * Creates a commit whose root tree contains:
+  * <pre>
+  * 1      (blobLowDepth,  blob at depth 1)
+  * 2/1    (blobHighDepth, blob at depth 2 inside subtree "2")
+  * </pre>
+  * used by the {@code tree:&lt;depth&gt;} filter tests to check which
+  * trees/blobs are transferred at each depth limit.
+  */
+ class DeepTreePreparator {
+ RevBlob blobLowDepth = remote.blob("lo");
+ RevBlob blobHighDepth = remote.blob("hi");
+
+ RevTree subtree = remote.tree(remote.file("1", blobHighDepth));
+ RevTree rootTree = (new TreeBuilder() {
+ @Override
+ void addElements(DirCacheBuilder dcBuilder) throws Exception {
+ dcBuilder.add(remote.file("1", blobLowDepth));
+ dcBuilder.addTree(new byte[] {'2'}, DirCacheEntry.STAGE_0,
+ remote.getRevWalk().getObjectReader(), subtree);
+ }
+ }).build();
+ RevCommit commit = remote.commit(rootTree);
+
+ // Field initializers above may throw; the constructor must declare it.
+ DeepTreePreparator() throws Exception {}
+ }
+
+ /**
+  * Performs a protocol-v2 fetch of {@code wants} from the server with a
+  * {@code filter tree:<depth>} line, enabling "uploadpack.allowfilter"
+  * first, and parses the returned pack into the client repository.
+  *
+  * @param depth
+  *            value for the "tree:" filter line.
+  * @param wants
+  *            object ids to request.
+  */
+ private void uploadV2WithTreeDepthFilter(
+ long depth, ObjectId... wants) throws Exception {
+ server.getConfig().setBoolean("uploadpack", null, "allowfilter", true);
+
+ List<String> input = new ArrayList<>();
+ input.add("command=fetch\n");
+ input.add(PacketLineIn.DELIM);
+ for (ObjectId want : wants) {
+ input.add("want " + want.getName() + "\n");
+ }
+ input.add("filter tree:" + depth + "\n");
+ input.add("done\n");
+ input.add(PacketLineIn.END);
+ ByteArrayInputStream recvStream =
+ uploadPackV2(RequestPolicy.ANY, /*refFilter=*/null,
+ /*hook=*/null, input.toArray(new String[0]));
+ PacketLineIn pckIn = new PacketLineIn(recvStream);
+ assertThat(pckIn.readString(), is("packfile"));
+ parsePack(recvStream);
+ }
+
+ @Test
+ public void testV2FetchFilterTreeDepth0() throws Exception {
+ DeepTreePreparator preparator = new DeepTreePreparator();
+ remote.update("master", preparator.commit);
+
+ uploadV2WithTreeDepthFilter(0, preparator.commit.toObjectId());
+
+ // With tree:0 no trees or blobs at all should reach the client.
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.rootTree.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.subtree.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.blobLowDepth.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.blobHighDepth.toObjectId()));
+ }
+
+ @Test
+ public void testV2FetchFilterTreeDepth1_serverHasBitmap() throws Exception {
+ DeepTreePreparator preparator = new DeepTreePreparator();
+ remote.update("master", preparator.commit);
+
+ // The bitmap should be ignored since we need to track the depth while
+ // traversing the trees.
+ generateBitmaps(server);
+
+ uploadV2WithTreeDepthFilter(1, preparator.commit.toObjectId());
+
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.rootTree.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.subtree.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.blobLowDepth.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.blobHighDepth.toObjectId()));
+ }
+
+ @Test
+ public void testV2FetchFilterTreeDepth2() throws Exception {
+ DeepTreePreparator preparator = new DeepTreePreparator();
+ remote.update("master", preparator.commit);
+
+ uploadV2WithTreeDepthFilter(2, preparator.commit.toObjectId());
+
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.rootTree.toObjectId()));
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.subtree.toObjectId()));
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.blobLowDepth.toObjectId()));
+ assertFalse(client.getObjectDatabase()
+ .has(preparator.blobHighDepth.toObjectId()));
+ }
+
+ /**
+ * Creates a commit with the following files:
+ * <pre>
+ * a/x/b/foo
+ * x/b/foo
+ * </pre>
+ * which has an identical tree in two locations: once at / and once at /a
+ */
+ class RepeatedSubtreePreparator {
+ RevBlob foo = remote.blob("foo");
+ RevTree subtree3 = remote.tree(remote.file("foo", foo));
+ RevTree subtree2 = (new TreeBuilder() {
+ @Override
+ void addElements(DirCacheBuilder dcBuilder) throws Exception {
+ dcBuilder.addTree(new byte[] {'b'}, DirCacheEntry.STAGE_0,
+ remote.getRevWalk().getObjectReader(), subtree3);
+ }
+ }).build();
+ RevTree subtree1 = (new TreeBuilder() {
+ @Override
+ void addElements(DirCacheBuilder dcBuilder) throws Exception {
+ dcBuilder.addTree(new byte[] {'x'}, DirCacheEntry.STAGE_0,
+ remote.getRevWalk().getObjectReader(), subtree2);
+ }
+ }).build();
+ RevTree rootTree = (new TreeBuilder() {
+ @Override
+ void addElements(DirCacheBuilder dcBuilder) throws Exception {
+ dcBuilder.addTree(new byte[] {'a'}, DirCacheEntry.STAGE_0,
+ remote.getRevWalk().getObjectReader(), subtree1);
+ dcBuilder.addTree(new byte[] {'x'}, DirCacheEntry.STAGE_0,
+ remote.getRevWalk().getObjectReader(), subtree2);
+ }
+ }).build();
+ RevCommit commit = remote.commit(rootTree);
+
+ RepeatedSubtreePreparator() throws Exception {}
+ }
+
+ @Test
+ public void testV2FetchFilterTreeDepth_iterateOverTreeAtTwoLevels()
+ throws Exception {
+ // Test tree:<depth> where a tree is iterated over twice - once where a
+ // subentry is too deep to be included, and again where the blob inside
+ // it is shallow enough to be included.
+ RepeatedSubtreePreparator preparator = new RepeatedSubtreePreparator();
+ remote.update("master", preparator.commit);
+
+ uploadV2WithTreeDepthFilter(4, preparator.commit.toObjectId());
+
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.foo.toObjectId()));
+ }
+
+ @Test
+ public void testWantFilteredObject() throws Exception {
+ RepeatedSubtreePreparator preparator = new RepeatedSubtreePreparator();
+ remote.update("master", preparator.commit);
+
+ // Specify wanted blob objects that are deep enough to be filtered. We
+ // should still upload them.
+ uploadV2WithTreeDepthFilter(
+ 3,
+ preparator.commit.toObjectId(),
+ preparator.foo.toObjectId());
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.foo.toObjectId()));
+
+ client = newRepo("client");
+ // Specify a wanted tree object that is deep enough to be filtered. We
+ // should still upload it.
+ uploadV2WithTreeDepthFilter(
+ 2,
+ preparator.commit.toObjectId(),
+ preparator.subtree3.toObjectId());
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.foo.toObjectId()));
+ assertTrue(client.getObjectDatabase()
+ .has(preparator.subtree3.toObjectId()));
+ }
+
@Test
public void testV2FetchFilterWhenNotAllowed() throws Exception {
RevCommit commit = remote.commit().message("0").create();
: new ObjectWalk(reader);
}
+ /**
+ * A visitation policy which causes objects to be visited repeatedly by
+ * making {@code shouldVisit} always return {@code true}.
+ */
+ private static final ObjectWalk.VisitationPolicy ALWAYS_VISIT_POLICY =
+ new ObjectWalk.VisitationPolicy() {
+ @Override
+ public boolean shouldVisit(RevObject o) {
+ return true;
+ }
+
+ @Override
+ public void visited(RevObject o) {}
+ };
+
/**
* Prepare the list of objects to be written to the pack stream.
* <p>
if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
throw new IllegalArgumentException(
JGitText.get().shallowPacksRequireDepthWalk);
+ if (filterSpec.getTreeDepthLimit() >= 0) {
+ walk.setVisitationPolicy(ALWAYS_VISIT_POLICY);
+ }
findObjectsToPack(countingMonitor, walk, interestingObjects,
uninterestingObjects, noBitmaps);
}
byte[] pathBuf = walker.getPathBuffer();
int pathLen = walker.getPathLength();
bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
- filterAndAddObject(o, o.getType(), pathHash, want);
+ if (!depthSkip(o, walker)) {
+ filterAndAddObject(o, o.getType(), pathHash, want);
+ }
countingMonitor.update(1);
}
} else {
continue;
if (exclude(o))
continue;
- filterAndAddObject(o, o.getType(), walker.getPathHashCode(), want);
+ if (!depthSkip(o, walker)) {
+ filterAndAddObject(o, o.getType(), walker.getPathHashCode(),
+ want);
+ }
countingMonitor.update(1);
}
}
objectsMap.add(otp);
}
+ /**
+  * Determines if the object should be omitted from the pack as a result of
+  * its depth (probably because of the {@code tree:<depth>} filter).
+  *
+  * @param obj
+  * the object to check whether it should be omitted.
+  * @param walker
+  * the walker being used for traversal.
+  * @return whether the given object should be skipped.
+  */
+ private boolean depthSkip(@NonNull RevObject obj, ObjectWalk walker) {
+ long treeDepth = walker.getTreeDepth();
+
+ // Check if this object needs to be rejected because it is a tree or
+ // blob that is too deep from the root tree.
+
+ // A blob is considered one level deeper than the tree that contains it.
+ if (obj.getType() == OBJ_BLOB) {
+ treeDepth++;
+ }
+
+ // TODO: Do not continue traversing the tree, since its children
+ // will also be too deep.
+ return filterSpec.getTreeDepthLimit() != -1 &&
+ treeDepth > filterSpec.getTreeDepthLimit();
+ }
+
// Adds the given object as an object to be packed, first performing
// filtering on blobs at or exceeding a given size.
private void filterAndAddObject(@NonNull AnyObjectId src, int type,
package org.eclipse.jgit.revwalk;
+import static java.util.Objects.requireNonNull;
import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
*/
private static final int IN_PENDING = RevWalk.REWRITE;
+ /**
+ * When walking over a tree and blob graph, objects are usually marked as
+ * seen as they are visited and this "seen" status is checked upon the next
+ * visit. If they are already "seen" then they are not processed (returned
+ * by {@link ObjectWalk#nextObject()}) again. However, this behavior can be
+ * overridden by supplying a different implementation of this class.
+ *
+ * @since 5.4
+ */
+ public interface VisitationPolicy {
+ /**
+ * Whenever the rev or object walk reaches a Git object, if that object
+ * already exists as a RevObject, this method is called to determine if
+ * that object should be visited.
+ *
+ * @param o
+ * the object to check if it should be visited
+ * @return true if the object should be visited
+ */
+ boolean shouldVisit(RevObject o);
+
+ /**
+ * Called when an object is visited.
+ *
+ * @param o
+ * the object that was visited
+ */
+ void visited(RevObject o);
+ }
+
+ /**
+ * The default visitation policy: causes all objects to be visited exactly
+ * once.
+ *
+ * @since 5.4
+ */
+ public static final VisitationPolicy SIMPLE_VISITATION_POLICY =
+ new VisitationPolicy() {
+ @Override
+ public boolean shouldVisit(RevObject o) {
+ return (o.flags & SEEN) == 0;
+ }
+
+ @Override
+ public void visited(RevObject o) {
+ o.flags |= SEEN;
+ }
+ };
+
private List<RevObject> rootObjects;
private BlockObjQueue pendingObjects;
private boolean boundary;
+ private VisitationPolicy visitationPolicy = SIMPLE_VISITATION_POLICY;
+
/**
* Create a new revision and object walker for a given repository.
*
objectFilter = newFilter != null ? newFilter : ObjectFilter.ALL;
}
+ /**
+ * Sets the visitation policy to use during this walk.
+ *
+ * @param policy
+ * the {@code VisitationPolicy} to use
+ * @since 5.4
+ */
+ public void setVisitationPolicy(VisitationPolicy policy) {
+ assertNotStarted();
+ visitationPolicy = requireNonNull(policy);
+ }
+
/** {@inheritDoc} */
@Override
public RevCommit next() throws MissingObjectException,
}
RevObject obj = objects.get(idBuffer);
- if (obj != null && (obj.flags & SEEN) != 0)
+ if (obj != null && !visitationPolicy.shouldVisit(obj))
continue;
int mode = parseMode(buf, startPtr, ptr, tv);
- int flags;
switch (mode >>> TYPE_SHIFT) {
case TYPE_FILE:
case TYPE_SYMLINK:
if (obj == null) {
obj = new RevBlob(idBuffer);
- obj.flags = SEEN;
+ visitationPolicy.visited(obj);
objects.add(obj);
return obj;
}
if (!(obj instanceof RevBlob))
throw new IncorrectObjectTypeException(obj, OBJ_BLOB);
- obj.flags = flags = obj.flags | SEEN;
- if ((flags & UNINTERESTING) == 0)
+ visitationPolicy.visited(obj);
+ if ((obj.flags & UNINTERESTING) == 0)
return obj;
if (boundary)
return obj;
case TYPE_TREE:
if (obj == null) {
obj = new RevTree(idBuffer);
- obj.flags = SEEN;
+ visitationPolicy.visited(obj);
objects.add(obj);
return pushTree(obj);
}
if (!(obj instanceof RevTree))
throw new IncorrectObjectTypeException(obj, OBJ_TREE);
- obj.flags = flags = obj.flags | SEEN;
- if ((flags & UNINTERESTING) == 0)
+ visitationPolicy.visited(obj);
+ if ((obj.flags & UNINTERESTING) == 0)
return pushTree(obj);
if (boundary)
return pushTree(obj);
if (o == null) {
return null;
}
- int flags = o.flags;
- if ((flags & SEEN) != 0)
+ if (!visitationPolicy.shouldVisit(o)) {
continue;
- flags |= SEEN;
- o.flags = flags;
- if ((flags & UNINTERESTING) == 0 | boundary) {
+ }
+ visitationPolicy.visited(o);
+ if ((o.flags & UNINTERESTING) == 0 | boundary) {
if (o instanceof RevTree) {
// The previous while loop should have exhausted the stack
// of trees.
return RawParseUtils.decode(pathBuf, 0, pathLen);
}
+ /**
+ * @return the current traversal depth from the root tree object
+ * @since 5.4
+ */
+ public int getTreeDepth() {
+ if (currVisit == null) {
+ return 0;
+ }
+ return currVisit.depth;
+ }
+
/**
* Get the current object's path hash code.
* <p>
tv.buf = reader.open(obj, OBJ_TREE).getCachedBytes();
tv.parent = currVisit;
currVisit = tv;
+ if (tv.parent == null) {
+ tv.depth = 1;
+ } else {
+ tv.depth = tv.parent.depth + 1;
+ }
return obj;
}
/** Number of bytes in the path leading up to this tree. */
int pathLen;
+
+ /** Number of levels deep from the root tree. 0 for root tree. */
+ int depth;
}
}
private final long blobLimit;
- private FilterSpec(long blobLimit) {
+ private final long treeDepthLimit;
+
+ private FilterSpec(long blobLimit, long treeDepthLimit) {
this.blobLimit = blobLimit;
+ this.treeDepthLimit = treeDepthLimit;
}
/**
* Process the content of "filter" line from the protocol. It has a shape
- * like "blob:none" or "blob:limit=N", with limit a positive number.
+ * like:
+ *
+ * <ul>
+ * <li>"blob:none"
+ * <li>"blob:limit=N", with N >= 0
+ * <li>"tree:DEPTH", with DEPTH >= 0
+ * </ul>
*
* @param filterLine
* the content of the "filter" line in the protocol
*/
public static FilterSpec fromFilterLine(String filterLine)
throws PackProtocolException {
- long blobLimit = -1;
-
if (filterLine.equals("blob:none")) { //$NON-NLS-1$
- blobLimit = 0;
+ return FilterSpec.withBlobLimit(0);
} else if (filterLine.startsWith("blob:limit=")) { //$NON-NLS-1$
+ long blobLimit = -1;
try {
blobLimit = Long
.parseLong(filterLine.substring("blob:limit=".length())); //$NON-NLS-1$
} catch (NumberFormatException e) {
- throw new PackProtocolException(MessageFormat
- .format(JGitText.get().invalidFilter, filterLine));
+ // Do not change blobLimit so that we throw a
+ // PackProtocolException later.
+ }
+ if (blobLimit >= 0) {
+ return FilterSpec.withBlobLimit(blobLimit);
+ }
+ } else if (filterLine.startsWith("tree:")) { //$NON-NLS-1$
+ long treeDepthLimit = -1;
+ try {
+ treeDepthLimit = Long
+ .parseLong(filterLine.substring("tree:".length())); //$NON-NLS-1$
+ } catch (NumberFormatException e) {
+ // Do not change treeDepthLimit so that we throw a
+ // PackProtocolException later.
+ }
+ if (treeDepthLimit >= 0) {
+ return FilterSpec.withTreeDepthLimit(treeDepthLimit);
}
- }
- /*
- * We must have (1) either "blob:none" or "blob:limit=" set (because we
- * only support blob size limits for now), and (2) if the latter, then
- * it must be nonnegative. Throw if (1) or (2) is not met.
- */
- if (blobLimit < 0) {
- throw new PackProtocolException(
- MessageFormat.format(
- JGitText.get().invalidFilter, filterLine));
}
- return new FilterSpec(blobLimit);
+ // Did not match any known filter format.
+ throw new PackProtocolException(
+ MessageFormat.format(JGitText.get().invalidFilter, filterLine));
}
/**
throw new IllegalArgumentException(
"blobLimit cannot be negative: " + blobLimit); //$NON-NLS-1$
}
- return new FilterSpec(blobLimit);
+ return new FilterSpec(blobLimit, -1);
+ }
+
+ /**
+ * @param treeDepthLimit
+ * the tree depth limit in a "tree:[depth]" filter line
+ * @return a filter spec which filters blobs and trees beyond a certain tree
+ * depth
+ */
+ static FilterSpec withTreeDepthLimit(long treeDepthLimit) {
+ if (treeDepthLimit < 0) {
+ throw new IllegalArgumentException(
+ "treeDepthLimit cannot be negative: " + treeDepthLimit); //$NON-NLS-1$
+ }
+ return new FilterSpec(-1, treeDepthLimit);
}
/**
* A placeholder that indicates no filtering.
*/
- public static final FilterSpec NO_FILTER = new FilterSpec(-1);
+ public static final FilterSpec NO_FILTER = new FilterSpec(-1, -1);
/**
* @return -1 if this filter does not filter blobs based on size, or a
return blobLimit;
}
+ /**
+ * @return -1 if this filter does not filter blobs and trees based on depth,
+ * or a non-negative integer representing the max tree depth of
+ * blobs and trees to fetch
+ */
+ public long getTreeDepthLimit() {
+ return treeDepthLimit;
+ }
+
/**
* @return true if this filter doesn't filter out anything
*/
public boolean isNoOp() {
- return blobLimit == -1;
+ return blobLimit == -1 && treeDepthLimit == -1;
}
/**
}
pw.setUseBitmaps(
req.getDepth() == 0
- && req.getClientShallowCommits().isEmpty());
+ && req.getClientShallowCommits().isEmpty()
+ && req.getFilterSpec().getTreeDepthLimit() == -1);
pw.setClientShallowCommits(req.getClientShallowCommits());
pw.setReuseDeltaCommits(true);
pw.setDeltaBaseAsOffset(