Always send refs' objects despite "filter" in pack 48/126048/2
author     Jonathan Tan <jonathantanmy@google.com>
           Thu, 12 Jul 2018 17:58:28 +0000 (10:58 -0700)
committer  Jonathan Nieder <jrn@google.com>
           Sat, 21 Jul 2018 00:09:05 +0000 (17:09 -0700)
In a0c9016abd ("upload-pack: send refs' objects despite "filter"",
2018-07-09), Git updated the "filter" option in the fetch-pack/upload-pack
protocol so that objects explicitly specified in "want" lines are no longer
filtered out, even if they match the filter's criterion. Update JGit to
match that behavior.

Change-Id: Ia4d74326edb89e61062e397e05483298c50f9232
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
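
For context, the client-side use of this feature looks roughly like the
sketch below, a minimal example modeled on the tests added in this change.
Here clientRepo, uri, and wantedBlobId are placeholders (the blob id is
assumed to be advertised by the server, e.g. a ref points directly at it, as
in the tests), and the server is assumed to have uploadpack.allowfilter
enabled. The point is that a blob named directly in a "want" line is
delivered even though the blob-limit filter would otherwise exclude it.

    import java.util.Arrays;
    import org.eclipse.jgit.lib.NullProgressMonitor;
    import org.eclipse.jgit.lib.Repository;
    import org.eclipse.jgit.transport.RefSpec;
    import org.eclipse.jgit.transport.Transport;
    import org.eclipse.jgit.transport.URIish;

    // clientRepo, uri and wantedBlobId stand in for an existing local
    // repository, the remote's URI, and the id of a blob the remote advertises.
    void fetchWithFilter(Repository clientRepo, URIish uri, String wantedBlobId)
            throws Exception {
        try (Transport tn = Transport.open(clientRepo, uri)) {
            // Omit every blob from the returned pack...
            tn.setFilterBlobLimit(0);
            // ...except objects requested explicitly: the blob asked for by id
            // below is still sent once the server carries this change.
            tn.fetch(NullProgressMonitor.INSTANCE, Arrays.asList(
                    new RefSpec("refs/heads/master"),
                    new RefSpec(wantedBlobId)));
        }
    }
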
org.eclipse.jgit.test/tst/org/eclipse/jgit/transport/UploadPackTest.java
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/PackWriter.java

org.eclipse.jgit.test/tst/org/eclipse/jgit/transport/UploadPackTest.java
index ef083da183e589314b17cf70c1dd61ee8e812dfe..2a3e1ca56dd7f449876dbae762dc368ec694fd87 100644
@@ -226,6 +226,44 @@ public class UploadPackTest {
                }
        }
 
+       @Test
+       public void testFetchExplicitBlobWithFilter() throws Exception {
+               InMemoryRepository server2 = newRepo("server2");
+               TestRepository<InMemoryRepository> remote2 =
+                               new TestRepository<>(server2);
+               RevBlob blob1 = remote2.blob("foobar");
+               RevBlob blob2 = remote2.blob("fooba");
+               RevTree tree = remote2.tree(remote2.file("1", blob1),
+                               remote2.file("2", blob2));
+               RevCommit commit = remote2.commit(tree);
+               remote2.update("master", commit);
+               remote2.update("a_blob", blob1);
+
+               server2.getConfig().setBoolean("uploadpack", null, "allowfilter", true);
+
+               testProtocol = new TestProtocol<>(
+                               new UploadPackFactory<Object>() {
+                                       @Override
+                                       public UploadPack create(Object req, Repository db)
+                                                       throws ServiceNotEnabledException,
+                                                       ServiceNotAuthorizedException {
+                                               UploadPack up = new UploadPack(db);
+                                               return up;
+                                       }
+                               }, null);
+               uri = testProtocol.register(ctx, server2);
+
+               try (Transport tn = testProtocol.open(uri, client, "server2")) {
+                       tn.setFilterBlobLimit(0);
+                       tn.fetch(NullProgressMonitor.INSTANCE, Arrays.asList(
+                                               new RefSpec(commit.name()),
+                                               new RefSpec(blob1.name())));
+                       assertTrue(client.hasObject(tree.toObjectId()));
+                       assertTrue(client.hasObject(blob1.toObjectId()));
+                       assertFalse(client.hasObject(blob2.toObjectId()));
+               }
+       }
+
        @Test
        public void testFetchWithBlobLimitFilter() throws Exception {
                InMemoryRepository server2 = newRepo("server2");
@@ -261,6 +299,47 @@ public class UploadPackTest {
                }
        }
 
+       @Test
+       public void testFetchExplicitBlobWithFilterAndBitmaps() throws Exception {
+               InMemoryRepository server2 = newRepo("server2");
+               TestRepository<InMemoryRepository> remote2 =
+                               new TestRepository<>(server2);
+               RevBlob blob1 = remote2.blob("foobar");
+               RevBlob blob2 = remote2.blob("fooba");
+               RevTree tree = remote2.tree(remote2.file("1", blob1),
+                               remote2.file("2", blob2));
+               RevCommit commit = remote2.commit(tree);
+               remote2.update("master", commit);
+               remote2.update("a_blob", blob1);
+
+               server2.getConfig().setBoolean("uploadpack", null, "allowfilter", true);
+
+               // generate bitmaps
+               new DfsGarbageCollector(server2).pack(null);
+               server2.scanForRepoChanges();
+
+               testProtocol = new TestProtocol<>(
+                               new UploadPackFactory<Object>() {
+                                       @Override
+                                       public UploadPack create(Object req, Repository db)
+                                                       throws ServiceNotEnabledException,
+                                                       ServiceNotAuthorizedException {
+                                               UploadPack up = new UploadPack(db);
+                                               return up;
+                                       }
+                               }, null);
+               uri = testProtocol.register(ctx, server2);
+
+               try (Transport tn = testProtocol.open(uri, client, "server2")) {
+                       tn.setFilterBlobLimit(0);
+                       tn.fetch(NullProgressMonitor.INSTANCE, Arrays.asList(
+                                               new RefSpec(commit.name()),
+                                               new RefSpec(blob1.name())));
+                       assertTrue(client.hasObject(blob1.toObjectId()));
+                       assertFalse(client.hasObject(blob2.toObjectId()));
+               }
+       }
+
        @Test
        public void testFetchWithBlobLimitFilterAndBitmaps() throws Exception {
                InMemoryRepository server2 = newRepo("server2");
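
Both new tests enable the filter capability on the server with the
uploadpack.allowfilter setting, the same configuration key C Git uses to let
clients request partial-clone filters. A minimal sketch of flipping that
switch on an arbitrary Repository (server is a placeholder; the in-memory
repositories used by the tests do not need the save() call, but on-disk
repositories do):

    // cfg is an org.eclipse.jgit.lib.StoredConfig.
    // Allow clients to pass "filter" in the fetch protocol.
    StoredConfig cfg = server.getConfig();
    cfg.setBoolean("uploadpack", null, "allowfilter", true);
    cfg.save();
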
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/PackWriter.java
index 36d6f0aebc562a9bd140b1b589fcfd01cb633a80..24af8a73ba0377479a1c7fed0f36d29a516f3d20 100644
@@ -1970,7 +1970,7 @@ public class PackWriter implements AutoCloseable {
                                byte[] pathBuf = walker.getPathBuffer();
                                int pathLen = walker.getPathLength();
                                bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
-                               filterAndAddObject(o, o.getType(), pathHash);
+                               filterAndAddObject(o, o.getType(), pathHash, want);
                                countingMonitor.update(1);
                        }
                } else {
@@ -1980,7 +1980,7 @@ public class PackWriter implements AutoCloseable {
                                        continue;
                                if (exclude(o))
                                        continue;
-                               filterAndAddObject(o, o.getType(), walker.getPathHashCode());
+                               filterAndAddObject(o, o.getType(), walker.getPathHashCode(), want);
                                countingMonitor.update(1);
                        }
                }
@@ -2013,7 +2013,7 @@ public class PackWriter implements AutoCloseable {
                                needBitmap.remove(objectId);
                                continue;
                        }
-                       filterAndAddObject(objectId, obj.getType(), 0);
+                       filterAndAddObject(objectId, obj.getType(), 0, want);
                }
 
                if (thin)
@@ -2075,12 +2075,14 @@ public class PackWriter implements AutoCloseable {
        // Adds the given object as an object to be packed, first performing
        // filtering on blobs at or exceeding a given size.
        private void filterAndAddObject(@NonNull AnyObjectId src, int type,
-                       int pathHashCode) throws IOException {
+                       int pathHashCode, @NonNull Set<? extends AnyObjectId> want)
+                       throws IOException {
 
                // Check if this object needs to be rejected, doing the cheaper
                // checks first.
                boolean reject = filterBlobLimit >= 0 &&
                        type == OBJ_BLOB &&
+                       !want.contains(src) &&
                        reader.getObjectSize(src, OBJ_BLOB) > filterBlobLimit;
                if (!reject) {
                        addObject(src, type, pathHashCode);
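
In effect, the blob-size filter now skips any object the client named
directly in a "want" line. Below is a simplified, self-contained restatement
of the new check (illustrative only: the method name and parameters are not
JGit's, and the real filterAndAddObject also threads through the path hash
and reads the object size via the ObjectReader):

    import java.util.Set;
    import org.eclipse.jgit.lib.AnyObjectId;
    import org.eclipse.jgit.lib.Constants;

    // A candidate object is dropped by the blob-limit filter only if the filter
    // is active, the object is a blob, it was NOT explicitly wanted, and its
    // size exceeds the configured limit.
    static boolean rejectedByFilter(long filterBlobLimit, int type, long size,
            AnyObjectId id, Set<? extends AnyObjectId> want) {
        return filterBlobLimit >= 0
                && type == Constants.OBJ_BLOB
                && !want.contains(id)
                && size > filterBlobLimit;
    }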