Implement atomic BatchRefUpdates for RefDirectory 71/100771/9
author Dave Borowitz <dborowitz@google.com>
Wed, 5 Jul 2017 17:45:39 +0000 (13:45 -0400)
committer Dave Borowitz <dborowitz@google.com>
Tue, 25 Jul 2017 17:14:50 +0000 (13:14 -0400)
The existing packed-refs file provides a mechanism for implementing
atomic multi-ref updates without any changes to the on-disk format or
lockfile protocol. We just need to make sure that there are no loose
refs involved in the transaction, which we can achieve by packing the
refs while holding locks on all loose refs. Full details of the
algorithm are in the PackedBatchRefUpdate javadoc.

This change does not implement reflog support, which will come in a
later change.

Change-Id: I09829544a0d4e8dbb141d28c748c3b96ef66fee1
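
For illustration only (not part of this change): a minimal sketch of how a
caller could drive the new atomic path through the public BatchRefUpdate API.
The gitDir, oldId and newId values are hypothetical placeholders assumed to
exist in the caller's code.

    import java.io.File;
    import java.io.IOException;

    import org.eclipse.jgit.lib.BatchRefUpdate;
    import org.eclipse.jgit.lib.NullProgressMonitor;
    import org.eclipse.jgit.lib.ObjectId;
    import org.eclipse.jgit.lib.Repository;
    import org.eclipse.jgit.revwalk.RevWalk;
    import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
    import org.eclipse.jgit.transport.ReceiveCommand;

    class AtomicBatchExample {
        static void updateAtomically(File gitDir, ObjectId oldId, ObjectId newId)
                throws IOException {
            try (Repository repo = new FileRepositoryBuilder().setGitDir(gitDir)
                    .build();
                    RevWalk rw = new RevWalk(repo)) {
                BatchRefUpdate batch = repo.getRefDatabase().newBatchUpdate();
                // Atomic becomes the default for RefDirectory with this change;
                // set explicitly here for clarity.
                batch.setAtomic(true);
                batch.addCommand(
                        new ReceiveCommand(oldId, newId, "refs/heads/master",
                                ReceiveCommand.Type.UPDATE),
                        new ReceiveCommand(ObjectId.zeroId(), newId,
                                "refs/heads/mirror", ReceiveCommand.Type.CREATE));
                batch.execute(rw, NullProgressMonitor.INSTANCE);
                // With setAtomic(true), either every command reports OK or no
                // ref is touched on disk.
            }
        }
    }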

org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/RefDirectoryTest.java
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/LockFile.java
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackedBatchRefUpdate.java [new file with mode: 0644]
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/RefDirectory.java
org.eclipse.jgit/src/org/eclipse/jgit/lib/BatchRefUpdate.java
org.eclipse.jgit/src/org/eclipse/jgit/transport/ReceiveCommand.java

diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/RefDirectoryTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/file/RefDirectoryTest.java
index 29f4a577a79a12bec560f64e10db028dec5f1a68..f5d04cd8c1e78f3929e15b67a8fb2b029796da84 100644 (file)
@@ -1294,7 +1294,7 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateSimpleNoForce() throws IOException {
+       public void testBatchRefUpdateSimpleNoForceNonAtomic() throws IOException {
                writeLooseRef("refs/heads/master", A);
                writeLooseRef("refs/heads/masters", B);
                List<ReceiveCommand> commands = Arrays.asList(
@@ -1303,6 +1303,7 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
                                new ReceiveCommand(B, A, "refs/heads/masters",
                                                ReceiveCommand.Type.UPDATE_NONFASTFORWARD));
                BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
                batchUpdate.addCommand(commands);
                batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
                Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
@@ -1316,7 +1317,7 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateSimpleForce() throws IOException {
+       public void testBatchRefUpdateSimpleNoForceAtomic() throws IOException {
                writeLooseRef("refs/heads/master", A);
                writeLooseRef("refs/heads/masters", B);
                List<ReceiveCommand> commands = Arrays.asList(
@@ -1325,6 +1326,39 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
                                new ReceiveCommand(B, A, "refs/heads/masters",
                                                ReceiveCommand.Type.UPDATE_NONFASTFORWARD));
                BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(0)));
+               assertEquals(ReceiveCommand.Result.REJECTED_NONFASTFORWARD, commands
+                               .get(1).getResult());
+               assertEquals("[HEAD, refs/heads/master, refs/heads/masters]", refs
+                               .keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+               assertEquals(B.getId(), refs.get("refs/heads/masters").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateSimpleForceNonAtomic() throws IOException {
+               testBatchRefUpdateSimpleForce(false);
+       }
+
+       @Test
+       public void testBatchRefUpdateSimpleForceAtomic() throws IOException {
+               testBatchRefUpdateSimpleForce(true);
+       }
+
+       private void testBatchRefUpdateSimpleForce(boolean atomic) throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               writeLooseRef("refs/heads/masters", B);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(B, A, "refs/heads/masters",
+                                               ReceiveCommand.Type.UPDATE_NONFASTFORWARD));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(atomic);
                batchUpdate.setAllowNonFastForwards(true);
                batchUpdate.addCommand(commands);
                batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
@@ -1338,13 +1372,25 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateNonFastForwardDoesNotDoExpensiveMergeCheck()
+       public void testBatchRefUpdateNonFastForwardDoesNotDoExpensiveMergeCheckNonAtomic()
                        throws IOException {
+               testBatchRefUpdateNonFastForwardDoesNotDoExpensiveMergeCheck(false);
+       }
+
+       @Test
+       public void testBatchRefUpdateNonFastForwardDoesNotDoExpensiveMergeCheckAtomic()
+                       throws IOException {
+               testBatchRefUpdateNonFastForwardDoesNotDoExpensiveMergeCheck(true);
+       }
+
+       private void testBatchRefUpdateNonFastForwardDoesNotDoExpensiveMergeCheck(
+                       boolean atomic) throws IOException {
                writeLooseRef("refs/heads/master", B);
                List<ReceiveCommand> commands = Arrays.asList(
                                new ReceiveCommand(B, A, "refs/heads/master",
                                                ReceiveCommand.Type.UPDATE_NONFASTFORWARD));
                BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(atomic);
                batchUpdate.setAllowNonFastForwards(true);
                batchUpdate.addCommand(commands);
                batchUpdate.execute(new RevWalk(diskRepo) {
@@ -1359,7 +1405,8 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateConflict() throws IOException {
+       public void testBatchRefUpdateFileDirectoryConflictNonAtomic()
+                       throws IOException {
                writeLooseRef("refs/heads/master", A);
                writeLooseRef("refs/heads/masters", B);
                List<ReceiveCommand> commands = Arrays.asList(
@@ -1370,11 +1417,14 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
                                new ReceiveCommand(zeroId(), A, "refs/heads",
                                                ReceiveCommand.Type.CREATE));
                BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
                batchUpdate.setAllowNonFastForwards(true);
                batchUpdate.addCommand(commands);
                batchUpdate
                                .execute(new RevWalk(diskRepo), NullProgressMonitor.INSTANCE);
                Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               // Non-atomic updates are applied in order: master succeeds, then master/x
+               // fails due to conflict.
                assertEquals(ReceiveCommand.Result.OK, commands.get(0).getResult());
                assertEquals(ReceiveCommand.Result.LOCK_FAILURE, commands.get(1)
                                .getResult());
@@ -1387,7 +1437,39 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateConflictThanksToDelete() throws IOException {
+       public void testBatchRefUpdateFileDirectoryConflictAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               writeLooseRef("refs/heads/masters", B);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), A, "refs/heads/master/x",
+                                               ReceiveCommand.Type.CREATE),
+                               new ReceiveCommand(zeroId(), A, "refs/heads",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate
+                               .execute(new RevWalk(diskRepo), NullProgressMonitor.INSTANCE);
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               // Atomic update sees that master and master/x are conflicting, then marks
+               // the first one in the list as LOCK_FAILURE and aborts the rest.
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(0).getResult());
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(1)));
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(2)));
+               assertEquals("[HEAD, refs/heads/master, refs/heads/masters]", refs
+                               .keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+               assertEquals(B.getId(), refs.get("refs/heads/masters").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateConflictThanksToDeleteNonAtomic()
+                       throws IOException {
                writeLooseRef("refs/heads/master", A);
                writeLooseRef("refs/heads/masters", B);
                List<ReceiveCommand> commands = Arrays.asList(
@@ -1398,6 +1480,7 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
                                new ReceiveCommand(B, zeroId(), "refs/heads/masters",
                                                ReceiveCommand.Type.DELETE));
                BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
                batchUpdate.setAllowNonFastForwards(true);
                batchUpdate.addCommand(commands);
                batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
@@ -1411,7 +1494,33 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateUpdateToMissingObject() throws IOException {
+       public void testBatchRefUpdateConflictThanksToDeleteAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               writeLooseRef("refs/heads/masters", B);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), A, "refs/heads/masters/x",
+                                               ReceiveCommand.Type.CREATE),
+                               new ReceiveCommand(B, zeroId(), "refs/heads/masters",
+                                               ReceiveCommand.Type.DELETE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.OK, commands.get(0).getResult());
+               assertEquals(ReceiveCommand.Result.OK, commands.get(1).getResult());
+               assertEquals(ReceiveCommand.Result.OK, commands.get(2).getResult());
+               assertEquals("[HEAD, refs/heads/master, refs/heads/masters/x]", refs
+                               .keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/masters/x").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateUpdateToMissingObjectNonAtomic() throws IOException {
                writeLooseRef("refs/heads/master", A);
                ObjectId bad =
                                ObjectId.fromString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef");
@@ -1436,7 +1545,32 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
        }
 
        @Test
-       public void testBatchRefUpdateAddMissingObject() throws IOException {
+       public void testBatchRefUpdateUpdateToMissingObjectAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               ObjectId bad =
+                               ObjectId.fromString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef");
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, bad, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), B, "refs/heads/foo2",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), NullProgressMonitor.INSTANCE);
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.REJECTED_MISSING_OBJECT,
+                               commands.get(0).getResult());
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(1)));
+               assertEquals("[HEAD, refs/heads/master]", refs.keySet()
+                               .toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateAddMissingObjectNonAtomic() throws IOException {
                writeLooseRef("refs/heads/master", A);
                ObjectId bad =
                                ObjectId.fromString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef");
@@ -1446,6 +1580,7 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
                                new ReceiveCommand(zeroId(), bad, "refs/heads/foo2",
                                                ReceiveCommand.Type.CREATE));
                BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
                batchUpdate.setAllowNonFastForwards(true);
                batchUpdate.addCommand(commands);
                batchUpdate.execute(new RevWalk(diskRepo), NullProgressMonitor.INSTANCE);
@@ -1458,6 +1593,160 @@ public class RefDirectoryTest extends LocalDiskRepositoryTestCase {
                assertEquals(B.getId(), refs.get("refs/heads/master").getObjectId());
        }
 
+       @Test
+       public void testBatchRefUpdateAddMissingObjectAtomic() throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               ObjectId bad =
+                               ObjectId.fromString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef");
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), bad, "refs/heads/foo2",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), NullProgressMonitor.INSTANCE);
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(0)));
+               assertEquals(ReceiveCommand.Result.REJECTED_MISSING_OBJECT,
+                               commands.get(1).getResult());
+               assertEquals("[HEAD, refs/heads/master]", refs.keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateOneNonExistentRefNonAtomic()
+                       throws IOException {
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/foo1",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), B, "refs/heads/foo2",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(0).getResult());
+               assertEquals(ReceiveCommand.Result.OK, commands.get(1).getResult());
+               assertEquals("[refs/heads/foo2]", refs.keySet().toString());
+               assertEquals(B.getId(), refs.get("refs/heads/foo2").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateOneNonExistentRefAtomic()
+                       throws IOException {
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/foo1",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), B, "refs/heads/foo2",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(0).getResult());
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(1)));
+               assertEquals("[]", refs.keySet().toString());
+       }
+
+       @Test
+       public void testBatchRefUpdateOneRefWrongOldValueNonAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(B, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), B, "refs/heads/foo2",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(0).getResult());
+               assertEquals(ReceiveCommand.Result.OK, commands.get(1).getResult());
+               assertEquals("[HEAD, refs/heads/foo2, refs/heads/master]", refs
+                               .keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+               assertEquals(B.getId(), refs.get("refs/heads/foo2").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefUpdateOneRefWrongOldValueAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(B, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(zeroId(), B, "refs/heads/foo2",
+                                               ReceiveCommand.Type.CREATE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(0).getResult());
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(1)));
+               assertEquals("[HEAD, refs/heads/master]", refs.keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefDeleteNonExistentRefNonAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(A, zeroId(), "refs/heads/foo2",
+                                               ReceiveCommand.Type.DELETE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               batchUpdate.setAtomic(false);
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertEquals(ReceiveCommand.Result.OK, commands.get(0).getResult());
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(1).getResult());
+               assertEquals("[HEAD, refs/heads/master]", refs.keySet().toString());
+               assertEquals(B.getId(), refs.get("refs/heads/master").getObjectId());
+       }
+
+       @Test
+       public void testBatchRefDeleteNonExistentRefAtomic()
+                       throws IOException {
+               writeLooseRef("refs/heads/master", A);
+               List<ReceiveCommand> commands = Arrays.asList(
+                               new ReceiveCommand(A, B, "refs/heads/master",
+                                               ReceiveCommand.Type.UPDATE),
+                               new ReceiveCommand(A, zeroId(), "refs/heads/foo2",
+                                               ReceiveCommand.Type.DELETE));
+               BatchRefUpdate batchUpdate = refdir.newBatchUpdate();
+               assertTrue(batchUpdate.isAtomic());
+               batchUpdate.setAllowNonFastForwards(true);
+               batchUpdate.addCommand(commands);
+               batchUpdate.execute(new RevWalk(diskRepo), new StrictWorkMonitor());
+               Map<String, Ref> refs = refdir.getRefs(RefDatabase.ALL);
+               assertTrue(ReceiveCommand.isTransactionAborted(commands.get(0)));
+               assertEquals(ReceiveCommand.Result.LOCK_FAILURE,
+                               commands.get(1).getResult());
+               assertEquals("[HEAD, refs/heads/master]", refs.keySet().toString());
+               assertEquals(A.getId(), refs.get("refs/heads/master").getObjectId());
+       }
+
        private void writeLooseRef(String name, AnyObjectId id) throws IOException {
                writeLooseRef(name, id.name() + "\n");
        }
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/LockFile.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/LockFile.java
index d2fcacf3ea8bf55fb8c39597f31a81bda826bed6..6221cfa4fc9027230784a2a7b3ce35e853077dcd 100644 (file)
@@ -374,7 +374,7 @@ public class LockFile {
                };
        }
 
-       private void requireLock() {
+       void requireLock() {
                if (os == null) {
                        unlock();
                        throw new IllegalStateException(MessageFormat.format(JGitText.get().lockOnNotHeld, ref));
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackedBatchRefUpdate.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackedBatchRefUpdate.java
new file mode 100644 (file)
index 0000000..65f9ec4
--- /dev/null
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/PackedBatchRefUpdate.java
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2017, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.internal.storage.file;
+
+import static java.util.stream.Collectors.toList;
+import static org.eclipse.jgit.transport.ReceiveCommand.Result.LOCK_FAILURE;
+import static org.eclipse.jgit.transport.ReceiveCommand.Result.NOT_ATTEMPTED;
+import static org.eclipse.jgit.transport.ReceiveCommand.Result.REJECTED_NONFASTFORWARD;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.internal.storage.file.RefDirectory.PackedRefList;
+import org.eclipse.jgit.lib.BatchRefUpdate;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectIdRef;
+import org.eclipse.jgit.lib.ProgressMonitor;
+import org.eclipse.jgit.lib.Ref;
+import org.eclipse.jgit.lib.RefDatabase;
+import org.eclipse.jgit.revwalk.RevObject;
+import org.eclipse.jgit.revwalk.RevTag;
+import org.eclipse.jgit.revwalk.RevWalk;
+import org.eclipse.jgit.transport.ReceiveCommand;
+import org.eclipse.jgit.util.RefList;
+
+/**
+ * Implementation of {@link BatchRefUpdate} that uses the {@code packed-refs}
+ * file to support atomically updating multiple refs.
+ * <p>
+ * The algorithm is designed to be compatible with traditional single ref
+ * updates operating on single refs only. Regardless of success or failure, the
+ * results are atomic: from the perspective of any reader, either all updates in
+ * the batch will be visible, or none will. In the case of process failure
+ * during any of the following steps, removal of stale lock files is always
+ * safe, and will never result in an inconsistent state, although the update may
+ * or may not have been applied.
+ * <p>
+ * The algorithm is:
+ * <ol>
+ * <li>Pack loose refs involved in the transaction using the normal pack-refs
+ * operation. This ensures that creating lock files in the following step
+ * succeeds even if a batch contains both a delete of {@code refs/x} (loose) and
+ * a create of {@code refs/x/y}.</li>
+ * <li>Create locks for all loose refs involved in the transaction, even if they
+ * are not currently loose.</li>
+ * <li>Pack loose refs again, this time while holding all lock files (see {@link
+ * RefDirectory#pack(Map)}), without deleting them afterwards. This covers a
+ * potential race where new loose refs were created after the initial packing
+ * step. If no new loose refs were created during this race, this step does not
+ * modify any files on disk. Keep the merged state in memory.</li>
+ * <li>Update the in-memory packed refs with the commands in the batch, possibly
+ * failing the whole batch if any old ref values do not match.</li>
+ * <li>If the update succeeds, lock {@code packed-refs} and commit by atomically
+ * renaming the lock file.</li>
+ * <li>Delete loose ref lock files.</li>
+ * </ol>
+ *
+ * Because the packed-refs file format is a sorted list, this algorithm is
+ * linear in the total number of refs, regardless of the batch size. This can be
+ * a significant slowdown on repositories with large numbers of refs; callers
+ * that prefer speed over atomicity should use {@code setAtomic(false)}. As an
+ * optimization, an update containing a single ref update does not use the
+ * packed-refs protocol.
+ */
+class PackedBatchRefUpdate extends BatchRefUpdate {
+       private RefDirectory refdb;
+
+       PackedBatchRefUpdate(RefDirectory refdb) {
+               super(refdb);
+               this.refdb = refdb;
+       }
+
+       @Override
+       public void execute(RevWalk walk, ProgressMonitor monitor,
+                       List<String> options) throws IOException {
+               if (!isAtomic()) {
+                       // Use default one-by-one implementation.
+                       super.execute(walk, monitor, options);
+                       return;
+               }
+               List<ReceiveCommand> pending =
+                               ReceiveCommand.filter(getCommands(), NOT_ATTEMPTED);
+               if (pending.isEmpty()) {
+                       return;
+               }
+               if (pending.size() == 1) {
+                       // Single-ref updates are always atomic, no need for packed-refs.
+                       super.execute(walk, monitor, options);
+                       return;
+               }
+
+               // Required implementation details copied from super.execute.
+               if (!blockUntilTimestamps(MAX_WAIT)) {
+                       return;
+               }
+               if (options != null) {
+                       setPushOptions(options);
+               }
+               // End required implementation details.
+
+               // Check for conflicting names before attempting to acquire locks, since
+               // lockfile creation may fail on file/directory conflicts.
+               if (!checkConflictingNames(pending)) {
+                       return;
+               }
+
+               if (!checkObjectExistence(walk, pending)) {
+                       return;
+               }
+
+               if (!checkNonFastForwards(walk, pending)) {
+                       return;
+               }
+
+               // Pack refs normally, so we can create lock files even in the case where
+               // refs/x is deleted and refs/x/y is created in this batch.
+               refdb.pack(
+                               pending.stream().map(ReceiveCommand::getRefName).collect(toList()));
+
+               Map<String, LockFile> locks = new HashMap<>();
+               try {
+                       if (!lockLooseRefs(pending, locks)) {
+                               return;
+                       }
+                       PackedRefList oldPackedList = refdb.pack(locks);
+                       RefList<Ref> newRefs = applyUpdates(walk, oldPackedList, pending);
+                       if (newRefs == null) {
+                               return;
+                       }
+                       LockFile packedRefsLock = new LockFile(refdb.packedRefsFile);
+                       try {
+                               packedRefsLock.lock();
+                               refdb.commitPackedRefs(packedRefsLock, newRefs, oldPackedList);
+                       } finally {
+                               packedRefsLock.unlock();
+                       }
+               } finally {
+                       locks.values().forEach(LockFile::unlock);
+               }
+
+               refdb.fireRefsChanged();
+               pending.forEach(c -> c.setResult(ReceiveCommand.Result.OK));
+       }
+
+       private boolean checkConflictingNames(List<ReceiveCommand> commands)
+                       throws IOException {
+               Set<String> takenNames = new HashSet<>();
+               Set<String> takenPrefixes = new HashSet<>();
+               Set<String> deletes = new HashSet<>();
+               for (ReceiveCommand cmd : commands) {
+                       if (cmd.getType() != ReceiveCommand.Type.DELETE) {
+                               takenNames.add(cmd.getRefName());
+                               addPrefixesTo(cmd.getRefName(), takenPrefixes);
+                       } else {
+                               deletes.add(cmd.getRefName());
+                       }
+               }
+               Set<String> initialRefs = refdb.getRefs(RefDatabase.ALL).keySet();
+               for (String name : initialRefs) {
+                       if (!deletes.contains(name)) {
+                               takenNames.add(name);
+                               addPrefixesTo(name, takenPrefixes);
+                       }
+               }
+
+               for (ReceiveCommand cmd : commands) {
+                       if (cmd.getType() != ReceiveCommand.Type.DELETE &&
+                                       takenPrefixes.contains(cmd.getRefName())) {
+                               // This ref is a prefix of some other ref. This check doesn't apply when
+                               // this command is a delete, because if the ref is deleted nobody will
+                               // ever be creating a loose ref with that name.
+                               lockFailure(cmd, commands);
+                               return false;
+                       }
+                       for (String prefix : getPrefixes(cmd.getRefName())) {
+                               if (takenNames.contains(prefix)) {
+                                       // A prefix of this ref is already a refname. This check does apply
+                                       // when this command is a delete, because we would need to create the
+                                       // refname as a directory in order to create a lockfile for the
+                                       // to-be-deleted ref.
+                                       lockFailure(cmd, commands);
+                                       return false;
+                               }
+                       }
+               }
+               return true;
+       }
+
+       private boolean checkObjectExistence(RevWalk walk,
+                       List<ReceiveCommand> commands) throws IOException {
+               for (ReceiveCommand cmd : commands) {
+                       try {
+                               if (!cmd.getNewId().equals(ObjectId.zeroId())) {
+                                       walk.parseAny(cmd.getNewId());
+                               }
+                       } catch (MissingObjectException e) {
+                               // ReceiveCommand#setResult(Result) converts REJECTED to
+                               // REJECTED_NONFASTFORWARD, even though that result is also used for a
+                               // missing object. Eagerly handle this case so we can set the right
+                               // result.
+                               reject(cmd, ReceiveCommand.Result.REJECTED_MISSING_OBJECT, commands);
+                               return false;
+                       }
+               }
+               return true;
+       }
+
+       private boolean checkNonFastForwards(RevWalk walk,
+                       List<ReceiveCommand> commands) throws IOException {
+               if (isAllowNonFastForwards()) {
+                       return true;
+               }
+               for (ReceiveCommand cmd : commands) {
+                       cmd.updateType(walk);
+                       if (cmd.getType() == ReceiveCommand.Type.UPDATE_NONFASTFORWARD) {
+                               reject(cmd, REJECTED_NONFASTFORWARD, commands);
+                               return false;
+                       }
+               }
+               return true;
+       }
+
+       private boolean lockLooseRefs(List<ReceiveCommand> commands,
+                       Map<String, LockFile> locks) throws IOException {
+               for (ReceiveCommand c : commands) {
+                       LockFile lock = new LockFile(refdb.fileFor(c.getRefName()));
+                       if (!lock.lock()) {
+                               lockFailure(c, commands);
+                               return false;
+                       }
+                       locks.put(c.getRefName(), lock);
+               }
+               return true;
+       }
+
+       private static RefList<Ref> applyUpdates(RevWalk walk, RefList<Ref> refs,
+                       List<ReceiveCommand> commands) throws IOException {
+               int nDeletes = 0;
+               List<ReceiveCommand> adds = new ArrayList<>(commands.size());
+               for (ReceiveCommand c : commands) {
+                       if (c.getType() == ReceiveCommand.Type.CREATE) {
+                               adds.add(c);
+                       } else if (c.getType() == ReceiveCommand.Type.DELETE) {
+                               nDeletes++;
+                       }
+               }
+               int addIdx = 0;
+
+               // Construct a new RefList by linearly scanning the old list, and merging in
+               // any updates.
+               Map<String, ReceiveCommand> byName = byName(commands);
+               RefList.Builder<Ref> b =
+                               new RefList.Builder<>(refs.size() - nDeletes + adds.size());
+               for (Ref ref : refs) {
+                       String name = ref.getName();
+                       ReceiveCommand cmd = byName.remove(name);
+                       if (cmd == null) {
+                               b.add(ref);
+                               continue;
+                       }
+                       if (!cmd.getOldId().equals(ref.getObjectId())) {
+                               lockFailure(cmd, commands);
+                               return null;
+                       }
+
+                       // Consume any adds between the last and current ref.
+                       while (addIdx < adds.size()) {
+                               ReceiveCommand currAdd = adds.get(addIdx);
+                               if (currAdd.getRefName().compareTo(name) < 0) {
+                                       b.add(peeledRef(walk, currAdd));
+                                       byName.remove(currAdd.getRefName());
+                               } else {
+                                       break;
+                               }
+                               addIdx++;
+                       }
+
+                       if (cmd.getType() != ReceiveCommand.Type.DELETE) {
+                               b.add(peeledRef(walk, cmd));
+                       }
+               }
+
+               // All remaining adds are valid, since the refs didn't exist.
+               while (addIdx < adds.size()) {
+                       ReceiveCommand cmd = adds.get(addIdx++);
+                       byName.remove(cmd.getRefName());
+                       b.add(peeledRef(walk, cmd));
+               }
+
+               // Any remaining updates/deletes do not correspond to any existing refs, so
+               // they are lock failures.
+               if (!byName.isEmpty()) {
+                       lockFailure(byName.values().iterator().next(), commands);
+                       return null;
+               }
+
+               return b.toRefList();
+       }
+
+       private static Map<String, ReceiveCommand> byName(
+                       List<ReceiveCommand> commands) {
+               Map<String, ReceiveCommand> ret = new LinkedHashMap<>();
+               for (ReceiveCommand cmd : commands) {
+                       ret.put(cmd.getRefName(), cmd);
+               }
+               return ret;
+       }
+
+       private static Ref peeledRef(RevWalk walk, ReceiveCommand cmd)
+                       throws IOException {
+               ObjectId newId = cmd.getNewId().copy();
+               RevObject obj = walk.parseAny(newId);
+               if (obj instanceof RevTag) {
+                       return new ObjectIdRef.PeeledTag(
+                                       Ref.Storage.PACKED, cmd.getRefName(), newId, walk.peel(obj).copy());
+               }
+               return new ObjectIdRef.PeeledNonTag(
+                               Ref.Storage.PACKED, cmd.getRefName(), newId);
+       }
+
+       private static void lockFailure(ReceiveCommand cmd,
+                       List<ReceiveCommand> commands) {
+               reject(cmd, LOCK_FAILURE, commands);
+       }
+
+       private static void reject(ReceiveCommand cmd, ReceiveCommand.Result result,
+                       List<ReceiveCommand> commands) {
+               cmd.setResult(result);
+               for (ReceiveCommand c2 : commands) {
+                       if (c2.getResult() == ReceiveCommand.Result.OK) {
+                               // Undo OK status so ReceiveCommand#abort aborts it. Assumes this method
+                               // is always called before committing any updates to disk.
+                               c2.setResult(ReceiveCommand.Result.NOT_ATTEMPTED);
+                       }
+               }
+               ReceiveCommand.abort(commands);
+       }
+}
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/RefDirectory.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/RefDirectory.java
index c8c2dd586719f9f13d9cb33ec06f2b71e5ab6948..eb569749bc026a5da8f991df929248e52032b7fe 100644 (file)
@@ -67,6 +67,8 @@ import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.text.MessageFormat;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -143,7 +145,7 @@ public class RefDirectory extends RefDatabase {
 
        private final ReflogWriter logWriter;
 
-       private final File packedRefsFile;
+       final File packedRefsFile;
 
        /**
         * Immutable sorted list of loose references.
@@ -183,7 +185,7 @@ public class RefDirectory extends RefDatabase {
                packedRefsFile = fs.resolve(gitDir, PACKED_REFS);
 
                looseRefs.set(RefList.<LooseRef> emptyList());
-               packedRefs.set(PackedRefList.NO_PACKED_REFS);
+               packedRefs.set(NO_PACKED_REFS);
        }
 
        Repository getRepository() {
@@ -209,7 +211,7 @@ public class RefDirectory extends RefDatabase {
 
        private void clearReferences() {
                looseRefs.set(RefList.<LooseRef> emptyList());
-               packedRefs.set(PackedRefList.NO_PACKED_REFS);
+               packedRefs.set(NO_PACKED_REFS);
        }
 
        @Override
@@ -562,6 +564,16 @@ public class RefDirectory extends RefDatabase {
                return new RefDirectoryRename(from, to);
        }
 
+       @Override
+       public PackedBatchRefUpdate newBatchUpdate() {
+               return new PackedBatchRefUpdate(this);
+       }
+
+       @Override
+       public boolean performsAtomicTransactions() {
+               return true;
+       }
+
        void stored(RefDirectoryUpdate update, FileSnapshot snapshot) {
                final ObjectId target = update.getNewObjectId().copy();
                final Ref leaf = update.getRef().getLeaf();
@@ -635,15 +647,29 @@ public class RefDirectory extends RefDatabase {
         * @throws IOException
         */
        public void pack(List<String> refs) throws IOException {
-               if (refs.size() == 0)
-                       return;
+               pack(refs, Collections.emptyMap());
+       }
+
+       PackedRefList pack(Map<String, LockFile> heldLocks) throws IOException {
+               return pack(heldLocks.keySet(), heldLocks);
+       }
+
+       private PackedRefList pack(Collection<String> refs,
+                       Map<String, LockFile> heldLocks) throws IOException {
+               for (LockFile ol : heldLocks.values()) {
+                       ol.requireLock();
+               }
+               if (refs.size() == 0) {
+                       return null;
+               }
                FS fs = parent.getFS();
 
                // Lock the packed refs file and read the content
                LockFile lck = new LockFile(packedRefsFile);
-               if (!lck.lock())
+               if (!lck.lock()) {
                        throw new IOException(MessageFormat.format(
                                        JGitText.get().cannotLock, packedRefsFile));
+               }
 
                try {
                        final PackedRefList packed = getPackedRefs();
@@ -653,6 +679,9 @@ public class RefDirectory extends RefDatabase {
                        boolean dirty = false;
                        for (String refName : refs) {
                                Ref oldRef = readRef(refName, cur);
+                               if (oldRef == null) {
+                                       continue; // A non-existent ref is already correctly packed.
+                               }
                                if (oldRef.isSymbolic()) {
                                        continue; // can't pack symbolic refs
                                }
@@ -675,25 +704,37 @@ public class RefDirectory extends RefDatabase {
                        }
                        if (!dirty) {
                                // All requested refs were already packed accurately
-                               return;
+                               return packed;
                        }
 
                        // The new content for packed-refs is collected. Persist it.
-                       commitPackedRefs(lck, cur, packed);
+                       PackedRefList result = commitPackedRefs(lck, cur, packed);
 
                        // Now delete the loose refs which are now packed
                        for (String refName : refs) {
                                // Lock the loose ref
                                File refFile = fileFor(refName);
-                               if (!fs.exists(refFile))
-                                       continue;
-                               LockFile rLck = new LockFile(refFile);
-                               if (!rLck.lock())
+                               if (!fs.exists(refFile)) {
                                        continue;
+                               }
+
+                               LockFile rLck = heldLocks.get(refName);
+                               boolean shouldUnlock;
+                               if (rLck == null) {
+                                       rLck = new LockFile(refFile);
+                                       if (!rLck.lock()) {
+                                               continue;
+                                       }
+                                       shouldUnlock = true;
+                               } else {
+                                       shouldUnlock = false;
+                               }
+
                                try {
                                        LooseRef currentLooseRef = scanRef(null, refName);
-                                       if (currentLooseRef == null || currentLooseRef.isSymbolic())
+                                       if (currentLooseRef == null || currentLooseRef.isSymbolic()) {
                                                continue;
+                                       }
                                        Ref packedRef = cur.get(refName);
                                        ObjectId clr_oid = currentLooseRef.getObjectId();
                                        if (clr_oid != null
@@ -702,19 +743,23 @@ public class RefDirectory extends RefDatabase {
                                                do {
                                                        curLoose = looseRefs.get();
                                                        int idx = curLoose.find(refName);
-                                                       if (idx < 0)
+                                                       if (idx < 0) {
                                                                break;
+                                                       }
                                                        newLoose = curLoose.remove(idx);
                                                } while (!looseRefs.compareAndSet(curLoose, newLoose));
                                                int levels = levelsIn(refName) - 2;
                                                delete(refFile, levels, rLck);
                                        }
                                } finally {
-                                       rLck.unlock();
+                                       if (shouldUnlock) {
+                                               rLck.unlock();
+                                       }
                                }
                        }
                        // Don't fire refsChanged. The refs have not changed, only their
                        // storage.
+                       return result;
                } finally {
                        lck.unlock();
                }
@@ -813,7 +858,7 @@ public class RefDirectory extends RefDatabase {
                                        throw noPackedRefs;
                                }
                                // Ignore it and leave the new list empty.
-                               return PackedRefList.NO_PACKED_REFS;
+                               return NO_PACKED_REFS;
                        }
                        try {
                                return new PackedRefList(parsePackedRefs(br), snapshot,
@@ -894,8 +939,11 @@ public class RefDirectory extends RefDatabase {
                return new StringBuilder(end - off).append(src, off, end).toString();
        }
 
-       private void commitPackedRefs(final LockFile lck, final RefList<Ref> refs,
+       PackedRefList commitPackedRefs(final LockFile lck, final RefList<Ref> refs,
                        final PackedRefList oldPackedList) throws IOException {
+               // Can't just return packedRefs.get() from this method; it might have been
+               // updated again after writePackedRefs() returns.
+               AtomicReference<PackedRefList> result = new AtomicReference<>();
                new RefWriter(refs) {
                        @Override
                        protected void writeFile(String name, byte[] content)
@@ -935,8 +983,10 @@ public class RefDirectory extends RefDatabase {
                                        throw new ObjectWritingException(
                                                        MessageFormat.format(JGitText.get().unableToWrite, name));
                                }
+                               result.set(newPackedList);
                        }
                }.writePackedRefs();
+               return result.get();
        }
 
        private Ref readRef(String name, RefList<Ref> packed) throws IOException {
@@ -1058,7 +1108,7 @@ public class RefDirectory extends RefDatabase {
        }
 
        /** If the parent should fire listeners, fires them. */
-       private void fireRefsChanged() {
+       void fireRefsChanged() {
                final int last = lastNotifiedModCnt.get();
                final int curr = modCnt.get();
                if (last != curr && lastNotifiedModCnt.compareAndSet(last, curr) && last != 0)
@@ -1125,22 +1175,23 @@ public class RefDirectory extends RefDatabase {
                }
        }
 
-       private static class PackedRefList extends RefList<Ref> {
-               static final PackedRefList NO_PACKED_REFS = new PackedRefList(
-                               RefList.emptyList(), FileSnapshot.MISSING_FILE,
-                               ObjectId.zeroId());
+       static class PackedRefList extends RefList<Ref> {
 
-               final FileSnapshot snapshot;
+               private final FileSnapshot snapshot;
 
-               final ObjectId id;
+               private final ObjectId id;
 
-               PackedRefList(RefList<Ref> src, FileSnapshot s, ObjectId i) {
+               private PackedRefList(RefList<Ref> src, FileSnapshot s, ObjectId i) {
                        super(src);
                        snapshot = s;
                        id = i;
                }
        }
 
+       private static final PackedRefList NO_PACKED_REFS = new PackedRefList(
+                       RefList.emptyList(), FileSnapshot.MISSING_FILE,
+                       ObjectId.zeroId());
+
        private static LooseSymbolicRef newSymbolicRef(FileSnapshot snapshot,
                        String name, String target) {
                Ref dst = new ObjectIdRef.Unpeeled(NEW, target, null);
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/lib/BatchRefUpdate.java b/org.eclipse.jgit/src/org/eclipse/jgit/lib/BatchRefUpdate.java
index 4a4db12fbe44d66de52c61d42bc3ce4c96c12a56..aa85cc7b29a17a5e1aa30913e23e3ae5314bda24 100644 (file)
@@ -82,8 +82,10 @@ public class BatchRefUpdate {
         * clock skew between machines on the same LAN using an NTP server also on
         * the same LAN should be under 5 seconds. 5 seconds is also not that long
         * for a large `git push` operation to complete.
+        *
+        * @since 4.9
         */
-       private static final Duration MAX_WAIT = Duration.ofSeconds(5);
+       protected static final Duration MAX_WAIT = Duration.ofSeconds(5);
 
        private final RefDatabase refdb;
 
@@ -334,6 +336,19 @@ public class BatchRefUpdate {
                return pushOptions;
        }
 
+       /**
+        * Set push options associated with this update.
+        * <p>
+        * Implementations must call this at the top of {@link #execute(RevWalk,
+        * ProgressMonitor, List)}.
+        *
+        * @param options options passed to {@code execute}.
+        * @since 4.9
+        */
+       protected void setPushOptions(List<String> options) {
+               pushOptions = options;
+       }
+
        /**
         * @return list of timestamps the batch must wait for.
         * @since 4.6
@@ -400,7 +415,7 @@ public class BatchRefUpdate {
                }
 
                if (options != null) {
-                       pushOptions = options;
+                       setPushOptions(options);
                }
 
                monitor.beginTask(JGitText.get().updatingReferences, commands.size());
@@ -553,17 +568,36 @@ public class BatchRefUpdate {
                return ref;
        }
 
-       static Collection<String> getPrefixes(String s) {
+       /**
+        * Get all path prefixes of a ref name.
+        *
+        * @param name
+        *            ref name.
+        * @return path prefixes of the ref name. For {@code refs/heads/foo}, returns
+        *         {@code refs} and {@code refs/heads}.
+        * @since 4.9
+        */
+       protected static Collection<String> getPrefixes(String name) {
                Collection<String> ret = new HashSet<>();
-               addPrefixesTo(s, ret);
+               addPrefixesTo(name, ret);
                return ret;
        }
 
-       static void addPrefixesTo(String s, Collection<String> out) {
-               int p1 = s.indexOf('/');
+       /**
+        * Add prefixes of a ref name to an existing collection.
+        *
+        * @param name
+        *            ref name.
+        * @param out
+        *            path prefixes of the ref name. For {@code refs/heads/foo},
+        *            returns {@code refs} and {@code refs/heads}.
+        * @since 4.9
+        */
+       protected static void addPrefixesTo(String name, Collection<String> out) {
+               int p1 = name.indexOf('/');
                while (p1 > 0) {
-                       out.add(s.substring(0, p1));
-                       p1 = s.indexOf('/', p1 + 1);
+                       out.add(name.substring(0, p1));
+                       p1 = name.indexOf('/', p1 + 1);
                }
        }
 
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/ReceiveCommand.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/ReceiveCommand.java
index 01ceef189efe93e619cfb02ea3b3653d3121ef5f..a37cdd42a66eed6f33c169e787e190b62b471f9b 100644 (file)
@@ -190,6 +190,20 @@ public class ReceiveCommand {
                }
        }
 
+       /**
+        * Check whether a command failed due to transaction aborted.
+        *
+        * @param cmd
+        *            command.
+        * @return whether the command failed due to transaction aborted, as in {@link
+        *         #abort(Iterable)}.
+        * @since 4.9
+        */
+       public static boolean isTransactionAborted(ReceiveCommand cmd) {
+               return cmd.getResult() == REJECTED_OTHER_REASON
+                               && cmd.getMessage().equals(JGitText.get().transactionAborted);
+       }
+
        private final ObjectId oldId;
 
        private final ObjectId newId;
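
For illustration only (not part of this change): a caller inspecting the
results of an already-executed BatchRefUpdate (here assumed to be a variable
named batch) could use the new helper to tell commands that were merely rolled
back apart from the command that actually caused the failure.

    for (ReceiveCommand cmd : batch.getCommands()) {
        if (cmd.getResult() == ReceiveCommand.Result.OK) {
            continue; // applied
        }
        if (ReceiveCommand.isTransactionAborted(cmd)) {
            // The command was acceptable on its own; it was rolled back only
            // because another command in the atomic batch failed.
        } else {
            // The command itself caused the failure, e.g. LOCK_FAILURE or
            // REJECTED_MISSING_OBJECT.
        }
    }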