author    Alina Djamankulova <adjama@google.com>  2021-11-16 09:53:20 -0800
committer Alina Djamankulova <adjama@google.com>  2021-11-16 16:05:13 -0800
commit    49d243b13c658c798430674ee452635d66bb7dc7 (patch)
tree      57384377d26560d1c6bdc2549f8c5c08c5eb9219 /org.eclipse.jgit.test
parent    78b7d9e4fa1bdd3ba27b39190b45f7bdf0b61fff (diff)
DFS block cache: harden against race over ref locks.
With change https://git.eclipse.org/r/c/jgit/jgit/+/186455 a thread loading a bitmap index could hold two ref locks at the same time (one for the bitmap and one for either the index or the reverse index). Two threads loading bitmaps could therefore deadlock on the ref locks: e.g. threadA holds refLock[1] (for a bitmap) and wants refLock[2] (for an index or reverse index), while threadB holds refLock[2] (for a bitmap) and wants refLock[1].

This change introduces separate pools of locks per pack extension instead of a single shared pool. Threads loading bitmaps may still hold two locks, but the locks come from different extension pools and cannot overlap: e.g. threadA holds refLock[BITMAP_INDEX][1] and refLock[INDEX][2], while threadB holds refLock[BITMAP_INDEX][2] and refLock[INDEX][1].

More unit tests were added to cover various parallel loading scenarios.

Signed-off-by: Alina Djamankulova <adjama@google.com>
Change-Id: I89704b4721c21548929608d3798ef60925280755
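For illustration, a minimal Java sketch of the per-extension lock pools described above. The names RefLockTable and lockFor are hypothetical; JGit's actual DfsBlockCache internals differ.

import java.util.concurrent.locks.ReentrantLock;

// Hypothetical sketch: one lock pool per pack extension, so a thread
// holding a BITMAP_INDEX lock while waiting on an INDEX lock can never
// contend for the same lock object as a thread doing the reverse.
class RefLockTable {
	private final ReentrantLock[][] locks; // [extension][slot]

	RefLockTable(int extensions, int slotsPerExtension) {
		locks = new ReentrantLock[extensions][slotsPerExtension];
		for (int e = 0; e < extensions; e++) {
			for (int s = 0; s < slotsPerExtension; s++) {
				locks[e][s] = new ReentrantLock();
			}
		}
	}

	// Pick the lock for a cache key, partitioned by pack extension.
	ReentrantLock lockFor(int extOrdinal, int keyHash) {
		int slot = Math.floorMod(keyHash, locks[extOrdinal].length);
		return locks[extOrdinal][slot];
	}
}

Because BITMAP_INDEX and INDEX locks come from disjoint pools, the circular wait described above cannot form. The tests below exercise this by loading bitmaps, indexes, and reverse indexes in parallel and asserting per-extension miss counts.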
Diffstat (limited to 'org.eclipse.jgit.test')
org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java | 181
1 file changed, 155 insertions(+), 26 deletions(-)
diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
index 549e1f4696..4f1314057f 100644
--- a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
@@ -10,6 +10,7 @@
package org.eclipse.jgit.internal.storage.dfs;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -20,11 +21,10 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.LongStream;
-
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
+
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.junit.TestRepository;
import org.eclipse.jgit.junit.TestRng;
@@ -42,10 +42,12 @@ public class DfsBlockCacheTest {
public TestName testName = new TestName();
private TestRng rng;
private DfsBlockCache cache;
+ private ExecutorService pool;
@Before
public void setUp() {
rng = new TestRng(testName.getMethodName());
+ pool = Executors.newFixedThreadPool(10);
resetCache();
}
@@ -152,49 +154,169 @@ public class DfsBlockCacheTest {
@SuppressWarnings("resource")
@Test
- public void noConcurrencySerializedReads() throws Exception {
- DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
- InMemoryRepository r1 = new InMemoryRepository(repo);
- TestRepository<InMemoryRepository> repository = new TestRepository<>(
- r1);
- RevCommit commit = repository.branch("/refs/ref1").commit()
- .add("blob1", "blob1").create();
- repository.branch("/refs/ref2").commit().add("blob2", "blob2")
- .parent(commit).create();
-
- new DfsGarbageCollector(r1).pack(null);
+ public void noConcurrencySerializedReads_oneRepo() throws Exception {
+ InMemoryRepository r1 = createRepoWithBitmap("test");
// Reset cache with concurrency level 1, i.e. no concurrency.
- DfsBlockCache.reconfigure(new DfsBlockCacheConfig().setBlockSize(512)
- .setBlockLimit(1 << 20).setConcurrencyLevel(1));
- cache = DfsBlockCache.getInstance();
+ resetCache(1);
DfsReader reader = (DfsReader) r1.newObjectReader();
- ExecutorService pool = Executors.newFixedThreadPool(10);
for (DfsPackFile pack : r1.getObjectDatabase().getPacks()) {
// Only load the non-garbage pack, which has the bitmap.
if (pack.isGarbage()) {
continue;
}
- asyncRun(pool, () -> pack.getBitmapIndex(reader));
- asyncRun(pool, () -> pack.getPackIndex(reader));
- asyncRun(pool, () -> pack.getBitmapIndex(reader));
+ asyncRun(() -> pack.getBitmapIndex(reader));
+ asyncRun(() -> pack.getPackIndex(reader));
+ asyncRun(() -> pack.getBitmapIndex(reader));
}
+ waitForExecutorPoolTermination();
+
+ assertEquals(1, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
+ assertEquals(1, cache.getMissCount()[PackExt.INDEX.ordinal()]);
+ // The reverse index has no pack extension; its miss count defaults to slot 0.
+ assertEquals(1, cache.getMissCount()[0]);
+ }
+
+ @SuppressWarnings("resource")
+ @Test
+ public void noConcurrencySerializedReads_twoRepos() throws Exception {
+ InMemoryRepository r1 = createRepoWithBitmap("test1");
+ InMemoryRepository r2 = createRepoWithBitmap("test2");
+ resetCache(1);
+
+ DfsReader reader = (DfsReader) r1.newObjectReader();
+ DfsPackFile[] r1Packs = r1.getObjectDatabase().getPacks();
+ DfsPackFile[] r2Packs = r2.getObjectDatabase().getPacks();
+ // Safety check that both repos have the same number of packs.
+ assertEquals(r1Packs.length, r2Packs.length);
+
+ for (int i = 0; i < r1Packs.length; ++i) {
+ DfsPackFile pack1 = r1Packs[i];
+ DfsPackFile pack2 = r2Packs[i];
+ if (pack1.isGarbage() || pack2.isGarbage()) {
+ continue;
+ }
+ asyncRun(() -> pack1.getBitmapIndex(reader));
+ asyncRun(() -> pack2.getBitmapIndex(reader));
+ }
+
+ waitForExecutorPoolTermination();
+ assertEquals(2, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
+ assertEquals(2, cache.getMissCount()[PackExt.INDEX.ordinal()]);
+ assertEquals(2, cache.getMissCount()[0]);
+ }
+
+ @SuppressWarnings("resource")
+ @Test
+ public void lowConcurrencyParallelReads_twoRepos() throws Exception {
+ InMemoryRepository r1 = createRepoWithBitmap("test1");
+ InMemoryRepository r2 = createRepoWithBitmap("test2");
+ resetCache(2);
+
+ DfsReader reader = (DfsReader) r1.newObjectReader();
+ DfsPackFile[] r1Packs = r1.getObjectDatabase().getPacks();
+ DfsPackFile[] r2Packs = r2.getObjectDatabase().getPacks();
+ // Safety check that both repos have the same number of packs.
+ assertEquals(r1Packs.length, r2Packs.length);
+
+ for (int i = 0; i < r1Packs.length; ++i) {
+ DfsPackFile pack1 = r1Packs[i];
+ DfsPackFile pack2 = r2Packs[i];
+ if (pack1.isGarbage() || pack2.isGarbage()) {
+ continue;
+ }
+ asyncRun(() -> pack1.getBitmapIndex(reader));
+ asyncRun(() -> pack2.getBitmapIndex(reader));
+ }
+
+ waitForExecutorPoolTermination();
+ assertEquals(2, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
+ assertEquals(2, cache.getMissCount()[PackExt.INDEX.ordinal()]);
+ assertEquals(2, cache.getMissCount()[0]);
+ }
+
+ @SuppressWarnings("resource")
+ @Test
+ public void lowConcurrencyParallelReads_twoReposAndIndex()
+ throws Exception {
+ InMemoryRepository r1 = createRepoWithBitmap("test1");
+ InMemoryRepository r2 = createRepoWithBitmap("test2");
+ resetCache(2);
+
+ DfsReader reader = (DfsReader) r1.newObjectReader();
+ DfsPackFile[] r1Packs = r1.getObjectDatabase().getPacks();
+ DfsPackFile[] r2Packs = r2.getObjectDatabase().getPacks();
+ // Safety check that both repos have the same number of packs.
+ assertEquals(r1Packs.length, r2Packs.length);
+
+ for (int i = 0; i < r1Packs.length; ++i) {
+ DfsPackFile pack1 = r1Packs[i];
+ DfsPackFile pack2 = r2Packs[i];
+ if (pack1.isGarbage() || pack2.isGarbage()) {
+ continue;
+ }
+ asyncRun(() -> pack1.getBitmapIndex(reader));
+ asyncRun(() -> pack1.getPackIndex(reader));
+ asyncRun(() -> pack2.getBitmapIndex(reader));
+ }
+ waitForExecutorPoolTermination();
+
+ assertEquals(2, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
+ // Index is loaded once for each repo.
+ assertEquals(2, cache.getMissCount()[PackExt.INDEX.ordinal()]);
+ assertEquals(2, cache.getMissCount()[0]);
+ }
+
+ @SuppressWarnings("resource")
+ @Test
+ public void highConcurrencyParallelReads_oneRepo() throws Exception {
+ InMemoryRepository r1 = createRepoWithBitmap("test");
+ resetCache();
+
+ DfsReader reader = (DfsReader) r1.newObjectReader();
+ for (DfsPackFile pack : r1.getObjectDatabase().getPacks()) {
+ // Only load the non-garbage pack, which has the bitmap.
+ if (pack.isGarbage()) {
+ continue;
+ }
+ asyncRun(() -> pack.getBitmapIndex(reader));
+ asyncRun(() -> pack.getPackIndex(reader));
+ asyncRun(() -> pack.getBitmapIndex(reader));
+ }
+ waitForExecutorPoolTermination();
- pool.shutdown();
- pool.awaitTermination(500, TimeUnit.MILLISECONDS);
- assertTrue("Threads did not complete, likely due to a deadlock.",
- pool.isTerminated());
assertEquals(1, cache.getMissCount()[PackExt.BITMAP_INDEX.ordinal()]);
assertEquals(1, cache.getMissCount()[PackExt.INDEX.ordinal()]);
+ assertEquals(1, cache.getMissCount()[0]);
}
private void resetCache() {
+ resetCache(32);
+ }
+
+ private void resetCache(int concurrencyLevel) {
DfsBlockCache.reconfigure(new DfsBlockCacheConfig().setBlockSize(512)
- .setBlockLimit(1 << 20));
+ .setConcurrencyLevel(concurrencyLevel).setBlockLimit(1 << 20));
cache = DfsBlockCache.getInstance();
}
- private void asyncRun(ExecutorService pool, Callable<?> call) {
+ private InMemoryRepository createRepoWithBitmap(String repoName)
+ throws Exception {
+ DfsRepositoryDescription repoDesc = new DfsRepositoryDescription(
+ repoName);
+ InMemoryRepository repo = new InMemoryRepository(repoDesc);
+ try (TestRepository<InMemoryRepository> repository = new TestRepository<>(
+ repo)) {
+ RevCommit commit = repository.branch("/refs/ref1" + repoName)
+ .commit().add("blob1", "blob1" + repoName).create();
+ repository.branch("/refs/ref2" + repoName).commit()
+ .add("blob2", "blob2" + repoName).parent(commit).create();
+ }
+ new DfsGarbageCollector(repo).pack(null);
+ return repo;
+ }
+
+ private void asyncRun(Callable<?> call) {
pool.execute(() -> {
try {
call.call();
@@ -203,4 +325,11 @@ public class DfsBlockCacheTest {
}
});
}
+
+ private void waitForExecutorPoolTermination() throws Exception {
+ pool.shutdown();
+ pool.awaitTermination(500, MILLISECONDS);
+ assertTrue("Threads did not complete, likely due to a deadlock.",
+ pool.isTerminated());
+ }
}