diff options
author | Ivan Frade <ifrade@google.com> | 2022-01-21 14:59:30 -0500 |
---|---|---|
committer | Gerrit Code Review @ Eclipse.org <gerrit@eclipse.org> | 2022-01-21 14:59:30 -0500 |
commit | 076ecf8ded287afd95b931d7a2e5d3f58b0ade8f (patch) | |
tree | 7c2cd308a4f501047b54fdc98a9989aed1e9e0cd /org.eclipse.jgit.test | |
parent | 969601c742e4cd059b9ffebf1213ff76785622fe (diff) | |
parent | b536dbdb9b7099511362abe825cfd50b942ccf7c (diff) | |
download | jgit-076ecf8ded287afd95b931d7a2e5d3f58b0ade8f.tar.gz jgit-076ecf8ded287afd95b931d7a2e5d3f58b0ade8f.zip |
Merge "DFS block cache: report index load and evict stats"
Diffstat (limited to 'org.eclipse.jgit.test')
-rw-r--r-- | org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java | 117 |
1 file changed, 117 insertions, 0 deletions
diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
index 070d666ee5..bacd3ba0a2 100644
--- a/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
+++ b/org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
@@ -15,16 +15,19 @@ import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.time.Duration;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.LongStream;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 
+import org.eclipse.jgit.internal.storage.dfs.DfsBlockCacheConfig.IndexEventConsumer;
 import org.eclipse.jgit.internal.storage.pack.PackExt;
 import org.eclipse.jgit.junit.TestRepository;
 import org.eclipse.jgit.junit.TestRng;
@@ -154,6 +157,120 @@ public class DfsBlockCacheTest {
 
 	@SuppressWarnings("resource")
 	@Test
+	public void hasIndexEventConsumerOnlyLoaded() throws Exception {
+		AtomicInteger loaded = new AtomicInteger();
+		IndexEventConsumer indexEventConsumer = new IndexEventConsumer() {
+			@Override
+			public void acceptRequestedEvent(int packExtPos, boolean cacheHit,
+					long loadMicros, long bytes,
+					Duration lastEvictionDuration) {
+				assertEquals(PackExt.INDEX.getPosition(), packExtPos);
+				assertTrue(cacheHit);
+				assertTrue(lastEvictionDuration.isZero());
+				loaded.incrementAndGet();
+			}
+		};
+
+		DfsBlockCache.reconfigure(new DfsBlockCacheConfig().setBlockSize(512)
+				.setBlockLimit(512 * 4)
+				.setIndexEventConsumer(indexEventConsumer));
+		cache = DfsBlockCache.getInstance();
+
+		DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+		InMemoryRepository r1 = new InMemoryRepository(repo);
+		byte[] content = rng.nextBytes(424242);
+		ObjectId id;
+		try (ObjectInserter ins = r1.newObjectInserter()) {
+			id = ins.insert(OBJ_BLOB, content);
+			ins.flush();
+		}
+
+		try (ObjectReader rdr = r1.newObjectReader()) {
+			byte[] actual = rdr.open(id, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content, actual));
+		}
+		// All cache entries are hot and cache is at capacity.
+		assertTrue(LongStream.of(cache.getHitCount()).sum() > 0);
+		assertEquals(99, cache.getFillPercentage());
+
+		InMemoryRepository r2 = new InMemoryRepository(repo);
+		content = rng.nextBytes(424242);
+		try (ObjectInserter ins = r2.newObjectInserter()) {
+			ins.insert(OBJ_BLOB, content);
+			ins.flush();
+		}
+		assertTrue(cache.getEvictions()[PackExt.PACK.getPosition()] > 0);
+		assertEquals(1, cache.getEvictions()[PackExt.INDEX.getPosition()]);
+		assertEquals(1, loaded.get());
+	}
+
+	@SuppressWarnings("resource")
+	@Test
+	public void hasIndexEventConsumerLoadedAndEvicted() throws Exception {
+		AtomicInteger loaded = new AtomicInteger();
+		AtomicInteger evicted = new AtomicInteger();
+		IndexEventConsumer indexEventConsumer = new IndexEventConsumer() {
+			@Override
+			public void acceptRequestedEvent(int packExtPos, boolean cacheHit,
+					long loadMicros, long bytes,
+					Duration lastEvictionDuration) {
+				assertEquals(PackExt.INDEX.getPosition(), packExtPos);
+				assertTrue(cacheHit);
+				assertTrue(lastEvictionDuration.isZero());
+				loaded.incrementAndGet();
+			}
+
+			@Override
+			public void acceptEvictedEvent(int packExtPos, long bytes,
+					int totalCacheHitCount, Duration lastEvictionDuration) {
+				assertEquals(PackExt.INDEX.getPosition(), packExtPos);
+				assertTrue(totalCacheHitCount > 0);
+				assertTrue(lastEvictionDuration.isZero());
+				evicted.incrementAndGet();
+			}
+
+			@Override
+			public boolean shouldReportEvictedEvent() {
+				return true;
+			}
+		};
+
+		DfsBlockCache.reconfigure(new DfsBlockCacheConfig().setBlockSize(512)
+				.setBlockLimit(512 * 4)
+				.setIndexEventConsumer(indexEventConsumer));
+		cache = DfsBlockCache.getInstance();
+
+		DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+		InMemoryRepository r1 = new InMemoryRepository(repo);
+		byte[] content = rng.nextBytes(424242);
+		ObjectId id;
+		try (ObjectInserter ins = r1.newObjectInserter()) {
+			id = ins.insert(OBJ_BLOB, content);
+			ins.flush();
+		}
+
+		try (ObjectReader rdr = r1.newObjectReader()) {
+			byte[] actual = rdr.open(id, OBJ_BLOB).getBytes();
+			assertTrue(Arrays.equals(content, actual));
+		}
+		// All cache entries are hot and cache is at capacity.
+		assertTrue(LongStream.of(cache.getHitCount()).sum() > 0);
+		assertEquals(99, cache.getFillPercentage());
+
+		InMemoryRepository r2 = new InMemoryRepository(repo);
+		content = rng.nextBytes(424242);
+		try (ObjectInserter ins = r2.newObjectInserter()) {
+			ins.insert(OBJ_BLOB, content);
+			ins.flush();
+		}
+		assertTrue(cache.getEvictions()[PackExt.PACK.getPosition()] > 0);
+		assertEquals(1, cache.getEvictions()[PackExt.INDEX.getPosition()]);
+		assertEquals(1, loaded.get());
+		assertEquals(1, evicted.get());
+	}
+
+	@SuppressWarnings("resource")
+	@Test
 	public void noConcurrencySerializedReads_oneRepo() throws Exception {
 		InMemoryRepository r1 = createRepoWithBitmap("test");
 		// Reset cache with concurrency Level at 1 i.e. no concurrency.