source.dussan.org Git - jgit.git/commitdiff
DfsBlockCache: use PackExtBlockCacheTable when configured (change 66/1196166/20)
author    Laura Hamelin <haowl@google.com>
          Mon, 10 Jun 2024 20:42:03 +0000 (13:42 -0700)
committer Laura Hamelin <haowl@google.com>
          Fri, 4 Oct 2024 23:33:49 +0000 (16:33 -0700)
Adds usage of PackExtBlockCacheTable to DfsBlockCache,
replacing the current DfsBlockCacheTable when
PackExtCacheConfigurations exist.
When no PackExtCacheConfigurations exist, the existing
DfsBlockCacheTable implementation is still used.

Change-Id: I42222a0cb43785baba907a49077dd9874d19d891
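
For reference, below is a minimal sketch of how a caller would opt into the
split cache table, mirroring the APIs exercised in the test diff that follows.
The block size and limit values are illustrative only, and the visibility of
DfsBlockCachePackExtConfig outside the dfs package is an assumption here, not
something this change establishes.

    import java.util.Arrays;
    import java.util.EnumSet;
    import java.util.List;

    import org.eclipse.jgit.internal.storage.dfs.DfsBlockCache;
    import org.eclipse.jgit.internal.storage.dfs.DfsBlockCacheConfig;
    import org.eclipse.jgit.internal.storage.dfs.DfsBlockCacheConfig.DfsBlockCachePackExtConfig;
    import org.eclipse.jgit.internal.storage.pack.PackExt;

    // Per-extension config: PACK blocks get their own table (values illustrative).
    DfsBlockCacheConfig packConfig = new DfsBlockCacheConfig()
                    .setBlockSize(512).setBlockLimit(1 << 20);
    List<DfsBlockCachePackExtConfig> extConfigs = Arrays.asList(
                    new DfsBlockCachePackExtConfig(EnumSet.of(PackExt.PACK), packConfig));

    // Top-level config: a non-empty pack-ext configuration list makes
    // DfsBlockCache build a PackExtBlockCacheTable instead of a single
    // ClockBlockCacheTable.
    DfsBlockCacheConfig cacheConfig = new DfsBlockCacheConfig()
                    .setBlockSize(512).setBlockLimit(1 << 20)
                    .setPackExtCacheConfigurations(extConfigs);
    DfsBlockCache.reconfigure(cacheConfig);
    DfsBlockCache cache = DfsBlockCache.getInstance();

Leaving setPackExtCacheConfigurations unset keeps the existing single-table
behaviour, as stated in the commit message above.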

org.eclipse.jgit.test/tst/org/eclipse/jgit/internal/storage/dfs/DfsBlockCacheTest.java
org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/dfs/DfsBlockCache.java

index fef0563f486baf1935fe17187615aa071975f316..3c7cc075d2137557e32bc5ed19c96fb8d631ce78 100644 (file)
@@ -13,20 +13,24 @@ package org.eclipse.jgit.internal.storage.dfs;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.time.Duration;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.LongStream;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.LongStream;
 
+import org.eclipse.jgit.internal.storage.dfs.DfsBlockCacheConfig.DfsBlockCachePackExtConfig;
 import org.eclipse.jgit.internal.storage.dfs.DfsBlockCacheConfig.IndexEventConsumer;
 import org.eclipse.jgit.internal.storage.pack.PackExt;
 import org.eclipse.jgit.junit.TestRepository;
@@ -39,14 +43,35 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
 
+@RunWith(Parameterized.class)
 public class DfsBlockCacheTest {
        @Rule
        public TestName testName = new TestName();
+
        private TestRng rng;
+
        private DfsBlockCache cache;
+
        private ExecutorService pool;
 
+       private enum CacheType {
+               SINGLE_TABLE_CLOCK_BLOCK_CACHE, EXT_SPLIT_TABLE_CLOCK_BLOCK_CACHE
+       }
+
+       @Parameters(name = "cache type: {0}")
+       public static Iterable<? extends Object> data() {
+               return Arrays.asList(CacheType.SINGLE_TABLE_CLOCK_BLOCK_CACHE,
+                               CacheType.EXT_SPLIT_TABLE_CLOCK_BLOCK_CACHE);
+       }
+
+       @Parameter
+       public CacheType cacheType;
+
        @Before
        public void setUp() {
                rng = new TestRng(testName.getMethodName());
@@ -448,8 +473,28 @@ public class DfsBlockCacheTest {
        }
 
        private void resetCache(int concurrencyLevel) {
-               DfsBlockCache.reconfigure(new DfsBlockCacheConfig().setBlockSize(512)
-                               .setConcurrencyLevel(concurrencyLevel).setBlockLimit(1 << 20));
+               DfsBlockCacheConfig cacheConfig = new DfsBlockCacheConfig()
+                               .setBlockSize(512).setConcurrencyLevel(concurrencyLevel)
+                               .setBlockLimit(1 << 20);
+               switch (cacheType) {
+               case SINGLE_TABLE_CLOCK_BLOCK_CACHE:
+                       // SINGLE_TABLE_CLOCK_BLOCK_CACHE doesn't modify the config.
+                       break;
+               case EXT_SPLIT_TABLE_CLOCK_BLOCK_CACHE:
+                       List<DfsBlockCachePackExtConfig> packExtCacheConfigs = new ArrayList<>();
+                       for (PackExt packExt : PackExt.values()) {
+                               DfsBlockCacheConfig extCacheConfig = new DfsBlockCacheConfig()
+                                               .setBlockSize(512).setConcurrencyLevel(concurrencyLevel)
+                                               .setBlockLimit(1 << 20)
+                                               .setPackExtCacheConfigurations(packExtCacheConfigs);
+                               packExtCacheConfigs.add(new DfsBlockCachePackExtConfig(
+                                               EnumSet.of(packExt), extCacheConfig));
+                       }
+                       cacheConfig.setPackExtCacheConfigurations(packExtCacheConfigs);
+                       break;
+               }
+               assertNotNull(cacheConfig);
+               DfsBlockCache.reconfigure(cacheConfig);
                cache = DfsBlockCache.getInstance();
        }
 
index 3e1300c8677645e79d59ec6eeee3d119c7226bfa..0334450fbe482044ec8775f607452f4d65b42c6b 100644 (file)
@@ -97,7 +97,12 @@ public final class DfsBlockCache {
                double streamRatio = cfg.getStreamRatio();
                maxStreamThroughCache = (long) (maxBytes * streamRatio);
 
-               dfsBlockCacheTable = new ClockBlockCacheTable(cfg);
+               if (!cfg.getPackExtCacheConfigurations().isEmpty()) {
+                       dfsBlockCacheTable = PackExtBlockCacheTable
+                                       .fromBlockCacheConfigs(cfg);
+               } else {
+                       dfsBlockCacheTable = new ClockBlockCacheTable(cfg);
+               }
 
                for (int i = 0; i < PackExt.values().length; ++i) {
                        Integer limit = cfg.getCacheHotMap().get(PackExt.values()[i]);
@@ -158,8 +163,7 @@ public final class DfsBlockCache {
         * @return total number of requests (hit + miss), per pack file extension.
         */
        public long[] getTotalRequestCount() {
-               return dfsBlockCacheTable.getBlockCacheStats()
-                               .getTotalRequestCount();
+               return dfsBlockCacheTable.getBlockCacheStats().getTotalRequestCount();
        }
 
        /**
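
As a usage note on the stats accessor touched in the second hunk: request
counts remain reported per pack file extension regardless of which table
implementation backs the cache. A small illustrative read-out, assuming the
returned array is indexed in PackExt.values() order as the cacheHotLimits loop
in the constructor suggests:

    // Hypothetical read-out of per-extension request counts; the indexing
    // assumption above is not guaranteed by this diff alone.
    long[] requests = DfsBlockCache.getInstance().getTotalRequestCount();
    for (int i = 0; i < PackExt.values().length && i < requests.length; ++i) {
            System.out.println(PackExt.values()[i] + ": " + requests[i]);
    }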