import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.LongStream;
+import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.junit.TestRng;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectInserter;
}
}
+ @SuppressWarnings("resource")
+ @Test
+ public void hasCacheHotMap() throws Exception {
+ Map<PackExt, Integer> cacheHotMap = new HashMap<>();
+ // Pack index will be kept in cache longer.
+ cacheHotMap.put(PackExt.INDEX, Integer.valueOf(3));
+ DfsBlockCache.reconfigure(new DfsBlockCacheConfig().setBlockSize(512)
+ .setBlockLimit(512 * 4).setCacheHotMap(cacheHotMap));
+ cache = DfsBlockCache.getInstance();
+
+ DfsRepositoryDescription repo = new DfsRepositoryDescription("test");
+ InMemoryRepository r1 = new InMemoryRepository(repo);
+ byte[] content = rng.nextBytes(424242);
+ ObjectId id;
+ try (ObjectInserter ins = r1.newObjectInserter()) {
+ id = ins.insert(OBJ_BLOB, content);
+ ins.flush();
+ }
+
+ try (ObjectReader rdr = r1.newObjectReader()) {
+ byte[] actual = rdr.open(id, OBJ_BLOB).getBytes();
+ assertTrue(Arrays.equals(content, actual));
+ }
+ // All cache entries are hot and cache is at capacity.
+ assertTrue(LongStream.of(cache.getHitCount()).sum() > 0);
+ assertEquals(99, cache.getFillPercentage());
+
+ InMemoryRepository r2 = new InMemoryRepository(repo);
+ content = rng.nextBytes(424242);
+ try (ObjectInserter ins = r2.newObjectInserter()) {
+ ins.insert(OBJ_BLOB, content);
+ ins.flush();
+ }
+ assertTrue(LongStream.of(cache.getMissCount()).sum() > 0);
+ assertTrue(cache.getEvictions()[PackExt.PACK.getPosition()] > 0);
+ assertEquals(0, cache.getEvictions()[PackExt.INDEX.getPosition()]);
+ }
+
private void resetCache() {
DfsBlockCache.reconfigure(new DfsBlockCacheConfig()
.setBlockSize(512)
* It's too expensive during object access to be accurate with a least recently
* used (LRU) algorithm. Strictly ordering every read is a lot of overhead that
* typically doesn't yield a corresponding benefit to the application. This
- * cache implements a clock replacement algorithm, giving each block one chance
- * to have been accessed during a sweep of the cache to save itself from
- * eviction.
+ * cache implements a clock replacement algorithm, giving each block at least
+ * one chance to have been accessed during a sweep of the cache to save itself
+ * from eviction. The number of swipe chances is configurable per pack
+ * extension.
* <p>
* Entities created by the cache are held under hard references, preventing the
* Java VM from clearing anything. Blocks are discarded by the replacement
/** Current position of the clock. */
private Ref clockHand;
+ /** Limits of cache hot count per pack file extension. */
+ private final int[] cacheHotLimits = new int[PackExt.values().length];
+
@SuppressWarnings("unchecked")
private DfsBlockCache(DfsBlockCacheConfig cfg) {
tableSize = tableSize(cfg);
liveBytes = new AtomicReference<>(newCounters());
refLockWaitTime = cfg.getRefLockWaitTimeConsumer();
+
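+ // Seed per-extension hot limits; extensions without a positive
+ // entry in the map fall back to DEFAULT_CACHE_HOT_MAX.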
+ for (int i = 0; i < PackExt.values().length; ++i) {
+ Integer limit = cfg.getCacheHotMap().get(PackExt.values()[i]);
+ if (limit != null && limit.intValue() > 0) {
+ cacheHotLimits[i] = limit.intValue();
+ } else {
+ cacheHotLimits[i] = DfsBlockCacheConfig.DEFAULT_CACHE_HOT_MAX;
+ }
+ }
}
boolean shouldCopyThroughCache(long length) {
}
Ref<DfsBlock> ref = new Ref<>(key, position, v.size(), v);
- ref.hot = true;
+ ref.markHotter();
for (;;) {
HashEntry n = new HashEntry(clean(e2), ref);
if (table.compareAndSet(slot, e2, n)) {
Ref prev = clockHand;
Ref hand = clockHand.next;
do {
- if (hand.hot) {
- // Value was recently touched. Clear
- // hot and give it another chance.
- hand.hot = false;
+ if (hand.isHot()) {
+ // Value was recently touched and is still hot, so give it
+ // another chance, but cool it down a bit.
+ hand.markColder();
prev = hand;
hand = hand.next;
continue;
}
getStat(statMiss, key).incrementAndGet();
ref = loader.load();
- ref.hot = true;
+ ref.markHotter();
// Reserve after loading to get the size of the object
reserveSpace(ref.size, key);
for (;;) {
}
ref = new Ref<>(key, pos, size, v);
- ref.hot = true;
+ ref.markHotter();
for (;;) {
HashEntry n = new HashEntry(clean(e2), ref);
if (table.compareAndSet(slot, e2, n)) {
final long size;
volatile T value;
Ref next;
- volatile boolean hot;
+
+ private volatile int hotCount;
Ref(DfsStreamKey key, long position, long size, T v) {
this.key = key;
T get() {
T v = value;
if (v != null) {
- hot = true;
+ markHotter();
}
return v;
}
boolean has() {
return value != null;
}
+
+ void markHotter() {
+ int cap = DfsBlockCache
+ .getInstance().cacheHotLimits[key.packExtPos];
+ hotCount = Math.min(cap, hotCount + 1);
+ }
+
+ void markColder() {
+ hotCount = Math.max(0, hotCount - 1);
+ }
+
+ boolean isHot() {
+ return hotCount > 0;
+ }
}
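
As an aside, not part of the patch itself: the hot-count lifecycle of a Ref can be traced with a few lines of plain Java. The limit of 3 mirrors the PackExt.INDEX entry used in the test above, and the Math.min/Math.max steps are exactly what markHotter() and markColder() do.

int limit = 3;                            // cacheHotMap value for PackExt.INDEX
int hotCount = 0;
hotCount = Math.min(limit, hotCount + 1); // block read: hotCount = 1
hotCount = Math.min(limit, hotCount + 1); // read again: hotCount = 2
hotCount = Math.max(0, hotCount - 1);     // clock sweep: hotCount = 1, still hot
hotCount = Math.max(0, hotCount - 1);     // clock sweep: hotCount = 0, now cold
boolean evictable = hotCount == 0;        // isHot() returns false; next sweep may evict
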
@FunctionalInterface
import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_STREAM_RATIO;
import java.text.MessageFormat;
+import java.util.Collections;
+import java.util.Map;
import java.util.function.Consumer;
import org.eclipse.jgit.internal.JGitText;
+import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.lib.Config;
/**
/** 1024 {@link #KB} (number of bytes in one mebibyte/megabyte) */
public static final int MB = 1024 * KB;
+ /** Default maximum hot count a cache block can accumulate. */
+ public static final int DEFAULT_CACHE_HOT_MAX = 1;
+
private long blockLimit;
private int blockSize;
private double streamRatio;
private Consumer<Long> refLock;
+ private Map<PackExt, Integer> cacheHotMap;
+
/**
* Create a default configuration.
*/
setBlockSize(64 * KB);
setStreamRatio(0.30);
setConcurrencyLevel(32);
+ cacheHotMap = Collections.emptyMap();
}
/**
return this;
}
+ /**
+ * Get the map of hot count per pack extension for {@code DfsBlockCache}.
+ *
+ * @return map of hot count per pack extension for {@code DfsBlockCache}.
+ */
+ public Map<PackExt, Integer> getCacheHotMap() {
+ return cacheHotMap;
+ }
+
+ /**
+ * Set the map of hot count per pack extension for {@code DfsBlockCache}.
+ *
+ * @param cacheHotMap
+ * map of hot count per pack extension for {@code DfsBlockCache}.
+ * @return {@code this}
+ */
+ public DfsBlockCacheConfig setCacheHotMap(
+ Map<PackExt, Integer> cacheHotMap) {
+ this.cacheHotMap = Collections.unmodifiableMap(cacheHotMap);
+ return this;
+ }
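+
For illustration only, and not part of this change: a server embedding JGit could combine the new option with the existing cache settings roughly as below. The block size and limit are arbitrary example values, and giving BITMAP_INDEX the same weight as INDEX is an assumption of this sketch, not something the patch requires. Imports match those already used in this change (HashMap, Map, PackExt) plus DfsBlockCache and DfsBlockCacheConfig.

Map<PackExt, Integer> hot = new HashMap<>();
hot.put(PackExt.INDEX, Integer.valueOf(3));        // let .idx blocks survive 3 sweeps
hot.put(PackExt.BITMAP_INDEX, Integer.valueOf(3)); // assumption: treat bitmaps like indexes
DfsBlockCache.reconfigure(new DfsBlockCacheConfig()
        .setBlockSize(64 * 1024)                   // example values only
        .setBlockLimit(256L * 1024 * 1024)
        .setCacheHotMap(hot));
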
+
/**
* Update properties by setting fields from the configuration.
* <p>