author    Matthias Sohn <matthias.sohn@sap.com>    2024-12-20 00:13:26 +0100
committer Matthias Sohn <matthias.sohn@sap.com>    2024-12-20 00:13:26 +0100
commit    88e4cc6222c760d4e1a22d0cb3f5ac21cfa248ea (patch)
tree      e115aa5c43a02f893c9f462ae32117f3e4ca1fd9 /org.eclipse.jgit/src/org/eclipse
parent    6aefc8ec82276bc6f796e3ebc69e181de2638c3e (diff)
parent    d72387775d5e5a113b254f1be5ba64d3b1bb47ce (diff)
Merge branch 'stable-7.0' into stable-7.1

* stable-7.0:
  WindowCache: share removal work among multiple threads
  Update target platform version in maven build to 4.32 (2024-06)

Change-Id: Idd6a8a2c3b5c6f53645dbe7144afb2e1e24e8ca3
Diffstat (limited to 'org.eclipse.jgit/src/org/eclipse')
-rw-r--r--  org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/WindowCache.java  131
1 file changed, 126 insertions(+), 5 deletions(-)
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/WindowCache.java b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/WindowCache.java
index 3ec7e6e666..fd80faf4ed 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/WindowCache.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/file/WindowCache.java
@@ -15,6 +15,7 @@ import java.io.IOException;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.Collections;
+import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -402,7 +403,7 @@ public class WindowCache {
}
static final void purge(Set<Pack> packs) {
- cache.removeAll(packs);
+ cache.queueRemoveAll(packs);
}
/** cleanup released and/or garbage collected windows. */
@@ -446,6 +447,29 @@ public class WindowCache {
private final boolean useStrongIndexRefs;
+ /** Removers are purely CPU/mem bound (no I/O), so likely should not go above # CPUs */
+ private final int idealNumRemovers;
+
+ /** Number of blocks to split the Pack removal into.
+ *
+ * Consolidation is better with more blocks since it increases
+ * the wait before moving to the next set by allowing more work
+ * to accumulate in the next set. On the flip side, the more
+ * blocks, the more synchronization overhead, increasing each
+ * remover's latency.
+ */
+ private final int numRemovalBlocks;
+
+ private final int removalBlockSize;
+
+ private Set<Pack> packsToRemove = new HashSet<>();
+
+ private Set<Pack> packsBeingRemoved;
+
+ private int numRemovers;
+
+ private int blockBeingRemoved;
+
private WindowCache(WindowCacheConfig cfg) {
tableSize = tableSize(cfg);
final int lockCount = lockCount(cfg);
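The two pack sets introduced above drive the request consolidation described in the Javadoc further down in this diff: concurrent callers accumulate packs in packsToRemove, and whichever thread starts the next pass swaps that whole set into packsBeingRemoved. The toy class below (illustrative only, not JGit code; plain strings stand in for Pack objects so it compiles on its own) sketches just that queue-and-swap step, assuming all access is synchronized on the cache instance as in the diff:

    import java.util.HashSet;
    import java.util.Set;

    // Toy model of the packsToRemove / packsBeingRemoved hand-off.
    class RemovalQueueSketch {
        private Set<String> packsToRemove = new HashSet<>();
        private Set<String> packsBeingRemoved;

        // Every purging thread calls this; requests from concurrent callers
        // consolidate into the same pending set.
        synchronized void queue(Set<String> packs) {
            packsToRemove.addAll(packs);
        }

        // Called when a thread needs a fresh set of packs to work on: the
        // pending set is swapped in wholesale, so one pass over the cache
        // table serves every request queued so far.
        synchronized Set<String> startNextPass() {
            if (packsToRemove.isEmpty()) {
                return Set.of();
            }
            packsBeingRemoved = packsToRemove;
            packsToRemove = new HashSet<>();
            return packsBeingRemoved;
        }
    }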
@@ -484,6 +508,23 @@ public class WindowCache {
statsRecorder = mbean;
publishMBean.set(cfg.getExposeStatsViaJmx());
+ /* Since each worker will only process up to one full set of blocks, at least 2
+ * workers are needed anytime there are queued removals to ensure that all the
+ * blocks will get processed. However, if workers were capped at only 2, not
+ * enough new workers could ever start to make it safe for older workers to
+ * quit early. At least 3 workers are needed to make these older-worker relief
+ * transitions possible.
+ */
+ idealNumRemovers = Math.max(3, Runtime.getRuntime().availableProcessors());
+
+ int bs = 1024;
+ if (tableSize < 2 * bs) {
+ bs = tableSize / 2;
+ }
+ removalBlockSize = bs;
+ numRemovalBlocks = tableSize / removalBlockSize;
+ blockBeingRemoved = numRemovalBlocks - 1;
+
if (maxFiles < 1)
throw new IllegalArgumentException(JGitText.get().openFilesMustBeAtLeast1);
if (maxBytes < windowSize)
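To make the sizing above concrete, here is the same arithmetic run with made-up table sizes (illustrative values only, not JGit defaults):

    // Worked example of the block sizing performed in the constructor above;
    // the two table sizes passed to size() are hypothetical.
    public class BlockSizingExample {
        static void size(int tableSize) {
            int idealNumRemovers = Math.max(3, Runtime.getRuntime().availableProcessors());
            int bs = 1024;
            if (tableSize < 2 * bs) {
                bs = tableSize / 2; // small tables are still split into two blocks
            }
            int removalBlockSize = bs;
            int numRemovalBlocks = tableSize / removalBlockSize;
            // starting at the "last" block means the first queued removal
            // immediately swaps in a fresh set and begins scanning at block 0
            int blockBeingRemoved = numRemovalBlocks - 1;
            System.out.println(tableSize + " -> blockSize=" + removalBlockSize
                    + ", blocks=" + numRemovalBlocks
                    + ", initial blockBeingRemoved=" + blockBeingRemoved
                    + ", idealNumRemovers=" + idealNumRemovers);
        }

        public static void main(String[] args) {
            size(4096); // blockSize=1024, blocks=4, initial blockBeingRemoved=3
            size(1000); // blockSize=500,  blocks=2, initial blockBeingRemoved=1
        }
    }

On an 8-core machine idealNumRemovers would be 8; on a dual-core machine the floor of 3 still applies so that older workers can hand off to newer ones, as the comment above explains.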
@@ -713,7 +754,7 @@ public class WindowCache {
}
/**
- * Clear all entries related to a single file.
+ * Asynchronously clear all entries related to files.
* <p>
* Typically this method is invoked during {@link Pack#close()}, or
* {@link Pack#close(Set)}, when we know the packs are never going to be
@@ -721,11 +762,91 @@ public class WindowCache {
* A concurrent reader loading an entry from these same packs may cause a
* pack to become stuck in the cache anyway.
*
+ * Work on clearing files will be split up into blocks so that removing
+ * can be shared by more than one thread. This potential work sharing
+ * can provide 2 optimizations for removals:
+ * <ol>
+ * <li> It provides an opportunity for separate removal requests to be
+ * consolidated into one removal pass.</li>
+ * <li> It can reduce removing thread latencies by sharing the removal work
+ * with other removing threads which otherwise might not have any work to do
+ * due to their removal request being consolidated. This makes the system
+ * more efficient and can actually reduce latencies as system load increases
+ * due to pack removals!</li>
+ * </ol>
+ * The optimizations above are all achieved without blocking threads to wait
+ * for other threads to complete (although naturally there are some
+ * synchronization points), and while ensuring that no threads do more work
+ * than if they were the only thread available to perform a removal.
+ *
* @param packs
- * the files to purge all entries of.
+ * the files to purge all entries of
*/
- private void removeAll(Set<Pack> packs) {
- for (int s = 0; s < tableSize; s++) {
+ private void queueRemoveAll(Set<Pack> packs) {
+ synchronized (this) {
+ packsToRemove.addAll(packs);
+ if (numRemovers >= idealNumRemovers) {
+ return;
+ }
+ numRemovers++;
+ }
+ for (int numRemoved = 0; removeNextBlock(numRemoved); numRemoved++);
+ synchronized (this) {
+ if (numRemovers > 0) {
+ numRemovers--;
+ }
+ }
+ }
+
+ /** Determine which block to remove next, if any, and do so.
+ *
+ * @param numRemoved
+ * the number of already processed block removals by the current thread
+ * @return whether more processing should be done by the current thread
+ */
+ private boolean removeNextBlock(int numRemoved) {
+ Set<Pack> toRemove;
+ int block;
+ synchronized (this) {
+ if (packsBeingRemoved == null || blockBeingRemoved >= numRemovalBlocks - 1) {
+ if (packsToRemove.isEmpty()) {
+ return false;
+ }
+
+ blockBeingRemoved = 0;
+ packsBeingRemoved = packsToRemove;
+ packsToRemove = new HashSet<>();
+ } else {
+ blockBeingRemoved++;
+ }
+
+ toRemove = packsBeingRemoved;
+ block = blockBeingRemoved;
+ }
+
+ removeBlock(toRemove, block);
+ numRemoved++;
+
+ /* Ensure threads never work on more than a full set of blocks (the equivalent
+ * of removing one Pack) */
+ boolean isLast = numRemoved >= numRemovalBlocks;
+ synchronized (this) {
+ if (numRemovers >= idealNumRemovers) {
+ isLast = true;
+ }
+ }
+ return !isLast;
+ }
+
+ /** Remove a block of entries for a Set of files
+ * @param packs
+ * the files to purge all entries of
+ * @param block
+ * the specific block to process removals for
+ */
+ private void removeBlock(Set<Pack> packs, int block) {
+ int starting = block * removalBlockSize;
+ for (int s = starting; s < starting + removalBlockSize && s < tableSize; s++) {
final Entry e1 = table.get(s);
boolean hasDead = false;
for (Entry e = e1; e != null; e = e.next) {
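Tying the pieces together, the sketch below is a self-contained approximation (not JGit code; strings stand in for Pack objects, and the per-block table scan is replaced by a print) of how queueRemoveAll() and removeNextBlock() cooperate: a caller queues its packs, claims table blocks one at a time, and stops either after a full round of blocks or as soon as enough other removers are active:

    import java.util.HashSet;
    import java.util.Set;

    // Self-contained approximation of the queue/claim loop shown in the diff.
    public class BlockClaimSketch {
        private final int numRemovalBlocks = 4;   // hypothetical table split
        private final int idealNumRemovers = 3;   // floor from the constructor comment

        private Set<String> packsToRemove = new HashSet<>();
        private Set<String> packsBeingRemoved;
        private int blockBeingRemoved = numRemovalBlocks - 1;
        private int numRemovers;

        void queueRemoveAll(Set<String> packs) {
            synchronized (this) {
                packsToRemove.addAll(packs);
                if (numRemovers >= idealNumRemovers) {
                    return; // enough helpers already; this request rides along with them
                }
                numRemovers++;
            }
            for (int done = 0; removeNextBlock(done); done++) {
                // each iteration cleared one block on behalf of all queued requests
            }
            synchronized (this) {
                numRemovers--;
            }
        }

        private boolean removeNextBlock(int done) {
            Set<String> toRemove;
            int block;
            synchronized (this) {
                if (packsBeingRemoved == null || blockBeingRemoved >= numRemovalBlocks - 1) {
                    if (packsToRemove.isEmpty()) {
                        return false; // nothing queued, nothing in progress: stop
                    }
                    blockBeingRemoved = 0;
                    packsBeingRemoved = packsToRemove;
                    packsToRemove = new HashSet<>();
                } else {
                    blockBeingRemoved++;
                }
                toRemove = packsBeingRemoved;
                block = blockBeingRemoved;
            }
            System.out.println("scan block " + block + " removing " + toRemove);
            synchronized (this) {
                // stop after one full round, or early once enough other removers exist
                return done + 1 < numRemovalBlocks && numRemovers < idealNumRemovers;
            }
        }

        public static void main(String[] args) {
            BlockClaimSketch cache = new BlockClaimSketch();
            cache.queueRemoveAll(Set.of("pack-1")); // a lone thread scans blocks 0..3 itself
        }
    }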