You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

DfsBlockCache.java 17KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584
  1. /*
  2. * Copyright (C) 2008-2011, Google Inc.
  3. * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
  4. * and other copyright owners as documented in the project's IP log.
  5. *
  6. * This program and the accompanying materials are made available
  7. * under the terms of the Eclipse Distribution License v1.0 which
  8. * accompanies this distribution, is reproduced below, and is
  9. * available at http://www.eclipse.org/org/documents/edl-v10.php
  10. *
  11. * All rights reserved.
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials provided
  23. * with the distribution.
  24. *
  25. * - Neither the name of the Eclipse Foundation, Inc. nor the
  26. * names of its contributors may be used to endorse or promote
  27. * products derived from this software without specific prior
  28. * written permission.
  29. *
  30. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  31. * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
  32. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  33. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  34. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  35. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  36. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  37. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  38. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  39. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  40. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  41. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  42. * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43. */
  44. package org.eclipse.jgit.storage.dfs;
  45. import java.io.IOException;
  46. import java.util.ArrayList;
  47. import java.util.Collection;
  48. import java.util.Collections;
  49. import java.util.List;
  50. import java.util.Map;
  51. import java.util.concurrent.ConcurrentHashMap;
  52. import java.util.concurrent.ThreadPoolExecutor;
  53. import java.util.concurrent.atomic.AtomicLong;
  54. import java.util.concurrent.atomic.AtomicReferenceArray;
  55. import java.util.concurrent.locks.ReentrantLock;
  56. import org.eclipse.jgit.JGitText;
  57. /**
  58. * Caches slices of a {@link DfsPackFile} in memory for faster read access.
  59. * <p>
  60. * The DfsBlockCache serves as a Java based "buffer cache", loading segments of
  61. * a DfsPackFile into the JVM heap prior to use. As JGit often wants to do reads
  62. * of only tiny slices of a file, the DfsBlockCache tries to smooth out these
  63. * tiny reads into larger block-sized IO operations.
  64. * <p>
  65. * Whenever a cache miss occurs, loading is invoked by exactly one thread for
  66. * the given <code>(DfsPackKey,position)</code> key tuple. This is ensured by an
  67. * array of locks, with the tuple hashed to a lock instance.
  68. * <p>
  69. * It's too expensive during object access to be accurate with a least recently
  70. * used (LRU) algorithm. Strictly ordering every read is a lot of overhead that
  71. * typically doesn't yield a corresponding benefit to the application. This
  72. * cache implements a clock replacement algorithm, giving each block one chance
  73. * to have been accessed during a sweep of the cache to save itself from
  74. * eviction.
  75. * <p>
  76. * Entities created by the cache are held under hard references, preventing the
  77. * Java VM from clearing anything. Blocks are discarded by the replacement
  78. * algorithm when adding a new block would cause the cache to exceed its
  79. * configured maximum size.
  80. * <p>
  81. * The key tuple is passed through to methods as a pair of parameters rather
  82. * than as a single Object, thus reducing the transient memory allocations of
  83. * callers. It is more efficient to avoid the allocation, as we can't be 100%
  84. * sure that a JIT would be able to stack-allocate a key tuple.
  85. * <p>
  86. * The internal hash table does not expand at runtime, instead it is fixed in
  87. * size at cache creation time. The internal lock table used to gate load
  88. * invocations is also fixed in size.
  89. */
  90. public final class DfsBlockCache {
	/** Singleton cache instance; swapped atomically by {@link #reconfigure(DfsBlockCacheConfig)}. */
	private static volatile DfsBlockCache cache;

	static {
		// Install a cache with default settings at class load time so
		// getInstance() never observes null.
		reconfigure(new DfsBlockCacheConfig());
	}
  95. /**
  96. * Modify the configuration of the window cache.
  97. * <p>
  98. * The new configuration is applied immediately, and the existing cache is
  99. * cleared.
  100. *
  101. * @param cfg
  102. * the new window cache configuration.
  103. * @throws IllegalArgumentException
  104. * the cache configuration contains one or more invalid
  105. * settings, usually too low of a limit.
  106. */
  107. public static void reconfigure(DfsBlockCacheConfig cfg) {
  108. DfsBlockCache nc = new DfsBlockCache(cfg);
  109. DfsBlockCache oc = cache;
  110. cache = nc;
  111. if (oc != null) {
  112. if (oc.readAheadService != null)
  113. oc.readAheadService.shutdown();
  114. for (DfsPackFile pack : oc.getPackFiles())
  115. pack.key.cachedSize.set(0);
  116. }
  117. }
  118. /** @return the currently active DfsBlockCache. */
  119. public static DfsBlockCache getInstance() {
  120. return cache;
  121. }
	/** Number of entries in {@link #table}; fixed at construction time. */
	private final int tableSize;

	/** Hash bucket directory; entries are chained below. */
	private final AtomicReferenceArray<HashEntry> table;

	/** Locks to prevent concurrent loads for same (PackFile,position). */
	private final ReentrantLock[] loadLocks;

	/** Maximum number of bytes the cache should hold. */
	private final long maxBytes;

	/**
	 * Suggested block size to read from pack files in.
	 * <p>
	 * If a pack file does not have a native block size, this size will be used.
	 * <p>
	 * If a pack file has a native size, a whole multiple of the native size
	 * will be used until it matches this size.
	 */
	private final int blockSize;

	/** As {@link #blockSize} is a power of 2, bits to shift for a / blockSize. */
	private final int blockSizeShift;

	/** Number of bytes to read-ahead from current read position. */
	private final int readAheadLimit;

	/** Thread pool to handle optimistic read-ahead; may be null (disabled). */
	private final ThreadPoolExecutor readAheadService;

	/** Cache of pack files, indexed by description. */
	private final Map<DfsPackDescription, DfsPackFile> packCache;

	/** Unmodifiable view of pack files in the pack cache. */
	private final Collection<DfsPackFile> packFiles;

	/** Number of times a block was found in the cache. */
	private final AtomicLong statHit;

	/** Number of times a block was not found, and had to be loaded. */
	private final AtomicLong statMiss;

	/** Number of blocks evicted due to cache being full; guarded by {@link #clockLock}. */
	private volatile long statEvict;

	/** Protects the clock and its related data. */
	private final ReentrantLock clockLock;

	/** Current position of the clock; sentinel of a circular list. */
	private Ref clockHand;

	/** Number of bytes currently loaded in the cache. */
	private volatile long liveBytes;
  161. private DfsBlockCache(final DfsBlockCacheConfig cfg) {
  162. tableSize = tableSize(cfg);
  163. if (tableSize < 1)
  164. throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1);
  165. table = new AtomicReferenceArray<HashEntry>(tableSize);
  166. loadLocks = new ReentrantLock[32];
  167. for (int i = 0; i < loadLocks.length; i++)
  168. loadLocks[i] = new ReentrantLock(true /* fair */);
  169. int eb = (int) (tableSize * .1);
  170. if (64 < eb)
  171. eb = 64;
  172. else if (eb < 4)
  173. eb = 4;
  174. if (tableSize < eb)
  175. eb = tableSize;
  176. maxBytes = cfg.getBlockLimit();
  177. blockSize = cfg.getBlockSize();
  178. blockSizeShift = Integer.numberOfTrailingZeros(blockSize);
  179. clockLock = new ReentrantLock(true /* fair */);
  180. clockHand = new Ref<Object>(null, -1, 0, null);
  181. clockHand.next = clockHand;
  182. readAheadLimit = cfg.getReadAheadLimit();
  183. readAheadService = cfg.getReadAheadService();
  184. packCache = new ConcurrentHashMap<DfsPackDescription, DfsPackFile>(
  185. 16, 0.75f, 1);
  186. packFiles = Collections.unmodifiableCollection(packCache.values());
  187. statHit = new AtomicLong();
  188. statMiss = new AtomicLong();
  189. }
  190. /** @return total number of bytes in the cache. */
  191. public long getCurrentSize() {
  192. return liveBytes;
  193. }
  194. /** @return 0..100, defining how full the cache is. */
  195. public long getFillPercentage() {
  196. return getCurrentSize() * 100 / maxBytes;
  197. }
  198. /** @return 0..100, defining number of cache hits. */
  199. public long getHitRatio() {
  200. long hits = statHit.get();
  201. long miss = statMiss.get();
  202. long total = hits + miss;
  203. if (total == 0)
  204. return 0;
  205. return hits * 100 / total;
  206. }
  207. /** @return number of evictions performed due to cache being full. */
  208. public long getEvictions() {
  209. return statEvict;
  210. }
  211. /**
  212. * Get the pack files stored in this cache.
  213. *
  214. * @return a collection of pack files, some of which may not actually be
  215. * present; the caller should check the pack's cached size.
  216. */
  217. public Collection<DfsPackFile> getPackFiles() {
  218. return packFiles;
  219. }
  220. DfsPackFile getOrCreate(DfsPackDescription dsc, DfsPackKey key) {
  221. // TODO This table grows without bound. It needs to clean up
  222. // entries that aren't in cache anymore, and aren't being used
  223. // by a live DfsObjDatabase reference.
  224. synchronized (packCache) {
  225. DfsPackFile pack = packCache.get(dsc);
  226. if (pack != null && pack.invalid()) {
  227. packCache.remove(dsc);
  228. pack = null;
  229. }
  230. if (pack == null) {
  231. if (key == null)
  232. key = new DfsPackKey();
  233. pack = new DfsPackFile(this, dsc, key);
  234. packCache.put(dsc, pack);
  235. }
  236. return pack;
  237. }
  238. }
  239. private int hash(int packHash, long off) {
  240. return packHash + (int) (off >>> blockSizeShift);
  241. }
  242. int getBlockSize() {
  243. return blockSize;
  244. }
  245. private static int tableSize(final DfsBlockCacheConfig cfg) {
  246. final int wsz = cfg.getBlockSize();
  247. final long limit = cfg.getBlockLimit();
  248. if (wsz <= 0)
  249. throw new IllegalArgumentException(JGitText.get().invalidWindowSize);
  250. if (limit < wsz)
  251. throw new IllegalArgumentException(JGitText.get().windowSizeMustBeLesserThanLimit);
  252. return (int) Math.min(5 * (limit / wsz) / 2, Integer.MAX_VALUE);
  253. }
	/**
	 * Lookup a cached object, creating and loading it if it doesn't exist.
	 *
	 * @param pack
	 *            the pack that "contains" the cached object.
	 * @param position
	 *            offset within <code>pack</code> of the object.
	 * @param ctx
	 *            current thread's reader.
	 * @return the object reference.
	 * @throws IOException
	 *             the reference was not in the cache and could not be loaded.
	 */
	DfsBlock getOrLoad(DfsPackFile pack, long position, DfsReader ctx)
			throws IOException {
		final long requestedPosition = position;
		position = pack.alignToBlock(position);

		DfsPackKey key = pack.key;
		int slot = slot(key, position);
		HashEntry e1 = table.get(slot);
		DfsBlock v = scan(e1, key, position);
		if (v != null)
			return v;

		// Cache miss. Reserve capacity up front (evicting if needed), then
		// load under the per-region lock so only one thread reads any given
		// (pack, position) tuple from storage at a time.
		reserveSpace(blockSize);
		ReentrantLock regionLock = lockFor(key, position);
		regionLock.lock();
		try {
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				// Bucket chain changed while we waited for the lock; another
				// thread may have loaded this block already.
				v = scan(e2, key, position);
				if (v != null) {
					creditSpace(blockSize);
					return v;
				}
			}

			statMiss.incrementAndGet();
			boolean credit = true;
			try {
				v = pack.readOneBlock(position, ctx);
				credit = false;
			} finally {
				// Return the reservation if the read threw.
				if (credit)
					creditSpace(blockSize);
			}
			if (position != v.start) {
				// The file discovered its blockSize and adjusted.
				position = v.start;
				slot = slot(key, position);
				e2 = table.get(slot);
			}
			key.cachedSize.addAndGet(v.size());
			Ref<DfsBlock> ref = new Ref<DfsBlock>(key, position, v.size(), v);
			ref.hot = true;
			// Publish the entry with CAS, retrying if the bucket chain was
			// concurrently modified; clean() drops dead entries as we go.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n))
					break;
				e2 = table.get(slot);
			}
			// Adjust liveBytes by the difference between the reservation
			// (blockSize) and the block's actual size.
			addToClock(ref, blockSize - v.size());
		} finally {
			regionLock.unlock();
		}

		// If the block size changed from the default, it is possible the block
		// that was loaded is the wrong block for the requested position.
		if (v.contains(pack.key, requestedPosition))
			return v;
		return getOrLoad(pack, requestedPosition, ctx);
	}
  323. @SuppressWarnings("unchecked")
  324. private void reserveSpace(int reserve) {
  325. clockLock.lock();
  326. long live = liveBytes + reserve;
  327. if (maxBytes < live) {
  328. Ref prev = clockHand;
  329. Ref hand = clockHand.next;
  330. do {
  331. if (hand.hot) {
  332. // Value was recently touched. Clear
  333. // hot and give it another chance.
  334. hand.hot = false;
  335. prev = hand;
  336. hand = hand.next;
  337. continue;
  338. } else if (prev == hand)
  339. break;
  340. // No recent access since last scan, kill
  341. // value and remove from clock.
  342. Ref dead = hand;
  343. hand = hand.next;
  344. prev.next = hand;
  345. dead.next = null;
  346. dead.value = null;
  347. live -= dead.size;
  348. dead.pack.cachedSize.addAndGet(-dead.size);
  349. statEvict++;
  350. } while (maxBytes < live);
  351. clockHand = prev;
  352. }
  353. liveBytes = live;
  354. clockLock.unlock();
  355. }
  356. private void creditSpace(int credit) {
  357. clockLock.lock();
  358. liveBytes -= credit;
  359. clockLock.unlock();
  360. }
  361. private void addToClock(Ref ref, int credit) {
  362. clockLock.lock();
  363. if (credit != 0)
  364. liveBytes -= credit;
  365. Ref ptr = clockHand;
  366. ref.next = ptr.next;
  367. ptr.next = ref;
  368. clockHand = ref;
  369. clockLock.unlock();
  370. }
  371. void put(DfsBlock v) {
  372. put(v.pack, v.start, v.size(), v);
  373. }
	/**
	 * Cache an arbitrary value under a (pack, position) key tuple, unless a
	 * live entry already exists, in which case the existing reference wins.
	 *
	 * @param key
	 *            pack the value belongs to.
	 * @param pos
	 *            offset within the pack.
	 * @param size
	 *            number of bytes charged against the cache for this value.
	 * @param v
	 *            the value to cache.
	 * @return reference holding either {@code v} or the previously cached
	 *         value for the same key tuple.
	 */
	<T> Ref<T> put(DfsPackKey key, long pos, int size, T v) {
		int slot = slot(key, pos);
		HashEntry e1 = table.get(slot);
		Ref<T> ref = scanRef(e1, key, pos);
		if (ref != null)
			return ref;

		reserveSpace(size);
		ReentrantLock regionLock = lockFor(key, pos);
		regionLock.lock();
		try {
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				// Bucket chain changed while acquiring the lock; re-check
				// for a concurrent insert of the same key tuple.
				ref = scanRef(e2, key, pos);
				if (ref != null) {
					creditSpace(size);
					return ref;
				}
			}

			key.cachedSize.addAndGet(size);
			ref = new Ref<T>(key, pos, size, v);
			ref.hot = true;
			// Publish with CAS, retrying on concurrent chain updates;
			// clean() drops dead entries from the rebuilt chain.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n))
					break;
				e2 = table.get(slot);
			}
			addToClock(ref, 0);
		} finally {
			regionLock.unlock();
		}
		return ref;
	}
  407. boolean contains(DfsPackKey key, long position) {
  408. return scan(table.get(slot(key, position)), key, position) != null;
  409. }
  410. @SuppressWarnings("unchecked")
  411. <T> T get(DfsPackKey key, long position) {
  412. T val = (T) scan(table.get(slot(key, position)), key, position);
  413. if (val == null)
  414. statMiss.incrementAndGet();
  415. return val;
  416. }
  417. boolean readAhead(ReadableChannel rc, DfsPackKey key, int size, long pos,
  418. long len, DfsReader ctx) {
  419. if (!ctx.wantReadAhead() || readAheadLimit <= 0 || readAheadService == null)
  420. return false;
  421. int cap = readAheadLimit / size;
  422. long readAheadEnd = pos + readAheadLimit;
  423. List<ReadAheadTask.BlockFuture> blocks = new ArrayList<ReadAheadTask.BlockFuture>(cap);
  424. while (pos < readAheadEnd && pos < len) {
  425. long end = Math.min(pos + size, len);
  426. if (!contains(key, pos))
  427. blocks.add(new ReadAheadTask.BlockFuture(key, pos, end));
  428. pos = end;
  429. }
  430. if (blocks.isEmpty())
  431. return false;
  432. ReadAheadTask task = new ReadAheadTask(this, rc, blocks);
  433. ReadAheadTask.TaskFuture t = new ReadAheadTask.TaskFuture(task);
  434. for (ReadAheadTask.BlockFuture b : blocks)
  435. b.setTask(t);
  436. readAheadService.execute(t);
  437. ctx.startedReadAhead(blocks);
  438. return true;
  439. }
  440. @SuppressWarnings("unchecked")
  441. private <T> T scan(HashEntry n, DfsPackKey pack, long position) {
  442. for (; n != null; n = n.next) {
  443. Ref<T> r = n.ref;
  444. if (r.pack != pack || r.position != position)
  445. continue;
  446. T v = r.get();
  447. if (v == null)
  448. return null;
  449. statHit.incrementAndGet();
  450. return v;
  451. }
  452. return null;
  453. }
  454. @SuppressWarnings("unchecked")
  455. private <T> Ref<T> scanRef(HashEntry n, DfsPackKey pack, long position) {
  456. for (; n != null; n = n.next) {
  457. Ref<T> r = n.ref;
  458. if (r.pack == pack && r.position == position)
  459. return r.get() != null ? r : null;
  460. }
  461. return null;
  462. }
  463. void remove(DfsPackFile pack) {
  464. synchronized (packCache) {
  465. packCache.remove(pack.getPackDescription());
  466. }
  467. }
  468. private int slot(DfsPackKey pack, long position) {
  469. return (hash(pack.hash, position) >>> 1) % tableSize;
  470. }
  471. private ReentrantLock lockFor(DfsPackKey pack, long position) {
  472. return loadLocks[(hash(pack.hash, position) >>> 1) % loadLocks.length];
  473. }
	/**
	 * Rebuild a bucket chain with dead entries removed.
	 * <p>
	 * An entry is considered dead once its Ref has been unlinked from the
	 * replacement clock (ref.next == null, as set by reserveSpace eviction).
	 * The longest live tail of the original chain is shared unmodified to
	 * minimize allocation during the CAS publish loops.
	 */
	private static HashEntry clean(HashEntry top) {
		// Skip dead entries at the head of the chain.
		while (top != null && top.ref.next == null)
			top = top.next;
		if (top == null)
			return null;
		// Recursively clean the tail; reuse this node when nothing below
		// it changed, otherwise re-link it onto the cleaned tail.
		HashEntry n = clean(top.next);
		return n == top.next ? top : new HashEntry(n, top.ref);
	}
	/** Immutable node in a hash bucket's singly linked chain. */
	private static final class HashEntry {
		/** Next entry in the hash table's chain list. */
		final HashEntry next;

		/** The referenced object. */
		final Ref ref;

		HashEntry(HashEntry n, Ref r) {
			next = n;
			ref = r;
		}
	}
	/** A cached value together with its clock-replacement bookkeeping. */
	static final class Ref<T> {
		/** Pack that owns the cached value. */
		final DfsPackKey pack;

		/** Offset of the value within the pack. */
		final long position;

		/** Bytes charged against the cache for this value. */
		final int size;

		/** Cached value; cleared to null when this reference is evicted. */
		volatile T value;

		/** Next reference in the clock's circular list; null once evicted. */
		Ref next;

		/** Set on access; grants the entry one more pass of the clock hand. */
		volatile boolean hot;

		Ref(DfsPackKey pack, long position, int size, T v) {
			this.pack = pack;
			this.position = position;
			this.size = size;
			this.value = v;
		}

		/** @return the value, marking this entry hot; null if evicted. */
		T get() {
			T v = value;
			if (v != null)
				hot = true;
			return v;
		}
	}
  512. }