You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

DfsObjDatabase.java 21KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688
  1. /*
  2. * Copyright (C) 2011, Google Inc.
  3. * and other copyright owners as documented in the project's IP log.
  4. *
  5. * This program and the accompanying materials are made available
  6. * under the terms of the Eclipse Distribution License v1.0 which
  7. * accompanies this distribution, is reproduced below, and is
  8. * available at http://www.eclipse.org/org/documents/edl-v10.php
  9. *
  10. * All rights reserved.
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above copyright
  17. * notice, this list of conditions and the following disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials provided
  22. * with the distribution.
  23. *
  24. * - Neither the name of the Eclipse Foundation, Inc. nor the
  25. * names of its contributors may be used to endorse or promote
  26. * products derived from this software without specific prior
  27. * written permission.
  28. *
  29. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  30. * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
  31. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  32. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  33. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  34. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  35. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  36. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  37. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  38. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  39. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  40. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  41. * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  42. */
  43. package org.eclipse.jgit.internal.storage.dfs;
  44. import java.io.FileNotFoundException;
  45. import java.io.IOException;
  46. import java.util.ArrayList;
  47. import java.util.Collection;
  48. import java.util.Collections;
  49. import java.util.Comparator;
  50. import java.util.HashMap;
  51. import java.util.List;
  52. import java.util.Map;
  53. import java.util.Set;
  54. import java.util.concurrent.atomic.AtomicReference;
  55. import org.eclipse.jgit.internal.storage.pack.PackExt;
  56. import org.eclipse.jgit.lib.AnyObjectId;
  57. import org.eclipse.jgit.lib.ObjectDatabase;
  58. import org.eclipse.jgit.lib.ObjectInserter;
  59. import org.eclipse.jgit.lib.ObjectReader;
  60. /**
  61. * Manages objects stored in
  62. * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackFile} on a storage
  63. * system.
  64. */
  65. public abstract class DfsObjDatabase extends ObjectDatabase {
  66. private static final PackList NO_PACKS = new PackList(
  67. new DfsPackFile[0],
  68. new DfsReftable[0]) {
  69. @Override
  70. boolean dirty() {
  71. return true;
  72. }
  73. @Override
  74. void clearDirty() {
  75. // Always dirty.
  76. }
  77. @Override
  78. public void markDirty() {
  79. // Always dirty.
  80. }
  81. };
  82. /** Sources for a pack file. */
  83. public static enum PackSource {
  84. /** The pack is created by ObjectInserter due to local activity. */
  85. INSERT(0),
  86. /**
  87. * The pack is created by PackParser due to a network event.
  88. * <p>
  89. * A received pack can be from either a push into the repository, or a
  90. * fetch into the repository, the direction doesn't matter. A received
  91. * pack was built by the remote Git implementation and may not match the
  92. * storage layout preferred by this version. Received packs are likely
  93. * to be either compacted or garbage collected in the future.
  94. */
  95. RECEIVE(0),
  96. /**
  97. * The pack was created by compacting multiple packs together.
  98. * <p>
  99. * Packs created by compacting multiple packs together aren't nearly as
  100. * efficient as a fully garbage collected repository, but may save disk
  101. * space by reducing redundant copies of base objects.
  102. *
  103. * @see DfsPackCompactor
  104. */
  105. COMPACT(1),
  106. /**
  107. * Pack was created by Git garbage collection by this implementation.
  108. * <p>
  109. * This source is only used by the {@link DfsGarbageCollector} when it
  110. * builds a pack file by traversing the object graph and copying all
  111. * reachable objects into a new pack stream.
  112. *
  113. * @see DfsGarbageCollector
  114. */
  115. GC(2),
  116. /** Created from non-heads by {@link DfsGarbageCollector}. */
  117. GC_REST(3),
  118. /**
  119. * RefTreeGraph pack was created by Git garbage collection.
  120. *
  121. * @see DfsGarbageCollector
  122. */
  123. GC_TXN(4),
  124. /**
  125. * Pack was created by Git garbage collection.
  126. * <p>
  127. * This pack contains only unreachable garbage that was found during the
  128. * last GC pass. It is retained in a new pack until it is safe to prune
  129. * these objects from the repository.
  130. */
  131. UNREACHABLE_GARBAGE(5);
  132. final int category;
  133. PackSource(int category) {
  134. this.category = category;
  135. }
  136. }
  137. private final AtomicReference<PackList> packList;
  138. private final DfsRepository repository;
  139. private DfsReaderOptions readerOptions;
  140. /**
  141. * Initialize an object database for our repository.
  142. *
  143. * @param repository
  144. * repository owning this object database.
  145. * @param options
  146. * how readers should access the object database.
  147. */
  148. protected DfsObjDatabase(DfsRepository repository,
  149. DfsReaderOptions options) {
  150. this.repository = repository;
  151. this.packList = new AtomicReference<>(NO_PACKS);
  152. this.readerOptions = options;
  153. }
  154. /**
  155. * Get configured reader options, such as read-ahead.
  156. *
  157. * @return configured reader options, such as read-ahead.
  158. */
  159. public DfsReaderOptions getReaderOptions() {
  160. return readerOptions;
  161. }
  162. /** {@inheritDoc} */
  163. @Override
  164. public DfsReader newReader() {
  165. return new DfsReader(this);
  166. }
  167. /** {@inheritDoc} */
  168. @Override
  169. public ObjectInserter newInserter() {
  170. return new DfsInserter(this);
  171. }
  172. /**
  173. * Scan and list all available pack files in the repository.
  174. *
  175. * @return list of available packs. The returned array is shared with the
  176. * implementation and must not be modified by the caller.
  177. * @throws java.io.IOException
  178. * the pack list cannot be initialized.
  179. */
  180. public DfsPackFile[] getPacks() throws IOException {
  181. return getPackList().packs;
  182. }
  183. /**
  184. * Scan and list all available reftable files in the repository.
  185. *
  186. * @return list of available reftables. The returned array is shared with
  187. * the implementation and must not be modified by the caller.
  188. * @throws java.io.IOException
  189. * the pack list cannot be initialized.
  190. */
  191. public DfsReftable[] getReftables() throws IOException {
  192. return getPackList().reftables;
  193. }
  194. /**
  195. * Scan and list all available pack files in the repository.
  196. *
  197. * @return list of available packs, with some additional metadata. The
  198. * returned array is shared with the implementation and must not be
  199. * modified by the caller.
  200. * @throws java.io.IOException
  201. * the pack list cannot be initialized.
  202. */
  203. public PackList getPackList() throws IOException {
  204. return scanPacks(NO_PACKS);
  205. }
  206. /**
  207. * Get repository owning this object database.
  208. *
  209. * @return repository owning this object database.
  210. */
  211. protected DfsRepository getRepository() {
  212. return repository;
  213. }
  214. /**
  215. * List currently known pack files in the repository, without scanning.
  216. *
  217. * @return list of available packs. The returned array is shared with the
  218. * implementation and must not be modified by the caller.
  219. */
  220. public DfsPackFile[] getCurrentPacks() {
  221. return getCurrentPackList().packs;
  222. }
  223. /**
  224. * List currently known reftable files in the repository, without scanning.
  225. *
  226. * @return list of available reftables. The returned array is shared with
  227. * the implementation and must not be modified by the caller.
  228. */
  229. public DfsReftable[] getCurrentReftables() {
  230. return getCurrentPackList().reftables;
  231. }
  232. /**
  233. * List currently known pack files in the repository, without scanning.
  234. *
  235. * @return list of available packs, with some additional metadata. The
  236. * returned array is shared with the implementation and must not be
  237. * modified by the caller.
  238. */
  239. public PackList getCurrentPackList() {
  240. return packList.get();
  241. }
  242. /**
  243. * Does the requested object exist in this database?
  244. * <p>
  245. * This differs from ObjectDatabase's implementation in that we can selectively
  246. * ignore unreachable (garbage) objects.
  247. *
  248. * @param objectId
  249. * identity of the object to test for existence of.
  250. * @param avoidUnreachableObjects
  251. * if true, ignore objects that are unreachable.
  252. * @return true if the specified object is stored in this database.
  253. * @throws java.io.IOException
  254. * the object store cannot be accessed.
  255. */
  256. public boolean has(AnyObjectId objectId, boolean avoidUnreachableObjects)
  257. throws IOException {
  258. try (ObjectReader or = newReader()) {
  259. or.setAvoidUnreachableObjects(avoidUnreachableObjects);
  260. return or.has(objectId);
  261. }
  262. }
  263. /**
  264. * Generate a new unique name for a pack file.
  265. *
  266. * @param source
  267. * where the pack stream is created.
  268. * @return a unique name for the pack file. Must not collide with any other
  269. * pack file name in the same DFS.
  270. * @throws java.io.IOException
  271. * a new unique pack description cannot be generated.
  272. */
  273. protected abstract DfsPackDescription newPack(PackSource source)
  274. throws IOException;
  275. /**
  276. * Generate a new unique name for a pack file.
  277. *
  278. * <p>
  279. * Default implementation of this method would be equivalent to
  280. * {@code newPack(source).setEstimatedPackSize(estimatedPackSize)}. But the
  281. * clients can override this method to use the given
  282. * {@code estomatedPackSize} value more efficiently in the process of
  283. * creating a new
  284. * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackDescription} object.
  285. *
  286. * @param source
  287. * where the pack stream is created.
  288. * @param estimatedPackSize
  289. * the estimated size of the pack.
  290. * @return a unique name for the pack file. Must not collide with any other
  291. * pack file name in the same DFS.
  292. * @throws java.io.IOException
  293. * a new unique pack description cannot be generated.
  294. */
  295. protected DfsPackDescription newPack(PackSource source,
  296. long estimatedPackSize) throws IOException {
  297. DfsPackDescription pack = newPack(source);
  298. pack.setEstimatedPackSize(estimatedPackSize);
  299. return pack;
  300. }
  301. /**
  302. * Commit a pack and index pair that was written to the DFS.
  303. * <p>
  304. * Committing the pack/index pair makes them visible to readers. The JGit
  305. * DFS code always writes the pack, then the index. This allows a simple
  306. * commit process to do nothing if readers always look for both files to
  307. * exist and the DFS performs atomic creation of the file (e.g. stream to a
  308. * temporary file and rename to target on close).
  309. * <p>
  310. * During pack compaction or GC the new pack file may be replacing other
  311. * older files. Implementations should remove those older files (if any) as
  312. * part of the commit of the new file.
  313. * <p>
  314. * This method is a trivial wrapper around
  315. * {@link #commitPackImpl(Collection, Collection)} that calls the
  316. * implementation and fires events.
  317. *
  318. * @param desc
  319. * description of the new packs.
  320. * @param replaces
  321. * if not null, list of packs to remove.
  322. * @throws java.io.IOException
  323. * the packs cannot be committed. On failure a rollback must
  324. * also be attempted by the caller.
  325. */
  326. protected void commitPack(Collection<DfsPackDescription> desc,
  327. Collection<DfsPackDescription> replaces) throws IOException {
  328. commitPackImpl(desc, replaces);
  329. getRepository().fireEvent(new DfsPacksChangedEvent());
  330. }
  331. /**
  332. * Implementation of pack commit.
  333. *
  334. * @see #commitPack(Collection, Collection)
  335. * @param desc
  336. * description of the new packs.
  337. * @param replaces
  338. * if not null, list of packs to remove.
  339. * @throws java.io.IOException
  340. * the packs cannot be committed.
  341. */
  342. protected abstract void commitPackImpl(Collection<DfsPackDescription> desc,
  343. Collection<DfsPackDescription> replaces) throws IOException;
  344. /**
  345. * Try to rollback a pack creation.
  346. * <p>
  347. * JGit DFS always writes the pack first, then the index. If the pack does
  348. * not yet exist, then neither does the index. A safe DFS implementation
  349. * would try to remove both files to ensure they are really gone.
  350. * <p>
  351. * A rollback does not support failures, as it only occurs when there is
  352. * already a failure in progress. A DFS implementor may wish to log
  353. * warnings/error messages when a rollback fails, but should not send new
  354. * exceptions up the Java callstack.
  355. *
  356. * @param desc
  357. * pack to delete.
  358. */
  359. protected abstract void rollbackPack(Collection<DfsPackDescription> desc);
  360. /**
  361. * List the available pack files.
  362. * <p>
  363. * The returned list must support random access and must be mutable by the
  364. * caller. It is sorted in place using the natural sorting of the returned
  365. * DfsPackDescription objects.
  366. *
  367. * @return available packs. May be empty if there are no packs.
  368. * @throws java.io.IOException
  369. * the packs cannot be listed and the object database is not
  370. * functional to the caller.
  371. */
  372. protected abstract List<DfsPackDescription> listPacks() throws IOException;
  373. /**
  374. * Open a pack, pack index, or other related file for reading.
  375. *
  376. * @param desc
  377. * description of pack related to the data that will be read.
  378. * This is an instance previously obtained from
  379. * {@link #listPacks()}, but not necessarily from the same
  380. * DfsObjDatabase instance.
  381. * @param ext
  382. * file extension that will be read i.e "pack" or "idx".
  383. * @return channel to read the file.
  384. * @throws java.io.FileNotFoundException
  385. * the file does not exist.
  386. * @throws java.io.IOException
  387. * the file cannot be opened.
  388. */
  389. protected abstract ReadableChannel openFile(
  390. DfsPackDescription desc, PackExt ext)
  391. throws FileNotFoundException, IOException;
  392. /**
  393. * Open a pack, pack index, or other related file for writing.
  394. *
  395. * @param desc
  396. * description of pack related to the data that will be written.
  397. * This is an instance previously obtained from
  398. * {@link #newPack(PackSource)}.
  399. * @param ext
  400. * file extension that will be written i.e "pack" or "idx".
  401. * @return channel to write the file.
  402. * @throws java.io.IOException
  403. * the file cannot be opened.
  404. */
  405. protected abstract DfsOutputStream writeFile(
  406. DfsPackDescription desc, PackExt ext) throws IOException;
  407. void addPack(DfsPackFile newPack) throws IOException {
  408. PackList o, n;
  409. do {
  410. o = packList.get();
  411. if (o == NO_PACKS) {
  412. // The repository may not have needed any existing objects to
  413. // complete the current task of creating a pack (e.g. push of a
  414. // pack with no external deltas). Because we don't scan for
  415. // newly added packs on missed object lookups, scan now to
  416. // make sure all older packs are available in the packList.
  417. o = scanPacks(o);
  418. // Its possible the scan identified the pack we were asked to
  419. // add, as the pack was already committed via commitPack().
  420. // If this is the case return without changing the list.
  421. for (DfsPackFile p : o.packs) {
  422. if (p.key.equals(newPack.key)) {
  423. return;
  424. }
  425. }
  426. }
  427. DfsPackFile[] packs = new DfsPackFile[1 + o.packs.length];
  428. packs[0] = newPack;
  429. System.arraycopy(o.packs, 0, packs, 1, o.packs.length);
  430. n = new PackListImpl(packs, o.reftables);
  431. } while (!packList.compareAndSet(o, n));
  432. }
  433. void addReftable(DfsPackDescription add, Set<DfsPackDescription> remove)
  434. throws IOException {
  435. PackList o, n;
  436. do {
  437. o = packList.get();
  438. if (o == NO_PACKS) {
  439. o = scanPacks(o);
  440. for (DfsReftable t : o.reftables) {
  441. if (t.getPackDescription().equals(add)) {
  442. return;
  443. }
  444. }
  445. }
  446. List<DfsReftable> tables = new ArrayList<>(1 + o.reftables.length);
  447. for (DfsReftable t : o.reftables) {
  448. if (!remove.contains(t.getPackDescription())) {
  449. tables.add(t);
  450. }
  451. }
  452. tables.add(new DfsReftable(add));
  453. n = new PackListImpl(o.packs, tables.toArray(new DfsReftable[0]));
  454. } while (!packList.compareAndSet(o, n));
  455. }
  456. PackList scanPacks(PackList original) throws IOException {
  457. PackList o, n;
  458. synchronized (packList) {
  459. do {
  460. o = packList.get();
  461. if (o != original) {
  462. // Another thread did the scan for us, while we
  463. // were blocked on the monitor above.
  464. //
  465. return o;
  466. }
  467. n = scanPacksImpl(o);
  468. if (n == o)
  469. return n;
  470. } while (!packList.compareAndSet(o, n));
  471. }
  472. getRepository().fireEvent(new DfsPacksChangedEvent());
  473. return n;
  474. }
  475. private PackList scanPacksImpl(PackList old) throws IOException {
  476. DfsBlockCache cache = DfsBlockCache.getInstance();
  477. Map<DfsPackDescription, DfsPackFile> packs = packMap(old);
  478. Map<DfsPackDescription, DfsReftable> reftables = reftableMap(old);
  479. List<DfsPackDescription> scanned = listPacks();
  480. Collections.sort(scanned);
  481. List<DfsPackFile> newPacks = new ArrayList<>(scanned.size());
  482. List<DfsReftable> newReftables = new ArrayList<>(scanned.size());
  483. boolean foundNew = false;
  484. for (DfsPackDescription dsc : scanned) {
  485. DfsPackFile oldPack = packs.remove(dsc);
  486. if (oldPack != null) {
  487. newPacks.add(oldPack);
  488. } else if (dsc.hasFileExt(PackExt.PACK)) {
  489. newPacks.add(new DfsPackFile(cache, dsc));
  490. foundNew = true;
  491. }
  492. DfsReftable oldReftable = reftables.remove(dsc);
  493. if (oldReftable != null) {
  494. newReftables.add(oldReftable);
  495. } else if (dsc.hasFileExt(PackExt.REFTABLE)) {
  496. newReftables.add(new DfsReftable(cache, dsc));
  497. foundNew = true;
  498. }
  499. }
  500. if (newPacks.isEmpty() && newReftables.isEmpty())
  501. return new PackListImpl(NO_PACKS.packs, NO_PACKS.reftables);
  502. if (!foundNew) {
  503. old.clearDirty();
  504. return old;
  505. }
  506. Collections.sort(newReftables, reftableComparator());
  507. return new PackListImpl(
  508. newPacks.toArray(new DfsPackFile[0]),
  509. newReftables.toArray(new DfsReftable[0]));
  510. }
  511. private static Map<DfsPackDescription, DfsPackFile> packMap(PackList old) {
  512. Map<DfsPackDescription, DfsPackFile> forReuse = new HashMap<>();
  513. for (DfsPackFile p : old.packs) {
  514. if (!p.invalid()) {
  515. forReuse.put(p.desc, p);
  516. }
  517. }
  518. return forReuse;
  519. }
  520. private static Map<DfsPackDescription, DfsReftable> reftableMap(PackList old) {
  521. Map<DfsPackDescription, DfsReftable> forReuse = new HashMap<>();
  522. for (DfsReftable p : old.reftables) {
  523. if (!p.invalid()) {
  524. forReuse.put(p.desc, p);
  525. }
  526. }
  527. return forReuse;
  528. }
  529. /**
  530. * Get comparator to sort {@link DfsReftable} by priority.
  531. *
  532. * @return comparator to sort {@link DfsReftable} by priority.
  533. */
  534. protected Comparator<DfsReftable> reftableComparator() {
  535. return (fa, fb) -> {
  536. DfsPackDescription a = fa.getPackDescription();
  537. DfsPackDescription b = fb.getPackDescription();
  538. // GC, COMPACT reftables first by higher category.
  539. int c = category(b) - category(a);
  540. if (c != 0) {
  541. return c;
  542. }
  543. // Lower maxUpdateIndex first.
  544. c = Long.signum(a.getMaxUpdateIndex() - b.getMaxUpdateIndex());
  545. if (c != 0) {
  546. return c;
  547. }
  548. // Older reftable first.
  549. return Long.signum(a.getLastModified() - b.getLastModified());
  550. };
  551. }
  552. static int category(DfsPackDescription d) {
  553. PackSource s = d.getPackSource();
  554. return s != null ? s.category : 0;
  555. }
  556. /**
  557. * Clears the cached list of packs, forcing them to be scanned again.
  558. */
  559. protected void clearCache() {
  560. packList.set(NO_PACKS);
  561. }
  562. /** {@inheritDoc} */
  563. @Override
  564. public void close() {
  565. packList.set(NO_PACKS);
  566. }
  567. /** Snapshot of packs scanned in a single pass. */
  568. public static abstract class PackList {
  569. /** All known packs, sorted. */
  570. public final DfsPackFile[] packs;
  571. /** All known reftables, sorted. */
  572. public final DfsReftable[] reftables;
  573. private long lastModified = -1;
  574. PackList(DfsPackFile[] packs, DfsReftable[] reftables) {
  575. this.packs = packs;
  576. this.reftables = reftables;
  577. }
  578. /** @return last modified time of all packs, in milliseconds. */
  579. public long getLastModified() {
  580. if (lastModified < 0) {
  581. long max = 0;
  582. for (DfsPackFile pack : packs) {
  583. max = Math.max(max, pack.getPackDescription().getLastModified());
  584. }
  585. lastModified = max;
  586. }
  587. return lastModified;
  588. }
  589. abstract boolean dirty();
  590. abstract void clearDirty();
  591. /**
  592. * Mark pack list as dirty.
  593. * <p>
  594. * Used when the caller knows that new data might have been written to the
  595. * repository that could invalidate open readers depending on this pack list,
  596. * for example if refs are newly scanned.
  597. */
  598. public abstract void markDirty();
  599. }
  600. private static final class PackListImpl extends PackList {
  601. private volatile boolean dirty;
  602. PackListImpl(DfsPackFile[] packs, DfsReftable[] reftables) {
  603. super(packs, reftables);
  604. }
  605. @Override
  606. boolean dirty() {
  607. return dirty;
  608. }
  609. @Override
  610. void clearDirty() {
  611. dirty = false;
  612. }
  613. @Override
  614. public void markDirty() {
  615. dirty = true;
  616. }
  617. }
  618. }