You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

DfsGarbageCollector.java 12KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399
  1. /*
  2. * Copyright (C) 2011, Google Inc.
  3. * and other copyright owners as documented in the project's IP log.
  4. *
  5. * This program and the accompanying materials are made available
  6. * under the terms of the Eclipse Distribution License v1.0 which
  7. * accompanies this distribution, is reproduced below, and is
  8. * available at http://www.eclipse.org/org/documents/edl-v10.php
  9. *
  10. * All rights reserved.
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above copyright
  17. * notice, this list of conditions and the following disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials provided
  22. * with the distribution.
  23. *
  24. * - Neither the name of the Eclipse Foundation, Inc. nor the
  25. * names of its contributors may be used to endorse or promote
  26. * products derived from this software without specific prior
  27. * written permission.
  28. *
  29. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  30. * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
  31. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  32. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  33. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  34. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  35. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  36. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  37. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  38. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  39. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  40. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  41. * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  42. */
  43. package org.eclipse.jgit.internal.storage.dfs;
  44. import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC;
  45. import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
  46. import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
  47. import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
  48. import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
  49. import static org.eclipse.jgit.lib.RefDatabase.ALL;
  50. import java.io.IOException;
  51. import java.util.ArrayList;
  52. import java.util.Collections;
  53. import java.util.HashSet;
  54. import java.util.List;
  55. import java.util.Map;
  56. import java.util.Set;
  57. import org.eclipse.jgit.internal.JGitText;
  58. import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
  59. import org.eclipse.jgit.internal.storage.file.PackIndex;
  60. import org.eclipse.jgit.internal.storage.pack.PackExt;
  61. import org.eclipse.jgit.internal.storage.pack.PackWriter;
  62. import org.eclipse.jgit.lib.AnyObjectId;
  63. import org.eclipse.jgit.lib.Constants;
  64. import org.eclipse.jgit.lib.NullProgressMonitor;
  65. import org.eclipse.jgit.lib.ObjectId;
  66. import org.eclipse.jgit.lib.ObjectIdSet;
  67. import org.eclipse.jgit.lib.ProgressMonitor;
  68. import org.eclipse.jgit.lib.Ref;
  69. import org.eclipse.jgit.lib.RefDatabase;
  70. import org.eclipse.jgit.revwalk.RevWalk;
  71. import org.eclipse.jgit.storage.pack.PackConfig;
  72. import org.eclipse.jgit.storage.pack.PackStatistics;
  73. import org.eclipse.jgit.util.io.CountingOutputStream;
  74. /** Repack and garbage collect a repository. */
  75. public class DfsGarbageCollector {
  76. private final DfsRepository repo;
  77. private final RefDatabase refdb;
  78. private final DfsObjDatabase objdb;
  79. private final List<DfsPackDescription> newPackDesc;
  80. private final List<PackStatistics> newPackStats;
  81. private final List<ObjectIdSet> newPackObj;
  82. private DfsReader ctx;
  83. private PackConfig packConfig;
  84. private long coalesceGarbageLimit = 50 << 20;
  85. private Map<String, Ref> refsBefore;
  86. private List<DfsPackFile> packsBefore;
  87. private Set<ObjectId> allHeads;
  88. private Set<ObjectId> nonHeads;
  89. private Set<ObjectId> tagTargets;
  90. /**
  91. * Initialize a garbage collector.
  92. *
  93. * @param repository
  94. * repository objects to be packed will be read from.
  95. */
  96. public DfsGarbageCollector(DfsRepository repository) {
  97. repo = repository;
  98. refdb = repo.getRefDatabase();
  99. objdb = repo.getObjectDatabase();
  100. newPackDesc = new ArrayList<DfsPackDescription>(4);
  101. newPackStats = new ArrayList<PackStatistics>(4);
  102. newPackObj = new ArrayList<ObjectIdSet>(4);
  103. packConfig = new PackConfig(repo);
  104. packConfig.setIndexVersion(2);
  105. }
  106. /** @return configuration used to generate the new pack file. */
  107. public PackConfig getPackConfig() {
  108. return packConfig;
  109. }
  110. /**
  111. * @param newConfig
  112. * the new configuration to use when creating the pack file.
  113. * @return {@code this}
  114. */
  115. public DfsGarbageCollector setPackConfig(PackConfig newConfig) {
  116. packConfig = newConfig;
  117. return this;
  118. }
  119. /** @return garbage packs smaller than this size will be repacked. */
  120. public long getCoalesceGarbageLimit() {
  121. return coalesceGarbageLimit;
  122. }
  123. /**
  124. * Set the byte size limit for garbage packs to be repacked.
  125. * <p>
  126. * Any UNREACHABLE_GARBAGE pack smaller than this limit will be repacked at
  127. * the end of the run. This allows the garbage collector to coalesce
  128. * unreachable objects into a single file.
  129. * <p>
  130. * If an UNREACHABLE_GARBAGE pack is already larger than this limit it will
  131. * be left alone by the garbage collector. This avoids unnecessary disk IO
  132. * reading and copying the objects.
  133. * <p>
  134. * If limit is set to 0 the UNREACHABLE_GARBAGE coalesce is disabled.<br>
  135. * If limit is set to {@link Long#MAX_VALUE}, everything is coalesced.
  136. * <p>
  137. * Keeping unreachable garbage prevents race conditions with repository
  138. * changes that may suddenly need an object whose only copy was stored in
  139. * the UNREACHABLE_GARBAGE pack.
  140. *
  141. * @param limit
  142. * size in bytes.
  143. * @return {@code this}
  144. */
  145. public DfsGarbageCollector setCoalesceGarbageLimit(long limit) {
  146. coalesceGarbageLimit = limit;
  147. return this;
  148. }
  149. /**
  150. * Create a single new pack file containing all of the live objects.
  151. * <p>
  152. * This method safely decides which packs can be expired after the new pack
  153. * is created by validating the references have not been modified in an
  154. * incompatible way.
  155. *
  156. * @param pm
  157. * progress monitor to receive updates on as packing may take a
  158. * while, depending on the size of the repository.
  159. * @return true if the repack was successful without race conditions. False
  160. * if a race condition was detected and the repack should be run
  161. * again later.
  162. * @throws IOException
  163. * a new pack cannot be created.
  164. */
  165. public boolean pack(ProgressMonitor pm) throws IOException {
  166. if (pm == null)
  167. pm = NullProgressMonitor.INSTANCE;
  168. if (packConfig.getIndexVersion() != 2)
  169. throw new IllegalStateException(
  170. JGitText.get().supportOnlyPackIndexVersion2);
  171. ctx = (DfsReader) objdb.newReader();
  172. try {
  173. refdb.refresh();
  174. objdb.clearCache();
  175. refsBefore = refdb.getRefs(ALL);
  176. packsBefore = packsToRebuild();
  177. if (packsBefore.isEmpty())
  178. return true;
  179. allHeads = new HashSet<ObjectId>();
  180. nonHeads = new HashSet<ObjectId>();
  181. tagTargets = new HashSet<ObjectId>();
  182. for (Ref ref : refsBefore.values()) {
  183. if (ref.isSymbolic() || ref.getObjectId() == null)
  184. continue;
  185. if (isHead(ref))
  186. allHeads.add(ref.getObjectId());
  187. else
  188. nonHeads.add(ref.getObjectId());
  189. if (ref.getPeeledObjectId() != null)
  190. tagTargets.add(ref.getPeeledObjectId());
  191. }
  192. tagTargets.addAll(allHeads);
  193. boolean rollback = true;
  194. try {
  195. packHeads(pm);
  196. packRest(pm);
  197. packGarbage(pm);
  198. objdb.commitPack(newPackDesc, toPrune());
  199. rollback = false;
  200. return true;
  201. } finally {
  202. if (rollback)
  203. objdb.rollbackPack(newPackDesc);
  204. }
  205. } finally {
  206. ctx.close();
  207. }
  208. }
  209. private List<DfsPackFile> packsToRebuild() throws IOException {
  210. DfsPackFile[] packs = objdb.getPacks();
  211. List<DfsPackFile> out = new ArrayList<DfsPackFile>(packs.length);
  212. for (DfsPackFile p : packs) {
  213. DfsPackDescription d = p.getPackDescription();
  214. if (d.getPackSource() != UNREACHABLE_GARBAGE)
  215. out.add(p);
  216. else if (d.getFileSize(PackExt.PACK) < coalesceGarbageLimit)
  217. out.add(p);
  218. }
  219. return out;
  220. }
  221. /** @return all of the source packs that fed into this compaction. */
  222. public List<DfsPackDescription> getSourcePacks() {
  223. return toPrune();
  224. }
  225. /** @return new packs created by this compaction. */
  226. public List<DfsPackDescription> getNewPacks() {
  227. return newPackDesc;
  228. }
  229. /** @return statistics corresponding to the {@link #getNewPacks()}. */
  230. public List<PackStatistics> getNewPackStatistics() {
  231. return newPackStats;
  232. }
  233. private List<DfsPackDescription> toPrune() {
  234. int cnt = packsBefore.size();
  235. List<DfsPackDescription> all = new ArrayList<DfsPackDescription>(cnt);
  236. for (DfsPackFile pack : packsBefore)
  237. all.add(pack.getPackDescription());
  238. return all;
  239. }
  240. private void packHeads(ProgressMonitor pm) throws IOException {
  241. if (allHeads.isEmpty())
  242. return;
  243. try (PackWriter pw = newPackWriter()) {
  244. pw.setTagTargets(tagTargets);
  245. pw.preparePack(pm, allHeads, Collections.<ObjectId> emptySet());
  246. if (0 < pw.getObjectCount())
  247. writePack(GC, pw, pm);
  248. }
  249. }
  250. private void packRest(ProgressMonitor pm) throws IOException {
  251. if (nonHeads.isEmpty())
  252. return;
  253. try (PackWriter pw = newPackWriter()) {
  254. for (ObjectIdSet packedObjs : newPackObj)
  255. pw.excludeObjects(packedObjs);
  256. pw.preparePack(pm, nonHeads, allHeads);
  257. if (0 < pw.getObjectCount())
  258. writePack(GC, pw, pm);
  259. }
  260. }
  261. private void packGarbage(ProgressMonitor pm) throws IOException {
  262. // TODO(sop) This is ugly. The garbage pack needs to be deleted.
  263. PackConfig cfg = new PackConfig(packConfig);
  264. cfg.setReuseDeltas(true);
  265. cfg.setReuseObjects(true);
  266. cfg.setDeltaCompress(false);
  267. cfg.setBuildBitmaps(false);
  268. try (PackWriter pw = new PackWriter(cfg, ctx);
  269. RevWalk pool = new RevWalk(ctx)) {
  270. pw.setDeltaBaseAsOffset(true);
  271. pw.setReuseDeltaCommits(true);
  272. pm.beginTask(JGitText.get().findingGarbage, objectsBefore());
  273. for (DfsPackFile oldPack : packsBefore) {
  274. PackIndex oldIdx = oldPack.getPackIndex(ctx);
  275. for (PackIndex.MutableEntry ent : oldIdx) {
  276. pm.update(1);
  277. ObjectId id = ent.toObjectId();
  278. if (pool.lookupOrNull(id) != null || anyPackHas(id))
  279. continue;
  280. int type = oldPack.getObjectType(ctx, ent.getOffset());
  281. pw.addObject(pool.lookupAny(id, type));
  282. }
  283. }
  284. pm.endTask();
  285. if (0 < pw.getObjectCount())
  286. writePack(UNREACHABLE_GARBAGE, pw, pm);
  287. }
  288. }
  289. private boolean anyPackHas(AnyObjectId id) {
  290. for (ObjectIdSet packedObjs : newPackObj)
  291. if (packedObjs.contains(id))
  292. return true;
  293. return false;
  294. }
  295. private static boolean isHead(Ref ref) {
  296. return ref.getName().startsWith(Constants.R_HEADS);
  297. }
  298. private int objectsBefore() {
  299. int cnt = 0;
  300. for (DfsPackFile p : packsBefore)
  301. cnt += p.getPackDescription().getObjectCount();
  302. return cnt;
  303. }
  304. private PackWriter newPackWriter() {
  305. PackWriter pw = new PackWriter(packConfig, ctx);
  306. pw.setDeltaBaseAsOffset(true);
  307. pw.setReuseDeltaCommits(false);
  308. return pw;
  309. }
  310. private DfsPackDescription writePack(PackSource source, PackWriter pw,
  311. ProgressMonitor pm) throws IOException {
  312. DfsOutputStream out;
  313. DfsPackDescription pack = repo.getObjectDatabase().newPack(source);
  314. newPackDesc.add(pack);
  315. out = objdb.writeFile(pack, PACK);
  316. try {
  317. pw.writePack(pm, pm, out);
  318. pack.addFileExt(PACK);
  319. } finally {
  320. out.close();
  321. }
  322. out = objdb.writeFile(pack, INDEX);
  323. try {
  324. CountingOutputStream cnt = new CountingOutputStream(out);
  325. pw.writeIndex(cnt);
  326. pack.addFileExt(INDEX);
  327. pack.setFileSize(INDEX, cnt.getCount());
  328. pack.setIndexVersion(pw.getIndexVersion());
  329. } finally {
  330. out.close();
  331. }
  332. if (pw.prepareBitmapIndex(pm)) {
  333. out = objdb.writeFile(pack, BITMAP_INDEX);
  334. try {
  335. CountingOutputStream cnt = new CountingOutputStream(out);
  336. pw.writeBitmapIndex(cnt);
  337. pack.addFileExt(BITMAP_INDEX);
  338. pack.setFileSize(BITMAP_INDEX, cnt.getCount());
  339. } finally {
  340. out.close();
  341. }
  342. }
  343. PackStatistics stats = pw.getStatistics();
  344. pack.setPackStats(stats);
  345. newPackStats.add(stats);
  346. newPackObj.add(pw.getObjectSet());
  347. DfsBlockCache.getInstance().getOrCreate(pack, null);
  348. return pack;
  349. }
  350. }