
DfsGarbageCollector.java

/*
 * Copyright (C) 2011, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.eclipse.jgit.internal.storage.dfs;

import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
import static org.eclipse.jgit.lib.RefDatabase.ALL;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.internal.storage.pack.PackWriter;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdSet;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackStatistics;
import org.eclipse.jgit.util.io.CountingOutputStream;

/** Repack and garbage collect a repository. */
public class DfsGarbageCollector {
	private final DfsRepository repo;

	private final DfsRefDatabase refdb;

	private final DfsObjDatabase objdb;

	private final List<DfsPackDescription> newPackDesc;

	private final List<PackStatistics> newPackStats;

	private final List<ObjectIdSet> newPackObj;

	private DfsReader ctx;

	private PackConfig packConfig;

	private long coalesceGarbageLimit = 50 << 20;

	private Map<String, Ref> refsBefore;

	private List<DfsPackFile> packsBefore;

	private Set<ObjectId> allHeads;

	private Set<ObjectId> nonHeads;

	private Set<ObjectId> tagTargets;

	/**
	 * Initialize a garbage collector.
	 *
	 * @param repository
	 *            repository objects to be packed will be read from.
	 */
	public DfsGarbageCollector(DfsRepository repository) {
		repo = repository;
		refdb = repo.getRefDatabase();
		objdb = repo.getObjectDatabase();
		newPackDesc = new ArrayList<DfsPackDescription>(4);
		newPackStats = new ArrayList<PackStatistics>(4);
		newPackObj = new ArrayList<ObjectIdSet>(4);

		packConfig = new PackConfig(repo);
		packConfig.setIndexVersion(2);
	}
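
	// Illustrative usage sketch (the exact driving code depends on the
	// caller); only public methods declared in this class are used:
	//
	//   DfsGarbageCollector gc = new DfsGarbageCollector(repo);
	//   gc.setCoalesceGarbageLimit(50 << 20); // optional; 50 MiB is the default
	//   if (!gc.pack(NullProgressMonitor.INSTANCE)) {
	//       // a concurrent reference update raced with the repack; retry later
	//   }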

	/** @return configuration used to generate the new pack file. */
	public PackConfig getPackConfig() {
		return packConfig;
	}

	/**
	 * @param newConfig
	 *            the new configuration to use when creating the pack file.
	 * @return {@code this}
	 */
	public DfsGarbageCollector setPackConfig(PackConfig newConfig) {
		packConfig = newConfig;
		return this;
	}

	/** @return garbage packs smaller than this size will be repacked. */
	public long getCoalesceGarbageLimit() {
		return coalesceGarbageLimit;
	}

	/**
	 * Set the byte size limit for garbage packs to be repacked.
	 * <p>
	 * Any UNREACHABLE_GARBAGE pack smaller than this limit will be repacked at
	 * the end of the run. This allows the garbage collector to coalesce
	 * unreachable objects into a single file.
	 * <p>
	 * If an UNREACHABLE_GARBAGE pack is already larger than this limit it will
	 * be left alone by the garbage collector. This avoids unnecessary disk IO
	 * reading and copying the objects.
	 * <p>
	 * If limit is set to 0 the UNREACHABLE_GARBAGE coalesce is disabled.<br>
	 * If limit is set to {@link Long#MAX_VALUE}, everything is coalesced.
	 * <p>
	 * Keeping unreachable garbage prevents race conditions with repository
	 * changes that may suddenly need an object whose only copy was stored in
	 * the UNREACHABLE_GARBAGE pack.
	 *
	 * @param limit
	 *            size in bytes.
	 * @return {@code this}
	 */
	public DfsGarbageCollector setCoalesceGarbageLimit(long limit) {
		coalesceGarbageLimit = limit;
		return this;
	}
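
	// The default limit is 50 << 20 = 52,428,800 bytes (50 MiB). As an
	// illustrative example, setCoalesceGarbageLimit(100 << 20) would raise it
	// to 100 MiB, 0 disables coalescing, and Long.MAX_VALUE coalesces every
	// UNREACHABLE_GARBAGE pack regardless of size.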

	/**
	 * Create a single new pack file containing all of the live objects.
	 * <p>
	 * This method safely decides which packs can be expired after the new pack
	 * is created by validating the references have not been modified in an
	 * incompatible way.
	 *
	 * @param pm
	 *            progress monitor to receive updates on as packing may take a
	 *            while, depending on the size of the repository.
	 * @return true if the repack was successful without race conditions. False
	 *         if a race condition was detected and the repack should be run
	 *         again later.
	 * @throws IOException
	 *             a new pack cannot be created.
	 */
	public boolean pack(ProgressMonitor pm) throws IOException {
		if (pm == null)
			pm = NullProgressMonitor.INSTANCE;
		if (packConfig.getIndexVersion() != 2)
			throw new IllegalStateException(
					JGitText.get().supportOnlyPackIndexVersion2);

		ctx = (DfsReader) objdb.newReader();
		try {
			refdb.clearCache();
			objdb.clearCache();

			refsBefore = refdb.getRefs(ALL);
			packsBefore = packsToRebuild();
			if (packsBefore.isEmpty())
				return true;

			allHeads = new HashSet<ObjectId>();
			nonHeads = new HashSet<ObjectId>();
			tagTargets = new HashSet<ObjectId>();
			for (Ref ref : refsBefore.values()) {
				if (ref.isSymbolic() || ref.getObjectId() == null)
					continue;
				if (isHead(ref))
					allHeads.add(ref.getObjectId());
				else
					nonHeads.add(ref.getObjectId());
				if (ref.getPeeledObjectId() != null)
					tagTargets.add(ref.getPeeledObjectId());
			}
			tagTargets.addAll(allHeads);

			boolean rollback = true;
			try {
				packHeads(pm);
				packRest(pm);
				packGarbage(pm);
				objdb.commitPack(newPackDesc, toPrune());
				rollback = false;
				return true;
			} finally {
				if (rollback)
					objdb.rollbackPack(newPackDesc);
			}
		} finally {
			ctx.close();
		}
	}
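
	// packsToRebuild() selects the inputs for this run: every non-garbage pack
	// is rebuilt, while an existing UNREACHABLE_GARBAGE pack is only picked up
	// if its pack file is smaller than coalesceGarbageLimit; larger garbage
	// packs are left in place untouched.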
	private List<DfsPackFile> packsToRebuild() throws IOException {
		DfsPackFile[] packs = objdb.getPacks();
		List<DfsPackFile> out = new ArrayList<DfsPackFile>(packs.length);
		for (DfsPackFile p : packs) {
			DfsPackDescription d = p.getPackDescription();
			if (d.getPackSource() != UNREACHABLE_GARBAGE)
				out.add(p);
			else if (d.getFileSize(PackExt.PACK) < coalesceGarbageLimit)
				out.add(p);
		}
		return out;
	}

	/** @return all of the source packs that fed into this compaction. */
	public List<DfsPackDescription> getSourcePacks() {
		return toPrune();
	}

	/** @return new packs created by this compaction. */
	public List<DfsPackDescription> getNewPacks() {
		return newPackDesc;
	}

	/** @return statistics corresponding to the {@link #getNewPacks()}. */
	public List<PackStatistics> getNewPackStatistics() {
		return newPackStats;
	}

	private List<DfsPackDescription> toPrune() {
		int cnt = packsBefore.size();
		List<DfsPackDescription> all = new ArrayList<DfsPackDescription>(cnt);
		for (DfsPackFile pack : packsBefore)
			all.add(pack.getPackDescription());
		return all;
	}
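
	// The repack proceeds in three phases: packHeads() writes a GC pack with
	// everything reachable from refs/heads/*, packRest() writes a second GC
	// pack for objects reachable only from the remaining refs (excluding
	// anything already packed), and packGarbage() sweeps whatever is left in
	// the old packs into an UNREACHABLE_GARBAGE pack.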
	private void packHeads(ProgressMonitor pm) throws IOException {
		if (allHeads.isEmpty())
			return;

		try (PackWriter pw = newPackWriter()) {
			pw.setTagTargets(tagTargets);
			pw.preparePack(pm, allHeads, Collections.<ObjectId> emptySet());
			if (0 < pw.getObjectCount())
				writePack(GC, pw, pm);
		}
	}

	private void packRest(ProgressMonitor pm) throws IOException {
		if (nonHeads.isEmpty())
			return;

		try (PackWriter pw = newPackWriter()) {
			for (ObjectIdSet packedObjs : newPackObj)
				pw.excludeObjects(packedObjs);
			pw.preparePack(pm, nonHeads, allHeads);
			if (0 < pw.getObjectCount())
				writePack(GC, pw, pm);
		}
	}
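
	// The garbage phase deliberately uses a stripped-down PackConfig: existing
	// deltas and object representations are reused verbatim, no new delta
	// compression is attempted, and no bitmap index is built, so unreachable
	// objects are parked with minimal extra work.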
	private void packGarbage(ProgressMonitor pm) throws IOException {
		// TODO(sop) This is ugly. The garbage pack needs to be deleted.
		PackConfig cfg = new PackConfig(packConfig);
		cfg.setReuseDeltas(true);
		cfg.setReuseObjects(true);
		cfg.setDeltaCompress(false);
		cfg.setBuildBitmaps(false);

		try (PackWriter pw = new PackWriter(cfg, ctx);
				RevWalk pool = new RevWalk(ctx)) {
			pw.setDeltaBaseAsOffset(true);
			pw.setReuseDeltaCommits(true);
			pm.beginTask(JGitText.get().findingGarbage, objectsBefore());
			for (DfsPackFile oldPack : packsBefore) {
				PackIndex oldIdx = oldPack.getPackIndex(ctx);
				for (PackIndex.MutableEntry ent : oldIdx) {
					pm.update(1);
					ObjectId id = ent.toObjectId();
					if (pool.lookupOrNull(id) != null || anyPackHas(id))
						continue;

					int type = oldPack.getObjectType(ctx, ent.getOffset());
					pw.addObject(pool.lookupAny(id, type));
				}
			}
			pm.endTask();
			if (0 < pw.getObjectCount())
				writePack(UNREACHABLE_GARBAGE, pw, pm);
		}
	}

	private boolean anyPackHas(AnyObjectId id) {
		for (ObjectIdSet packedObjs : newPackObj)
			if (packedObjs.contains(id))
				return true;
		return false;
	}

	private static boolean isHead(Ref ref) {
		return ref.getName().startsWith(Constants.R_HEADS);
	}

	private int objectsBefore() {
		int cnt = 0;
		for (DfsPackFile p : packsBefore)
			cnt += p.getPackDescription().getObjectCount();
		return cnt;
	}

	private PackWriter newPackWriter() {
		PackWriter pw = new PackWriter(packConfig, ctx);
		pw.setDeltaBaseAsOffset(true);
		pw.setReuseDeltaCommits(false);
		return pw;
	}
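
	// writePack() streams one new pack through the object database: it writes
	// the pack data, then the index (recording its size and version), then a
	// bitmap index when the writer produced one, and finally records the
	// statistics and object set so later phases can exclude what was packed.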
	private DfsPackDescription writePack(PackSource source, PackWriter pw,
			ProgressMonitor pm) throws IOException {
		DfsOutputStream out;
		DfsPackDescription pack = repo.getObjectDatabase().newPack(source);
		newPackDesc.add(pack);

		out = objdb.writeFile(pack, PACK);
		try {
			pw.writePack(pm, pm, out);
			pack.addFileExt(PACK);
		} finally {
			out.close();
		}

		out = objdb.writeFile(pack, INDEX);
		try {
			CountingOutputStream cnt = new CountingOutputStream(out);
			pw.writeIndex(cnt);
			pack.addFileExt(INDEX);
			pack.setFileSize(INDEX, cnt.getCount());
			pack.setIndexVersion(pw.getIndexVersion());
		} finally {
			out.close();
		}

		if (pw.prepareBitmapIndex(pm)) {
			out = objdb.writeFile(pack, BITMAP_INDEX);
			try {
				CountingOutputStream cnt = new CountingOutputStream(out);
				pw.writeBitmapIndex(cnt);
				pack.addFileExt(BITMAP_INDEX);
				pack.setFileSize(BITMAP_INDEX, cnt.getCount());
			} finally {
				out.close();
			}
		}

		PackStatistics stats = pw.getStatistics();
		pack.setPackStats(stats);
		newPackStats.add(stats);
		newPackObj.add(pw.getObjectSet());

		DfsBlockCache.getInstance().getOrCreate(pack, null);
		return pack;
	}
}