
DfsPackFile.java

/*
 * Copyright (C) 2008-2011, Google Inc.
 * Copyright (C) 2007, Robin Rosenberg <robin.rosenberg@dewire.com>
 * Copyright (C) 2006-2008, Shawn O. Pearce <spearce@spearce.org>
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;

import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.text.MessageFormat;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.PackInvalidException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.file.PackBitmapIndex;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.internal.storage.pack.PackOutputStream;
import org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.util.LongList;
/**
 * A Git version 2 pack file representation. A pack file contains Git objects in
 * delta packed format yielding high compression of lots of objects where some
 * objects are similar.
 */
public final class DfsPackFile {
	/**
	 * File offset used to cache {@link #index} in {@link DfsBlockCache}.
	 * <p>
	 * To better manage memory, the forward index is stored as a single block in
	 * the block cache under this file position. A negative value is used
	 * because it cannot occur in a normal pack file, and it is less likely to
	 * collide with a valid data block from the file as the high bits will all
	 * be set when treated as an unsigned long by the cache code.
	 */
	private static final long POS_INDEX = -1;

	/** Offset used to cache {@link #reverseIndex}. See {@link #POS_INDEX}. */
	private static final long POS_REVERSE_INDEX = -2;

	/** Offset used to cache {@link #bitmapIndex}. See {@link #POS_INDEX}. */
	private static final long POS_BITMAP_INDEX = -3;

	/** Cache that owns this pack file and its data. */
	private final DfsBlockCache cache;

	/** Description of the pack file's storage. */
	private final DfsPackDescription packDesc;

	/** Unique identity of this pack while in-memory. */
	final DfsPackKey key;

	/**
	 * Total number of bytes in this pack file.
	 * <p>
	 * This field initializes to -1 and gets populated when a block is loaded.
	 */
	volatile long length;

	/**
	 * Preferred alignment for loading blocks from the backing file.
	 * <p>
	 * It is initialized to 0 and filled in on the first read made from the
	 * file. Block sizes may be odd, e.g. 4091, caused by the underlying DFS
	 * storing 4091 user bytes and 5 bytes of block metadata into a lower level
	 * 4096 byte block on disk.
	 */
	private volatile int blockSize;

	/** True once corruption has been detected that cannot be worked around. */
	private volatile boolean invalid;

	/** Exception that caused the packfile to be flagged as invalid. */
	private volatile Exception invalidatingCause;

	/**
	 * Lock for initialization of {@link #index} and {@link #corruptObjects}.
	 * <p>
	 * This lock ensures only one thread can perform the initialization work.
	 */
	private final Object initLock = new Object();

	/** Index mapping {@link ObjectId} to position within the pack stream. */
	private volatile DfsBlockCache.Ref<PackIndex> index;

	/** Reverse version of {@link #index} mapping position to {@link ObjectId}. */
	private volatile DfsBlockCache.Ref<PackReverseIndex> reverseIndex;

	/** Index of compressed bitmap mapping entire object graph. */
	private volatile DfsBlockCache.Ref<PackBitmapIndex> bitmapIndex;

	/**
	 * Objects we have tried to read, and discovered to be corrupt.
	 * <p>
	 * The list is allocated after the first corruption is found, and filled in
	 * as more entries are discovered. Typically this list is never used, as
	 * pack files do not usually contain corrupt objects.
	 */
	private volatile LongList corruptObjects;

	/**
	 * Construct a reader for an existing packfile.
	 *
	 * @param cache
	 *            cache that owns the pack data.
	 * @param desc
	 *            description of the pack within the DFS.
	 * @param key
	 *            interned key used to identify blocks in the block cache.
	 */
	DfsPackFile(DfsBlockCache cache, DfsPackDescription desc, DfsPackKey key) {
		this.cache = cache;
		this.packDesc = desc;
		this.key = key;

		length = desc.getFileSize(PACK);
		if (length <= 0)
			length = -1;
	}
	/** @return description that was originally used to configure this pack file. */
	public DfsPackDescription getPackDescription() {
		return packDesc;
	}

	/**
	 * @return whether the pack index file is loaded and cached in memory.
	 */
	public boolean isIndexLoaded() {
		DfsBlockCache.Ref<PackIndex> idxref = index;
		return idxref != null && idxref.has();
	}

	/** @return bytes cached in memory for this pack, excluding the index. */
	public long getCachedSize() {
		return key.cachedSize.get();
	}

	String getPackName() {
		return packDesc.getFileName(PACK);
	}

	void setBlockSize(int newSize) {
		blockSize = newSize;
	}

	void setPackIndex(PackIndex idx) {
		long objCnt = idx.getObjectCount();
		int recSize = Constants.OBJECT_ID_LENGTH + 8;
		int sz = (int) Math.min(objCnt * recSize, Integer.MAX_VALUE);
		index = cache.put(key, POS_INDEX, sz, idx);
	}

	/**
	 * Get the PackIndex for this PackFile.
	 *
	 * @param ctx
	 *            reader context to support reading from the backing store if
	 *            the index is not already loaded in memory.
	 * @return the PackIndex.
	 * @throws IOException
	 *             the pack index is not available, or is corrupt.
	 */
	public PackIndex getPackIndex(DfsReader ctx) throws IOException {
		return idx(ctx);
	}

	private PackIndex idx(DfsReader ctx) throws IOException {
		DfsBlockCache.Ref<PackIndex> idxref = index;
		if (idxref != null) {
			PackIndex idx = idxref.get();
			if (idx != null)
				return idx;
		}

		if (invalid) {
			throw new PackInvalidException(getPackName(), invalidatingCause);
		}

		Repository.getGlobalListenerList()
				.dispatch(new BeforeDfsPackIndexLoadedEvent(this));

		synchronized (initLock) {
			idxref = index;
			if (idxref != null) {
				PackIndex idx = idxref.get();
				if (idx != null)
					return idx;
			}

			PackIndex idx;
			try {
				ReadableChannel rc = ctx.db.openFile(packDesc, INDEX);
				try {
					InputStream in = Channels.newInputStream(rc);
					int wantSize = 8192;
					int bs = rc.blockSize();
					if (0 < bs && bs < wantSize)
						bs = (wantSize / bs) * bs;
					else if (bs <= 0)
						bs = wantSize;
					in = new BufferedInputStream(in, bs);
					idx = PackIndex.read(in);
				} finally {
					rc.close();
				}
			} catch (EOFException e) {
				invalid = true;
				invalidatingCause = e;
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().shortReadOfIndex,
						packDesc.getFileName(INDEX)));
				e2.initCause(e);
				throw e2;
			} catch (IOException e) {
				invalid = true;
				invalidatingCause = e;
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().cannotReadIndex,
						packDesc.getFileName(INDEX)));
				e2.initCause(e);
				throw e2;
			}

			setPackIndex(idx);
			return idx;
		}
	}
	final boolean isGarbage() {
		return packDesc.getPackSource() == UNREACHABLE_GARBAGE;
	}

	PackBitmapIndex getBitmapIndex(DfsReader ctx) throws IOException {
		if (invalid || isGarbage())
			return null;

		DfsBlockCache.Ref<PackBitmapIndex> idxref = bitmapIndex;
		if (idxref != null) {
			PackBitmapIndex idx = idxref.get();
			if (idx != null)
				return idx;
		}

		if (!packDesc.hasFileExt(PackExt.BITMAP_INDEX))
			return null;

		synchronized (initLock) {
			idxref = bitmapIndex;
			if (idxref != null) {
				PackBitmapIndex idx = idxref.get();
				if (idx != null)
					return idx;
			}

			long size;
			PackBitmapIndex idx;
			try {
				ReadableChannel rc = ctx.db.openFile(packDesc, BITMAP_INDEX);
				try {
					InputStream in = Channels.newInputStream(rc);
					int wantSize = 8192;
					int bs = rc.blockSize();
					if (0 < bs && bs < wantSize)
						bs = (wantSize / bs) * bs;
					else if (bs <= 0)
						bs = wantSize;
					in = new BufferedInputStream(in, bs);
					idx = PackBitmapIndex.read(
							in, idx(ctx), getReverseIdx(ctx));
				} finally {
					size = rc.position();
					rc.close();
				}
			} catch (EOFException e) {
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().shortReadOfIndex,
						packDesc.getFileName(BITMAP_INDEX)));
				e2.initCause(e);
				throw e2;
			} catch (IOException e) {
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().cannotReadIndex,
						packDesc.getFileName(BITMAP_INDEX)));
				e2.initCause(e);
				throw e2;
			}

			bitmapIndex = cache.put(key, POS_BITMAP_INDEX,
					(int) Math.min(size, Integer.MAX_VALUE), idx);
			return idx;
		}
	}

	PackReverseIndex getReverseIdx(DfsReader ctx) throws IOException {
		DfsBlockCache.Ref<PackReverseIndex> revref = reverseIndex;
		if (revref != null) {
			PackReverseIndex revidx = revref.get();
			if (revidx != null)
				return revidx;
		}

		synchronized (initLock) {
			revref = reverseIndex;
			if (revref != null) {
				PackReverseIndex revidx = revref.get();
				if (revidx != null)
					return revidx;
			}

			PackIndex idx = idx(ctx);
			PackReverseIndex revidx = new PackReverseIndex(idx);
			int sz = (int) Math.min(
					idx.getObjectCount() * 8, Integer.MAX_VALUE);
			reverseIndex = cache.put(key, POS_REVERSE_INDEX, sz, revidx);
			return revidx;
		}
	}
	/**
	 * Check if an object is stored within this pack.
	 *
	 * @param ctx
	 *            reader context to support reading from the backing store if
	 *            the index is not already loaded in memory.
	 * @param id
	 *            object to be located.
	 * @return true if the object exists in this pack; false if it does not.
	 * @throws IOException
	 *             the pack index is not available, or is corrupt.
	 */
	public boolean hasObject(DfsReader ctx, AnyObjectId id) throws IOException {
		final long offset = idx(ctx).findOffset(id);
		return 0 < offset && !isCorrupt(offset);
	}

	/**
	 * Get an object from this pack.
	 *
	 * @param ctx
	 *            temporary working space associated with the calling thread.
	 * @param id
	 *            the object to obtain from the pack. Must not be null.
	 * @return the object loader for the requested object if it is contained in
	 *         this pack; null if the object was not found.
	 * @throws IOException
	 *             the pack file or the index could not be read.
	 */
	ObjectLoader get(DfsReader ctx, AnyObjectId id)
			throws IOException {
		long offset = idx(ctx).findOffset(id);
		return 0 < offset && !isCorrupt(offset) ? load(ctx, offset) : null;
	}

	long findOffset(DfsReader ctx, AnyObjectId id) throws IOException {
		return idx(ctx).findOffset(id);
	}

	void resolve(DfsReader ctx, Set<ObjectId> matches, AbbreviatedObjectId id,
			int matchLimit) throws IOException {
		idx(ctx).resolve(matches, id, matchLimit);
	}

	/** Release all memory used by this DfsPackFile instance. */
	public void close() {
		cache.remove(this);
		index = null;
		reverseIndex = null;
	}

	/**
	 * Obtain the total number of objects available in this pack. This method
	 * relies on the pack index, giving the number of effectively available
	 * objects.
	 *
	 * @param ctx
	 *            current reader for the calling thread.
	 * @return number of objects in the index of this pack, and thus in the
	 *         pack itself.
	 * @throws IOException
	 *             the index file cannot be loaded into memory.
	 */
	long getObjectCount(DfsReader ctx) throws IOException {
		return idx(ctx).getObjectCount();
	}
	private byte[] decompress(long position, int sz, DfsReader ctx)
			throws IOException, DataFormatException {
		byte[] dstbuf;
		try {
			dstbuf = new byte[sz];
		} catch (OutOfMemoryError noMemory) {
			// The size may be larger than our heap allows, return null to
			// let the caller know allocation isn't possible and it should
			// use the large object streaming approach instead.
			//
			// For example, this can occur when sz is 640 MB, and JRE
			// maximum heap size is only 256 MB. Even if the JRE has
			// 200 MB free, it cannot allocate a 640 MB byte array.
			return null;
		}

		if (ctx.inflate(this, position, dstbuf, false) != sz)
			throw new EOFException(MessageFormat.format(
					JGitText.get().shortCompressedStreamAt,
					Long.valueOf(position)));
		return dstbuf;
	}

	void copyPackAsIs(PackOutputStream out, DfsReader ctx)
			throws IOException {
		// If the length hasn't been determined yet, pin to set it.
		if (length == -1) {
			ctx.pin(this, 0);
			ctx.unpin();
		}
		if (cache.shouldCopyThroughCache(length))
			copyPackThroughCache(out, ctx);
		else
			copyPackBypassCache(out, ctx);
	}

	private void copyPackThroughCache(PackOutputStream out, DfsReader ctx)
			throws IOException {
		long position = 12;
		long remaining = length - (12 + 20);
		while (0 < remaining) {
			DfsBlock b = cache.getOrLoad(this, position, ctx);
			int ptr = (int) (position - b.start);
			int n = (int) Math.min(b.size() - ptr, remaining);
			b.write(out, position, n);
			position += n;
			remaining -= n;
		}
	}
	private long copyPackBypassCache(PackOutputStream out, DfsReader ctx)
			throws IOException {
		try (ReadableChannel rc = ctx.db.openFile(packDesc, PACK)) {
			ByteBuffer buf = newCopyBuffer(out, rc);
			if (ctx.getOptions().getStreamPackBufferSize() > 0)
				rc.setReadAheadBytes(ctx.getOptions().getStreamPackBufferSize());
			long position = 12;
			long remaining = length - (12 + 20);
			boolean packHeadSkipped = false;
			while (0 < remaining) {
				DfsBlock b = cache.get(key, alignToBlock(position));
				if (b != null) {
					int ptr = (int) (position - b.start);
					int n = (int) Math.min(b.size() - ptr, remaining);
					b.write(out, position, n);
					position += n;
					remaining -= n;
					rc.position(position);
					packHeadSkipped = true;
					continue;
				}

				buf.position(0);
				int n = read(rc, buf);
				if (n <= 0)
					throw packfileIsTruncated();
				else if (n > remaining)
					n = (int) remaining;

				if (!packHeadSkipped) {
					// Need to skip the 'PACK' header for the first read.
					out.write(buf.array(), 12, n - 12);
					packHeadSkipped = true;
				} else {
					out.write(buf.array(), 0, n);
				}
				position += n;
				remaining -= n;
			}
			return position;
		}
	}

	private ByteBuffer newCopyBuffer(PackOutputStream out, ReadableChannel rc) {
		int bs = blockSize(rc);
		byte[] copyBuf = out.getCopyBuffer();
		if (bs > copyBuf.length)
			copyBuf = new byte[bs];
		return ByteBuffer.wrap(copyBuf, 0, bs);
	}
	void copyAsIs(PackOutputStream out, DfsObjectToPack src,
			boolean validate, DfsReader ctx) throws IOException,
			StoredObjectRepresentationNotAvailableException {
		final CRC32 crc1 = validate ? new CRC32() : null;
		final CRC32 crc2 = validate ? new CRC32() : null;
		final byte[] buf = out.getCopyBuffer();

		// Rip apart the header so we can discover the size.
		//
		try {
			readFully(src.offset, buf, 0, 20, ctx);
		} catch (IOException ioError) {
			StoredObjectRepresentationNotAvailableException gone;
			gone = new StoredObjectRepresentationNotAvailableException(src);
			gone.initCause(ioError);
			throw gone;
		}

		int c = buf[0] & 0xff;
		final int typeCode = (c >> 4) & 7;
		long inflatedLength = c & 15;
		int shift = 4;
		int headerCnt = 1;
		while ((c & 0x80) != 0) {
			c = buf[headerCnt++] & 0xff;
			inflatedLength += ((long) (c & 0x7f)) << shift;
			shift += 7;
		}
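		// Pack entry headers store the type in bits 4-6 of the first byte and
		// the inflated size as a little-endian base-128 varint beginning in
		// the low 4 bits. Worked example (editor's illustration, not from the
		// original source): a blob (type 3) of 100 bytes is encoded as
		// 0xB4 0x06 -- 0xB4 carries the continuation bit, type 3 and size
		// bits 0x4; 0x06 contributes 6 << 4, giving 4 + 96 = 100.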
		if (typeCode == Constants.OBJ_OFS_DELTA) {
			do {
				c = buf[headerCnt++] & 0xff;
			} while ((c & 128) != 0);
			if (validate) {
				assert(crc1 != null && crc2 != null);
				crc1.update(buf, 0, headerCnt);
				crc2.update(buf, 0, headerCnt);
			}
		} else if (typeCode == Constants.OBJ_REF_DELTA) {
			if (validate) {
				assert(crc1 != null && crc2 != null);
				crc1.update(buf, 0, headerCnt);
				crc2.update(buf, 0, headerCnt);
			}

			readFully(src.offset + headerCnt, buf, 0, 20, ctx);
			if (validate) {
				assert(crc1 != null && crc2 != null);
				crc1.update(buf, 0, 20);
				crc2.update(buf, 0, 20);
			}
			headerCnt += 20;
		} else if (validate) {
			assert(crc1 != null && crc2 != null);
			crc1.update(buf, 0, headerCnt);
			crc2.update(buf, 0, headerCnt);
		}

		final long dataOffset = src.offset + headerCnt;
		final long dataLength = src.length;
		final long expectedCRC;
		final DfsBlock quickCopy;

		// Verify the object isn't corrupt before sending. If it is,
		// we report it missing instead.
		//
		try {
			quickCopy = ctx.quickCopy(this, dataOffset, dataLength);

			if (validate && idx(ctx).hasCRC32Support()) {
				assert(crc1 != null);
				// Index has the CRC32 code cached, validate the object.
				//
				expectedCRC = idx(ctx).findCRC32(src);
				if (quickCopy != null) {
					quickCopy.crc32(crc1, dataOffset, (int) dataLength);
				} else {
					long pos = dataOffset;
					long cnt = dataLength;
					while (cnt > 0) {
						final int n = (int) Math.min(cnt, buf.length);
						readFully(pos, buf, 0, n, ctx);
						crc1.update(buf, 0, n);
						pos += n;
						cnt -= n;
					}
				}
				if (crc1.getValue() != expectedCRC) {
					setCorrupt(src.offset);
					throw new CorruptObjectException(MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getPackName()));
				}
			} else if (validate) {
				assert(crc1 != null);
				// We don't have a CRC32 code in the index, so compute it
				// now while inflating the raw data to get zlib to tell us
				// whether or not the data is safe.
				//
				Inflater inf = ctx.inflater();
				byte[] tmp = new byte[1024];
				if (quickCopy != null) {
					quickCopy.check(inf, tmp, dataOffset, (int) dataLength);
				} else {
					long pos = dataOffset;
					long cnt = dataLength;
					while (cnt > 0) {
						final int n = (int) Math.min(cnt, buf.length);
						readFully(pos, buf, 0, n, ctx);
						crc1.update(buf, 0, n);
						inf.setInput(buf, 0, n);
						while (inf.inflate(tmp, 0, tmp.length) > 0)
							continue;
						pos += n;
						cnt -= n;
					}
				}
				if (!inf.finished() || inf.getBytesRead() != dataLength) {
					setCorrupt(src.offset);
					throw new EOFException(MessageFormat.format(
							JGitText.get().shortCompressedStreamAt,
							Long.valueOf(src.offset)));
				}
				expectedCRC = crc1.getValue();
			} else {
				expectedCRC = -1;
			}
		} catch (DataFormatException dataFormat) {
			setCorrupt(src.offset);

			CorruptObjectException corruptObject = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getPackName()));
			corruptObject.initCause(dataFormat);

			StoredObjectRepresentationNotAvailableException gone;
			gone = new StoredObjectRepresentationNotAvailableException(src);
			gone.initCause(corruptObject);
			throw gone;
		} catch (IOException ioError) {
			StoredObjectRepresentationNotAvailableException gone;
			gone = new StoredObjectRepresentationNotAvailableException(src);
			gone.initCause(ioError);
			throw gone;
		}

		if (quickCopy != null) {
			// The entire object fits into a single byte array window slice,
			// and we have it pinned. Write this out without copying.
			//
			out.writeHeader(src, inflatedLength);
			quickCopy.write(out, dataOffset, (int) dataLength);
		} else if (dataLength <= buf.length) {
			// Tiny optimization: Lots of objects are very small deltas or
			// deflated commits that are likely to fit in the copy buffer.
			//
			if (!validate) {
				long pos = dataOffset;
				long cnt = dataLength;
				while (cnt > 0) {
					final int n = (int) Math.min(cnt, buf.length);
					readFully(pos, buf, 0, n, ctx);
					pos += n;
					cnt -= n;
				}
			}
			out.writeHeader(src, inflatedLength);
			out.write(buf, 0, (int) dataLength);
		} else {
			// Now we are committed to sending the object. As we spool it out,
			// check its CRC32 code to make sure there wasn't corruption between
			// the verification we did above, and us actually outputting it.
			//
			out.writeHeader(src, inflatedLength);

			long pos = dataOffset;
			long cnt = dataLength;
			while (cnt > 0) {
				final int n = (int) Math.min(cnt, buf.length);
				readFully(pos, buf, 0, n, ctx);
				if (validate) {
					assert(crc2 != null);
					crc2.update(buf, 0, n);
				}
				out.write(buf, 0, n);
				pos += n;
				cnt -= n;
			}

			if (validate) {
				assert(crc2 != null);
				if (crc2.getValue() != expectedCRC) {
					throw new CorruptObjectException(MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getPackName()));
				}
			}
		}
	}
	boolean invalid() {
		return invalid;
	}

	void setInvalid() {
		invalid = true;
	}

	private IOException packfileIsTruncated() {
		invalid = true;
		IOException exc = new IOException(MessageFormat.format(
				JGitText.get().packfileIsTruncated, getPackName()));
		invalidatingCause = exc;
		return exc;
	}

	private void readFully(long position, byte[] dstbuf, int dstoff, int cnt,
			DfsReader ctx) throws IOException {
		if (ctx.copy(this, position, dstbuf, dstoff, cnt) != cnt)
			throw new EOFException();
	}

	long alignToBlock(long pos) {
		int size = blockSize;
		if (size == 0)
			size = cache.getBlockSize();
		return (pos / size) * size;
	}

	DfsBlock getOrLoadBlock(long pos, DfsReader ctx) throws IOException {
		return cache.getOrLoad(this, pos, ctx);
	}

	DfsBlock readOneBlock(long pos, DfsReader ctx)
			throws IOException {
		if (invalid) {
			throw new PackInvalidException(getPackName(), invalidatingCause);
		}

		ReadableChannel rc = ctx.db.openFile(packDesc, PACK);
		try {
			int size = blockSize(rc);
			pos = (pos / size) * size;

			// If the size of the file is not yet known, try to discover it.
			// Channels may choose to return -1 to indicate they don't
			// know the length yet; in this case, read up to the size unit
			// given by the caller, then recheck the length.
			long len = length;
			if (len < 0) {
				len = rc.size();
				if (0 <= len)
					length = len;
			}

			if (0 <= len && len < pos + size)
				size = (int) (len - pos);
			if (size <= 0)
				throw new EOFException(MessageFormat.format(
						DfsText.get().shortReadOfBlock, Long.valueOf(pos),
						getPackName(), Long.valueOf(0), Long.valueOf(0)));

			byte[] buf = new byte[size];
			rc.position(pos);
			int cnt = read(rc, ByteBuffer.wrap(buf, 0, size));
			if (cnt != size) {
				if (0 <= len) {
					throw new EOFException(MessageFormat.format(
							DfsText.get().shortReadOfBlock,
							Long.valueOf(pos),
							getPackName(),
							Integer.valueOf(size),
							Integer.valueOf(cnt)));
				}

				// Assume the entire thing was read in a single shot, compact
				// the buffer to only the space required.
				byte[] n = new byte[cnt];
				System.arraycopy(buf, 0, n, 0, n.length);
				buf = n;
			} else if (len < 0) {
				// With no length at the start of the read, the channel should
				// have the length available at the end.
				length = len = rc.size();
			}

			DfsBlock v = new DfsBlock(key, pos, buf);
			return v;
		} finally {
			rc.close();
		}
	}

	private int blockSize(ReadableChannel rc) {
		// If the block alignment is not yet known, discover it. Prefer the
		// larger size from either the cache or the file itself.
		int size = blockSize;
		if (size == 0) {
			size = rc.blockSize();
			if (size <= 0)
				size = cache.getBlockSize();
			else if (size < cache.getBlockSize())
				size = (cache.getBlockSize() / size) * size;
			blockSize = size;
		}
		return size;
	}

	private static int read(ReadableChannel rc, ByteBuffer buf)
			throws IOException {
		int n;
		do {
			n = rc.read(buf);
		} while (0 < n && buf.hasRemaining());
		return buf.position();
	}
	ObjectLoader load(DfsReader ctx, long pos)
			throws IOException {
		try {
			final byte[] ib = ctx.tempId;
			Delta delta = null;
			byte[] data = null;
			int type = Constants.OBJ_BAD;
			boolean cached = false;

			SEARCH: for (;;) {
				readFully(pos, ib, 0, 20, ctx);
				int c = ib[0] & 0xff;
				final int typeCode = (c >> 4) & 7;
				long sz = c & 15;
				int shift = 4;
				int p = 1;
				while ((c & 0x80) != 0) {
					c = ib[p++] & 0xff;
					sz += ((long) (c & 0x7f)) << shift;
					shift += 7;
				}

				switch (typeCode) {
				case Constants.OBJ_COMMIT:
				case Constants.OBJ_TREE:
				case Constants.OBJ_BLOB:
				case Constants.OBJ_TAG: {
					if (delta != null) {
						data = decompress(pos + p, (int) sz, ctx);
						type = typeCode;
						break SEARCH;
					}

					if (sz < ctx.getStreamFileThreshold()) {
						data = decompress(pos + p, (int) sz, ctx);
						if (data != null)
							return new ObjectLoader.SmallObject(typeCode, data);
					}
					return new LargePackedWholeObject(typeCode, sz, pos, p, this, ctx.db);
				}

				case Constants.OBJ_OFS_DELTA: {
					c = ib[p++] & 0xff;
					long base = c & 127;
					while ((c & 128) != 0) {
						base += 1;
						c = ib[p++] & 0xff;
						base <<= 7;
						base += (c & 127);
					}
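					// The bytes just decoded are the ofs-delta distance, a
					// big-endian base-128 varint where the accumulator is
					// incremented by one before each continuation byte so
					// shorter encodings are never duplicated. Worked example
					// (editor's illustration, not from the original source):
					// the byte pair 0x80 0x00 decodes to 128, placing the base
					// 128 bytes before pos.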
					base = pos - base;
					delta = new Delta(delta, pos, (int) sz, p, base);
					if (sz != delta.deltaSize)
						break SEARCH;

					DeltaBaseCache.Entry e = ctx.getDeltaBaseCache().get(key, base);
					if (e != null) {
						type = e.type;
						data = e.data;
						cached = true;
						break SEARCH;
					}
					pos = base;
					continue SEARCH;
				}

				case Constants.OBJ_REF_DELTA: {
					readFully(pos + p, ib, 0, 20, ctx);
					long base = findDeltaBase(ctx, ObjectId.fromRaw(ib));
					delta = new Delta(delta, pos, (int) sz, p + 20, base);
					if (sz != delta.deltaSize)
						break SEARCH;

					DeltaBaseCache.Entry e = ctx.getDeltaBaseCache().get(key, base);
					if (e != null) {
						type = e.type;
						data = e.data;
						cached = true;
						break SEARCH;
					}
					pos = base;
					continue SEARCH;
				}

				default:
					throw new IOException(MessageFormat.format(
							JGitText.get().unknownObjectType, Integer.valueOf(typeCode)));
				}
			}

			// At this point there is at least one delta to apply to data.
			// (Whole objects with no deltas to apply return early above.)
			if (data == null)
				throw new LargeObjectException();

			assert(delta != null);
			do {
				// Cache only the base immediately before desired object.
				if (cached)
					cached = false;
				else if (delta.next == null)
					ctx.getDeltaBaseCache().put(key, delta.basePos, type, data);

				pos = delta.deltaPos;

				byte[] cmds = decompress(pos + delta.hdrLen, delta.deltaSize, ctx);
				if (cmds == null) {
					data = null; // Discard base in case of OutOfMemoryError
					throw new LargeObjectException();
				}

				final long sz = BinaryDelta.getResultSize(cmds);
				if (Integer.MAX_VALUE <= sz)
					throw new LargeObjectException.ExceedsByteArrayLimit();

				final byte[] result;
				try {
					result = new byte[(int) sz];
				} catch (OutOfMemoryError tooBig) {
					data = null; // Discard base in case of OutOfMemoryError
					cmds = null;
					throw new LargeObjectException.OutOfMemory(tooBig);
				}

				BinaryDelta.apply(data, cmds, result);
				data = result;
				delta = delta.next;
			} while (delta != null);

			return new ObjectLoader.SmallObject(type, data);

		} catch (DataFormatException dfe) {
			CorruptObjectException coe = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
							getPackName()));
			coe.initCause(dfe);
			throw coe;
		}
	}

	private long findDeltaBase(DfsReader ctx, ObjectId baseId)
			throws IOException, MissingObjectException {
		long ofs = idx(ctx).findOffset(baseId);
		if (ofs < 0)
			throw new MissingObjectException(baseId,
					JGitText.get().missingDeltaBase);
		return ofs;
	}

	private static class Delta {
		/** Child that applies onto this object. */
		final Delta next;

		/** Offset of the delta object. */
		final long deltaPos;

		/** Size of the inflated delta stream. */
		final int deltaSize;

		/** Total size of the delta's pack entry header (including base). */
		final int hdrLen;

		/** Offset of the base object this delta applies onto. */
		final long basePos;

		Delta(Delta next, long ofs, int sz, int hdrLen, long baseOffset) {
			this.next = next;
			this.deltaPos = ofs;
			this.deltaSize = sz;
			this.hdrLen = hdrLen;
			this.basePos = baseOffset;
		}
	}
	byte[] getDeltaHeader(DfsReader wc, long pos)
			throws IOException, DataFormatException {
		// The delta stream starts as two variable length integers. If we
		// assume they are 64 bits each, we need 16 bytes to encode them,
		// plus 2 extra bytes for the variable length overhead. So 18 is
		// the longest delta instruction header.
		//
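		// Worked example (editor's illustration, not from the original
		// source): a delta that takes a 500 byte base to a 300 byte result
		// begins with the base size 500 encoded as 0xF4 0x03 followed by the
		// result size 300 encoded as 0xAC 0x02, both little-endian base-128
		// varints.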
		final byte[] hdr = new byte[32];
		wc.inflate(this, pos, hdr, true /* header only */);
		return hdr;
	}

	int getObjectType(DfsReader ctx, long pos) throws IOException {
		final byte[] ib = ctx.tempId;
		for (;;) {
			readFully(pos, ib, 0, 20, ctx);
			int c = ib[0] & 0xff;
			final int type = (c >> 4) & 7;

			switch (type) {
			case Constants.OBJ_COMMIT:
			case Constants.OBJ_TREE:
			case Constants.OBJ_BLOB:
			case Constants.OBJ_TAG:
				return type;

			case Constants.OBJ_OFS_DELTA: {
				int p = 1;
				while ((c & 0x80) != 0)
					c = ib[p++] & 0xff;
				c = ib[p++] & 0xff;
				long ofs = c & 127;
				while ((c & 128) != 0) {
					ofs += 1;
					c = ib[p++] & 0xff;
					ofs <<= 7;
					ofs += (c & 127);
				}
				pos = pos - ofs;
				continue;
			}

			case Constants.OBJ_REF_DELTA: {
				int p = 1;
				while ((c & 0x80) != 0)
					c = ib[p++] & 0xff;
				readFully(pos + p, ib, 0, 20, ctx);
				pos = findDeltaBase(ctx, ObjectId.fromRaw(ib));
				continue;
			}

			default:
				throw new IOException(MessageFormat.format(
						JGitText.get().unknownObjectType, Integer.valueOf(type)));
			}
		}
	}
	long getObjectSize(DfsReader ctx, AnyObjectId id) throws IOException {
		final long offset = idx(ctx).findOffset(id);
		return 0 < offset ? getObjectSize(ctx, offset) : -1;
	}

	long getObjectSize(DfsReader ctx, long pos)
			throws IOException {
		final byte[] ib = ctx.tempId;
		readFully(pos, ib, 0, 20, ctx);
		int c = ib[0] & 0xff;
		final int type = (c >> 4) & 7;
		long sz = c & 15;
		int shift = 4;
		int p = 1;
		while ((c & 0x80) != 0) {
			c = ib[p++] & 0xff;
			sz += ((long) (c & 0x7f)) << shift;
			shift += 7;
		}

		long deltaAt;
		switch (type) {
		case Constants.OBJ_COMMIT:
		case Constants.OBJ_TREE:
		case Constants.OBJ_BLOB:
		case Constants.OBJ_TAG:
			return sz;

		case Constants.OBJ_OFS_DELTA:
			c = ib[p++] & 0xff;
			while ((c & 128) != 0)
				c = ib[p++] & 0xff;
			deltaAt = pos + p;
			break;

		case Constants.OBJ_REF_DELTA:
			deltaAt = pos + p + 20;
			break;

		default:
			throw new IOException(MessageFormat.format(
					JGitText.get().unknownObjectType, Integer.valueOf(type)));
		}

		try {
			return BinaryDelta.getResultSize(getDeltaHeader(ctx, deltaAt));
		} catch (DataFormatException dfe) {
			CorruptObjectException coe = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
							getPackName()));
			coe.initCause(dfe);
			throw coe;
		}
	}

	void representation(DfsObjectRepresentation r, final long pos,
			DfsReader ctx, PackReverseIndex rev)
			throws IOException {
		r.offset = pos;
		final byte[] ib = ctx.tempId;
		readFully(pos, ib, 0, 20, ctx);
		int c = ib[0] & 0xff;
		int p = 1;
		final int typeCode = (c >> 4) & 7;
		while ((c & 0x80) != 0)
			c = ib[p++] & 0xff;

		long len = rev.findNextOffset(pos, length - 20) - pos;
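		// Editor's note: the stored (compressed) length of this entry is the
		// distance from its offset to the next object's offset in the reverse
		// index; length - 20 excludes the trailing pack checksum so the last
		// entry is measured correctly.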
		switch (typeCode) {
		case Constants.OBJ_COMMIT:
		case Constants.OBJ_TREE:
		case Constants.OBJ_BLOB:
		case Constants.OBJ_TAG:
			r.format = StoredObjectRepresentation.PACK_WHOLE;
			r.baseId = null;
			r.length = len - p;
			return;

		case Constants.OBJ_OFS_DELTA: {
			c = ib[p++] & 0xff;
			long ofs = c & 127;
			while ((c & 128) != 0) {
				ofs += 1;
				c = ib[p++] & 0xff;
				ofs <<= 7;
				ofs += (c & 127);
			}
			r.format = StoredObjectRepresentation.PACK_DELTA;
			r.baseId = rev.findObject(pos - ofs);
			r.length = len - p;
			return;
		}

		case Constants.OBJ_REF_DELTA: {
			readFully(pos + p, ib, 0, 20, ctx);
			r.format = StoredObjectRepresentation.PACK_DELTA;
			r.baseId = ObjectId.fromRaw(ib);
			r.length = len - p - 20;
			return;
		}

		default:
			throw new IOException(MessageFormat.format(
					JGitText.get().unknownObjectType, Integer.valueOf(typeCode)));
		}
	}

	boolean isCorrupt(long offset) {
		LongList list = corruptObjects;
		if (list == null)
			return false;
		synchronized (list) {
			return list.contains(offset);
		}
	}

	private void setCorrupt(long offset) {
		LongList list = corruptObjects;
		if (list == null) {
			synchronized (initLock) {
				list = corruptObjects;
				if (list == null) {
					list = new LongList();
					corruptObjects = list;
				}
			}
		}
		synchronized (list) {
			list.add(offset);
		}
	}
}