
DfsPackFile.java 34KB

/*
 * Copyright (C) 2008-2011, Google Inc.
 * Copyright (C) 2007, Robin Rosenberg <robin.rosenberg@dewire.com>
 * Copyright (C) 2006-2008, Shawn O. Pearce <spearce@spearce.org>
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.eclipse.jgit.internal.storage.dfs;

import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;

import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.text.MessageFormat;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.PackInvalidException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.file.PackBitmapIndex;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.internal.storage.pack.PackOutputStream;
import org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.util.LongList;
/**
 * A Git version 2 pack file representation. A pack file contains Git objects
 * in delta packed format, yielding high compression when many of the stored
 * objects are similar.
 */
public final class DfsPackFile {
	/**
	 * File offset used to cache {@link #index} in {@link DfsBlockCache}.
	 * <p>
	 * To better manage memory, the forward index is stored as a single block in
	 * the block cache under this file position. A negative value is used
	 * because it cannot occur in a normal pack file, and it is less likely to
	 * collide with a valid data block from the file as the high bits will all
	 * be set when treated as an unsigned long by the cache code.
	 */
	private static final long POS_INDEX = -1;

	/** Offset used to cache {@link #reverseIndex}. See {@link #POS_INDEX}. */
	private static final long POS_REVERSE_INDEX = -2;

	/** Offset used to cache {@link #bitmapIndex}. See {@link #POS_INDEX}. */
	private static final long POS_BITMAP_INDEX = -3;

	/** Cache that owns this pack file and its data. */
	private final DfsBlockCache cache;

	/** Description of the pack file's storage. */
	private final DfsPackDescription packDesc;

	/** Unique identity of this pack while in-memory. */
	final DfsPackKey key;

	/**
	 * Total number of bytes in this pack file.
	 * <p>
	 * This field initializes to -1 and gets populated when a block is loaded.
	 */
	volatile long length;
	/**
	 * Preferred alignment for loading blocks from the backing file.
	 * <p>
	 * It is initialized to 0 and filled in on the first read made from the
	 * file. Block sizes may be odd, e.g. 4091, caused by the underlying DFS
	 * storing 4091 user bytes and 5 bytes of block metadata into a lower level
	 * 4096 byte block on disk.
	 */
	private volatile int blockSize;

	/** True once corruption has been detected that cannot be worked around. */
	private volatile boolean invalid;
	/**
	 * Lock for initialization of {@link #index} and {@link #corruptObjects}.
	 * <p>
	 * This lock ensures only one thread can perform the initialization work.
	 */
	private final Object initLock = new Object();

	/** Index mapping {@link ObjectId} to position within the pack stream. */
	private volatile DfsBlockCache.Ref<PackIndex> index;

	/** Reverse version of {@link #index} mapping position to {@link ObjectId}. */
	private volatile DfsBlockCache.Ref<PackReverseIndex> reverseIndex;

	/** Index of compressed bitmap mapping entire object graph. */
	private volatile DfsBlockCache.Ref<PackBitmapIndex> bitmapIndex;

	/**
	 * Objects we have tried to read, and discovered to be corrupt.
	 * <p>
	 * The list is allocated after the first corruption is found, and filled in
	 * as more entries are discovered. Typically this list is never used, as
	 * pack files do not usually contain corrupt objects.
	 */
	private volatile LongList corruptObjects;
	/**
	 * Construct a reader for an existing pack file.
	 *
	 * @param cache
	 *            cache that owns the pack data.
	 * @param desc
	 *            description of the pack within the DFS.
	 * @param key
	 *            interned key used to identify blocks in the block cache.
	 */
	DfsPackFile(DfsBlockCache cache, DfsPackDescription desc, DfsPackKey key) {
		this.cache = cache;
		this.packDesc = desc;
		this.key = key;

		length = desc.getFileSize(PACK);
		if (length <= 0)
			length = -1;
	}
	/** @return description that was originally used to configure this pack file. */
	public DfsPackDescription getPackDescription() {
		return packDesc;
	}

	/**
	 * @return whether the pack index file is loaded and cached in memory.
	 * @since 2.2
	 */
	public boolean isIndexLoaded() {
		DfsBlockCache.Ref<PackIndex> idxref = index;
		return idxref != null && idxref.has();
	}

	/** @return bytes cached in memory for this pack, excluding the index. */
	public long getCachedSize() {
		return key.cachedSize.get();
	}

	String getPackName() {
		return packDesc.getFileName(PACK);
	}

	void setBlockSize(int newSize) {
		blockSize = newSize;
	}

	void setPackIndex(PackIndex idx) {
		long objCnt = idx.getObjectCount();
		int recSize = Constants.OBJECT_ID_LENGTH + 8;
		int sz = (int) Math.min(objCnt * recSize, Integer.MAX_VALUE);
		index = cache.put(key, POS_INDEX, sz, idx);
	}
	/**
	 * Get the PackIndex for this PackFile.
	 *
	 * @param ctx
	 *            reader context to support reading from the backing store if
	 *            the index is not already loaded in memory.
	 * @return the PackIndex.
	 * @throws IOException
	 *             the pack index is not available, or is corrupt.
	 */
	public PackIndex getPackIndex(DfsReader ctx) throws IOException {
		return idx(ctx);
	}

	private PackIndex idx(DfsReader ctx) throws IOException {
		DfsBlockCache.Ref<PackIndex> idxref = index;
		if (idxref != null) {
			PackIndex idx = idxref.get();
			if (idx != null)
				return idx;
		}

		if (invalid)
			throw new PackInvalidException(getPackName());

		Repository.getGlobalListenerList()
				.dispatch(new BeforeDfsPackIndexLoadedEvent(this));

		synchronized (initLock) {
			idxref = index;
			if (idxref != null) {
				PackIndex idx = idxref.get();
				if (idx != null)
					return idx;
			}

			PackIndex idx;
			try {
				ReadableChannel rc = ctx.db.openFile(packDesc, INDEX);
				try {
					InputStream in = Channels.newInputStream(rc);
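					// Buffer reads in a multiple of the channel's block size,
					// aiming for roughly 8 KiB per read.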
					int wantSize = 8192;
					int bs = rc.blockSize();
					if (0 < bs && bs < wantSize)
						bs = (wantSize / bs) * bs;
					else if (bs <= 0)
						bs = wantSize;
					in = new BufferedInputStream(in, bs);
					idx = PackIndex.read(in);
				} finally {
					rc.close();
				}
			} catch (EOFException e) {
				invalid = true;
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().shortReadOfIndex,
						packDesc.getFileName(INDEX)));
				e2.initCause(e);
				throw e2;
			} catch (IOException e) {
				invalid = true;
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().cannotReadIndex,
						packDesc.getFileName(INDEX)));
				e2.initCause(e);
				throw e2;
			}

			setPackIndex(idx);
			return idx;
		}
	}
	final boolean isGarbage() {
		return packDesc.getPackSource() == UNREACHABLE_GARBAGE;
	}

	PackBitmapIndex getBitmapIndex(DfsReader ctx) throws IOException {
		if (invalid || isGarbage())
			return null;

		DfsBlockCache.Ref<PackBitmapIndex> idxref = bitmapIndex;
		if (idxref != null) {
			PackBitmapIndex idx = idxref.get();
			if (idx != null)
				return idx;
		}

		if (!packDesc.hasFileExt(PackExt.BITMAP_INDEX))
			return null;

		synchronized (initLock) {
			idxref = bitmapIndex;
			if (idxref != null) {
				PackBitmapIndex idx = idxref.get();
				if (idx != null)
					return idx;
			}

			long size;
			PackBitmapIndex idx;
			try {
				ReadableChannel rc = ctx.db.openFile(packDesc, BITMAP_INDEX);
				try {
					InputStream in = Channels.newInputStream(rc);
					int wantSize = 8192;
					int bs = rc.blockSize();
					if (0 < bs && bs < wantSize)
						bs = (wantSize / bs) * bs;
					else if (bs <= 0)
						bs = wantSize;
					in = new BufferedInputStream(in, bs);
					idx = PackBitmapIndex.read(
							in, idx(ctx), getReverseIdx(ctx));
				} finally {
					size = rc.position();
					rc.close();
				}
			} catch (EOFException e) {
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().shortReadOfIndex,
						packDesc.getFileName(BITMAP_INDEX)));
				e2.initCause(e);
				throw e2;
			} catch (IOException e) {
				IOException e2 = new IOException(MessageFormat.format(
						DfsText.get().cannotReadIndex,
						packDesc.getFileName(BITMAP_INDEX)));
				e2.initCause(e);
				throw e2;
			}

			bitmapIndex = cache.put(key, POS_BITMAP_INDEX,
					(int) Math.min(size, Integer.MAX_VALUE), idx);
			return idx;
		}
	}
	PackReverseIndex getReverseIdx(DfsReader ctx) throws IOException {
		DfsBlockCache.Ref<PackReverseIndex> revref = reverseIndex;
		if (revref != null) {
			PackReverseIndex revidx = revref.get();
			if (revidx != null)
				return revidx;
		}

		synchronized (initLock) {
			revref = reverseIndex;
			if (revref != null) {
				PackReverseIndex revidx = revref.get();
				if (revidx != null)
					return revidx;
			}

			PackIndex idx = idx(ctx);
			PackReverseIndex revidx = new PackReverseIndex(idx);
			int sz = (int) Math.min(
					idx.getObjectCount() * 8, Integer.MAX_VALUE);
			reverseIndex = cache.put(key, POS_REVERSE_INDEX, sz, revidx);
			return revidx;
		}
	}
	/**
	 * Check if an object is stored within this pack.
	 *
	 * @param ctx
	 *            reader context to support reading from the backing store if
	 *            the index is not already loaded in memory.
	 * @param id
	 *            object to be located.
	 * @return true if the object exists in this pack; false if it does not.
	 * @throws IOException
	 *             the pack index is not available, or is corrupt.
	 */
	public boolean hasObject(DfsReader ctx, AnyObjectId id) throws IOException {
		final long offset = idx(ctx).findOffset(id);
		return 0 < offset && !isCorrupt(offset);
	}

	/**
	 * Get an object from this pack.
	 *
	 * @param ctx
	 *            temporary working space associated with the calling thread.
	 * @param id
	 *            the object to obtain from the pack. Must not be null.
	 * @return the object loader for the requested object if it is contained in
	 *         this pack; null if the object was not found.
	 * @throws IOException
	 *             the pack file or the index could not be read.
	 */
	ObjectLoader get(DfsReader ctx, AnyObjectId id)
			throws IOException {
		long offset = idx(ctx).findOffset(id);
		return 0 < offset && !isCorrupt(offset) ? load(ctx, offset) : null;
	}

	long findOffset(DfsReader ctx, AnyObjectId id) throws IOException {
		return idx(ctx).findOffset(id);
	}

	void resolve(DfsReader ctx, Set<ObjectId> matches, AbbreviatedObjectId id,
			int matchLimit) throws IOException {
		idx(ctx).resolve(matches, id, matchLimit);
	}
	/** Release all memory used by this DfsPackFile instance. */
	public void close() {
		cache.remove(this);
		index = null;
		reverseIndex = null;
	}

	/**
	 * Obtain the total number of objects available in this pack. This method
	 * relies on the pack index, giving the number of objects effectively
	 * available in the pack.
	 *
	 * @param ctx
	 *            current reader for the calling thread.
	 * @return number of objects in the index of this pack, and thus in the
	 *         pack itself.
	 * @throws IOException
	 *             the index file cannot be loaded into memory.
	 */
	long getObjectCount(DfsReader ctx) throws IOException {
		return idx(ctx).getObjectCount();
	}
	private byte[] decompress(long position, int sz, DfsReader ctx)
			throws IOException, DataFormatException {
		byte[] dstbuf;
		try {
			dstbuf = new byte[sz];
		} catch (OutOfMemoryError noMemory) {
			// The size may be larger than our heap allows, return null to
			// let the caller know allocation isn't possible and it should
			// use the large object streaming approach instead.
			//
			// For example, this can occur when sz is 640 MB, and JRE
			// maximum heap size is only 256 MB. Even if the JRE has
			// 200 MB free, it cannot allocate a 640 MB byte array.
			return null;
		}

		if (ctx.inflate(this, position, dstbuf, false) != sz)
			throw new EOFException(MessageFormat.format(
					JGitText.get().shortCompressedStreamAt,
					Long.valueOf(position)));
		return dstbuf;
	}
	void copyPackAsIs(PackOutputStream out, DfsReader ctx)
			throws IOException {
		// If the length hasn't been determined yet, pin to set it.
		if (length == -1) {
			ctx.pin(this, 0);
			ctx.unpin();
		}
		if (cache.shouldCopyThroughCache(length))
			copyPackThroughCache(out, ctx);
		else
			copyPackBypassCache(out, ctx);
	}
	private void copyPackThroughCache(PackOutputStream out, DfsReader ctx)
			throws IOException {
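		// Stream only the object data: skip the 12-byte pack header and stop
		// before the 20-byte SHA-1 trailer at the end of the file.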
		long position = 12;
		long remaining = length - (12 + 20);
		while (0 < remaining) {
			DfsBlock b = cache.getOrLoad(this, position, ctx);
			int ptr = (int) (position - b.start);
			int n = (int) Math.min(b.size() - ptr, remaining);
			b.write(out, position, n);
			position += n;
			remaining -= n;
		}
	}
	private long copyPackBypassCache(PackOutputStream out, DfsReader ctx)
			throws IOException {
		try (ReadableChannel rc = ctx.db.openFile(packDesc, PACK)) {
			ByteBuffer buf = newCopyBuffer(out, rc);
			if (ctx.getOptions().getStreamPackBufferSize() > 0)
				rc.setReadAheadBytes(ctx.getOptions().getStreamPackBufferSize());
			long position = 12;
			long remaining = length - (12 + 20);
			while (0 < remaining) {
				DfsBlock b = cache.get(key, alignToBlock(position));
				if (b != null) {
					int ptr = (int) (position - b.start);
					int n = (int) Math.min(b.size() - ptr, remaining);
					b.write(out, position, n);
					position += n;
					remaining -= n;
					rc.position(position);
					continue;
				}

				buf.position(0);
				int n = read(rc, buf);
				if (n <= 0)
					throw packfileIsTruncated();
				else if (n > remaining)
					n = (int) remaining;
				out.write(buf.array(), 0, n);
				position += n;
				remaining -= n;
			}
			return position;
		}
	}
	private ByteBuffer newCopyBuffer(PackOutputStream out, ReadableChannel rc) {
		int bs = blockSize(rc);
		byte[] copyBuf = out.getCopyBuffer();
		if (bs > copyBuf.length)
			copyBuf = new byte[bs];
		return ByteBuffer.wrap(copyBuf, 0, bs);
	}
	@SuppressWarnings("null")
	void copyAsIs(PackOutputStream out, DfsObjectToPack src,
			boolean validate, DfsReader ctx) throws IOException,
			StoredObjectRepresentationNotAvailableException {
		final CRC32 crc1 = validate ? new CRC32() : null;
		final CRC32 crc2 = validate ? new CRC32() : null;
		final byte[] buf = out.getCopyBuffer();

		// Rip apart the header so we can discover the size.
		//
		try {
			readFully(src.offset, buf, 0, 20, ctx);
		} catch (IOException ioError) {
			StoredObjectRepresentationNotAvailableException gone;
			gone = new StoredObjectRepresentationNotAvailableException(src);
			gone.initCause(ioError);
			throw gone;
		}
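		// The entry header stores the object type in bits 4-6 of the first
		// byte, and the inflated size as the low 4 bits plus a base-128
		// varint continued while the high bit of each byte is set.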
		int c = buf[0] & 0xff;
		final int typeCode = (c >> 4) & 7;
		long inflatedLength = c & 15;
		int shift = 4;
		int headerCnt = 1;
		while ((c & 0x80) != 0) {
			c = buf[headerCnt++] & 0xff;
			inflatedLength += ((long) (c & 0x7f)) << shift;
			shift += 7;
		}

		if (typeCode == Constants.OBJ_OFS_DELTA) {
			do {
				c = buf[headerCnt++] & 0xff;
			} while ((c & 128) != 0);
			if (validate) {
				crc1.update(buf, 0, headerCnt);
				crc2.update(buf, 0, headerCnt);
			}
		} else if (typeCode == Constants.OBJ_REF_DELTA) {
			if (validate) {
				crc1.update(buf, 0, headerCnt);
				crc2.update(buf, 0, headerCnt);
			}

			readFully(src.offset + headerCnt, buf, 0, 20, ctx);
			if (validate) {
				crc1.update(buf, 0, 20);
				crc2.update(buf, 0, 20);
			}
			headerCnt += 20;
		} else if (validate) {
			crc1.update(buf, 0, headerCnt);
			crc2.update(buf, 0, headerCnt);
		}
		final long dataOffset = src.offset + headerCnt;
		final long dataLength = src.length;
		final long expectedCRC;
		final DfsBlock quickCopy;

		// Verify the object isn't corrupt before sending. If it is,
		// we report it missing instead.
		//
		try {
			quickCopy = ctx.quickCopy(this, dataOffset, dataLength);

			if (validate && idx(ctx).hasCRC32Support()) {
				// Index has the CRC32 code cached, validate the object.
				//
				expectedCRC = idx(ctx).findCRC32(src);
				if (quickCopy != null) {
					quickCopy.crc32(crc1, dataOffset, (int) dataLength);
				} else {
					long pos = dataOffset;
					long cnt = dataLength;
					while (cnt > 0) {
						final int n = (int) Math.min(cnt, buf.length);
						readFully(pos, buf, 0, n, ctx);
						crc1.update(buf, 0, n);
						pos += n;
						cnt -= n;
					}
				}
				if (crc1.getValue() != expectedCRC) {
					setCorrupt(src.offset);
					throw new CorruptObjectException(MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getPackName()));
				}
			} else if (validate) {
				// We don't have a CRC32 code in the index, so compute it
				// now while inflating the raw data to get zlib to tell us
				// whether or not the data is safe.
				//
				Inflater inf = ctx.inflater();
				byte[] tmp = new byte[1024];
				if (quickCopy != null) {
					quickCopy.check(inf, tmp, dataOffset, (int) dataLength);
				} else {
					long pos = dataOffset;
					long cnt = dataLength;
					while (cnt > 0) {
						final int n = (int) Math.min(cnt, buf.length);
						readFully(pos, buf, 0, n, ctx);
						crc1.update(buf, 0, n);
						inf.setInput(buf, 0, n);
						while (inf.inflate(tmp, 0, tmp.length) > 0)
							continue;
						pos += n;
						cnt -= n;
					}
				}
				if (!inf.finished() || inf.getBytesRead() != dataLength) {
					setCorrupt(src.offset);
					throw new EOFException(MessageFormat.format(
							JGitText.get().shortCompressedStreamAt,
							Long.valueOf(src.offset)));
				}
				expectedCRC = crc1.getValue();
			} else {
				expectedCRC = -1;
			}
		} catch (DataFormatException dataFormat) {
			setCorrupt(src.offset);

			CorruptObjectException corruptObject = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getPackName()));
			corruptObject.initCause(dataFormat);

			StoredObjectRepresentationNotAvailableException gone;
			gone = new StoredObjectRepresentationNotAvailableException(src);
			gone.initCause(corruptObject);
			throw gone;
		} catch (IOException ioError) {
			StoredObjectRepresentationNotAvailableException gone;
			gone = new StoredObjectRepresentationNotAvailableException(src);
			gone.initCause(ioError);
			throw gone;
		}

		if (quickCopy != null) {
			// The entire object fits into a single byte array window slice,
			// and we have it pinned. Write this out without copying.
			//
			out.writeHeader(src, inflatedLength);
			quickCopy.write(out, dataOffset, (int) dataLength);

		} else if (dataLength <= buf.length) {
			// Tiny optimization: Lots of objects are very small deltas or
			// deflated commits that are likely to fit in the copy buffer.
			//
			if (!validate) {
				long pos = dataOffset;
				long cnt = dataLength;
				while (cnt > 0) {
					final int n = (int) Math.min(cnt, buf.length);
					readFully(pos, buf, 0, n, ctx);
					pos += n;
					cnt -= n;
				}
			}
			out.writeHeader(src, inflatedLength);
			out.write(buf, 0, (int) dataLength);
		} else {
			// Now we are committed to sending the object. As we spool it out,
			// check its CRC32 code to make sure there wasn't corruption between
			// the verification we did above, and us actually outputting it.
			//
			out.writeHeader(src, inflatedLength);

			long pos = dataOffset;
			long cnt = dataLength;
			while (cnt > 0) {
				final int n = (int) Math.min(cnt, buf.length);
				readFully(pos, buf, 0, n, ctx);
				if (validate)
					crc2.update(buf, 0, n);
				out.write(buf, 0, n);
				pos += n;
				cnt -= n;
			}

			if (validate && crc2.getValue() != expectedCRC) {
				throw new CorruptObjectException(MessageFormat.format(
						JGitText.get().objectAtHasBadZlibStream,
						Long.valueOf(src.offset), getPackName()));
			}
		}
	}
	boolean invalid() {
		return invalid;
	}

	void setInvalid() {
		invalid = true;
	}

	private IOException packfileIsTruncated() {
		invalid = true;
		return new IOException(MessageFormat.format(
				JGitText.get().packfileIsTruncated, getPackName()));
	}

	private void readFully(long position, byte[] dstbuf, int dstoff, int cnt,
			DfsReader ctx) throws IOException {
		if (ctx.copy(this, position, dstbuf, dstoff, cnt) != cnt)
			throw new EOFException();
	}

	long alignToBlock(long pos) {
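		// Round the position down to the start of its containing cache block.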
		int size = blockSize;
		if (size == 0)
			size = cache.getBlockSize();
		return (pos / size) * size;
	}

	DfsBlock getOrLoadBlock(long pos, DfsReader ctx) throws IOException {
		return cache.getOrLoad(this, pos, ctx);
	}

	DfsBlock readOneBlock(long pos, DfsReader ctx)
			throws IOException {
		if (invalid)
			throw new PackInvalidException(getPackName());

		ReadableChannel rc = ctx.db.openFile(packDesc, PACK);
		try {
			int size = blockSize(rc);
			pos = (pos / size) * size;

			// If the size of the file is not yet known, try to discover it.
			// Channels may choose to return -1 to indicate they don't
			// know the length yet, in this case read up to the size unit
			// given by the caller, then recheck the length.
			long len = length;
			if (len < 0) {
				len = rc.size();
				if (0 <= len)
					length = len;
			}

			if (0 <= len && len < pos + size)
				size = (int) (len - pos);
			if (size <= 0)
				throw new EOFException(MessageFormat.format(
						DfsText.get().shortReadOfBlock, Long.valueOf(pos),
						getPackName(), Long.valueOf(0), Long.valueOf(0)));

			byte[] buf = new byte[size];
			rc.position(pos);
			int cnt = read(rc, ByteBuffer.wrap(buf, 0, size));
			if (cnt != size) {
				if (0 <= len) {
					throw new EOFException(MessageFormat.format(
							DfsText.get().shortReadOfBlock,
							Long.valueOf(pos),
							getPackName(),
							Integer.valueOf(size),
							Integer.valueOf(cnt)));
				}

				// Assume the entire thing was read in a single shot, compact
				// the buffer to only the space required.
				byte[] n = new byte[cnt];
				System.arraycopy(buf, 0, n, 0, n.length);
				buf = n;
			} else if (len < 0) {
				// With no length at the start of the read, the channel should
				// have the length available at the end.
				length = len = rc.size();
			}

			DfsBlock v = new DfsBlock(key, pos, buf);
			return v;
		} finally {
			rc.close();
		}
	}
	private int blockSize(ReadableChannel rc) {
		// If the block alignment is not yet known, discover it. Prefer the
		// larger size from either the cache or the file itself.
		int size = blockSize;
		if (size == 0) {
			size = rc.blockSize();
			if (size <= 0)
				size = cache.getBlockSize();
			else if (size < cache.getBlockSize())
				size = (cache.getBlockSize() / size) * size;
			blockSize = size;
		}
		return size;
	}

	private static int read(ReadableChannel rc, ByteBuffer buf)
			throws IOException {
		int n;
		do {
			n = rc.read(buf);
		} while (0 < n && buf.hasRemaining());
		return buf.position();
	}
	@SuppressWarnings("null")
	ObjectLoader load(DfsReader ctx, long pos)
			throws IOException {
		try {
			final byte[] ib = ctx.tempId;
			Delta delta = null;
			byte[] data = null;
			int type = Constants.OBJ_BAD;
			boolean cached = false;

			SEARCH: for (;;) {
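				// Parse the entry at pos, following delta chains: whole
				// objects return below, while deltas push a Delta record and
				// loop again at the base's position.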
				readFully(pos, ib, 0, 20, ctx);
				int c = ib[0] & 0xff;
				final int typeCode = (c >> 4) & 7;
				long sz = c & 15;
				int shift = 4;
				int p = 1;
				while ((c & 0x80) != 0) {
					c = ib[p++] & 0xff;
					sz += ((long) (c & 0x7f)) << shift;
					shift += 7;
				}

				switch (typeCode) {
				case Constants.OBJ_COMMIT:
				case Constants.OBJ_TREE:
				case Constants.OBJ_BLOB:
				case Constants.OBJ_TAG: {
					if (delta != null) {
						data = decompress(pos + p, (int) sz, ctx);
						type = typeCode;
						break SEARCH;
					}

					if (sz < ctx.getStreamFileThreshold()) {
						data = decompress(pos + p, (int) sz, ctx);
						if (data != null)
							return new ObjectLoader.SmallObject(typeCode, data);
					}
					return new LargePackedWholeObject(typeCode, sz, pos, p, this, ctx.db);
				}

				case Constants.OBJ_OFS_DELTA: {
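					// The delta base is stored as a negative offset from this
					// entry, encoded base-128 with an implicit +1 folded into
					// each continuation byte.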
					c = ib[p++] & 0xff;
					long base = c & 127;
					while ((c & 128) != 0) {
						base += 1;
						c = ib[p++] & 0xff;
						base <<= 7;
						base += (c & 127);
					}
					base = pos - base;
					delta = new Delta(delta, pos, (int) sz, p, base);
					if (sz != delta.deltaSize)
						break SEARCH;

					DeltaBaseCache.Entry e = ctx.getDeltaBaseCache().get(key, base);
					if (e != null) {
						type = e.type;
						data = e.data;
						cached = true;
						break SEARCH;
					}
					pos = base;
					continue SEARCH;
				}

				case Constants.OBJ_REF_DELTA: {
					readFully(pos + p, ib, 0, 20, ctx);
					long base = findDeltaBase(ctx, ObjectId.fromRaw(ib));
					delta = new Delta(delta, pos, (int) sz, p + 20, base);
					if (sz != delta.deltaSize)
						break SEARCH;

					DeltaBaseCache.Entry e = ctx.getDeltaBaseCache().get(key, base);
					if (e != null) {
						type = e.type;
						data = e.data;
						cached = true;
						break SEARCH;
					}
					pos = base;
					continue SEARCH;
				}

				default:
					throw new IOException(MessageFormat.format(
							JGitText.get().unknownObjectType, Integer.valueOf(typeCode)));
				}
			}
			// At this point there is at least one delta to apply to data.
			// (Whole objects with no deltas to apply return early above.)
			if (data == null)
				throw new LargeObjectException();

			do {
				// Cache only the base immediately before desired object.
				if (cached)
					cached = false;
				else if (delta.next == null)
					ctx.getDeltaBaseCache().put(key, delta.basePos, type, data);

				pos = delta.deltaPos;

				byte[] cmds = decompress(pos + delta.hdrLen, delta.deltaSize, ctx);
				if (cmds == null) {
					data = null; // Discard base in case of OutOfMemoryError
					throw new LargeObjectException();
				}

				final long sz = BinaryDelta.getResultSize(cmds);
				if (Integer.MAX_VALUE <= sz)
					throw new LargeObjectException.ExceedsByteArrayLimit();

				final byte[] result;
				try {
					result = new byte[(int) sz];
				} catch (OutOfMemoryError tooBig) {
					data = null; // Discard base in case of OutOfMemoryError
					cmds = null;
					throw new LargeObjectException.OutOfMemory(tooBig);
				}

				BinaryDelta.apply(data, cmds, result);
				data = result;
				delta = delta.next;
			} while (delta != null);

			return new ObjectLoader.SmallObject(type, data);

		} catch (DataFormatException dfe) {
			CorruptObjectException coe = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
							getPackName()));
			coe.initCause(dfe);
			throw coe;
		}
	}
	private long findDeltaBase(DfsReader ctx, ObjectId baseId)
			throws IOException, MissingObjectException {
		long ofs = idx(ctx).findOffset(baseId);
		if (ofs < 0)
			throw new MissingObjectException(baseId,
					JGitText.get().missingDeltaBase);
		return ofs;
	}

	private static class Delta {
		/** Child that applies onto this object. */
		final Delta next;

		/** Offset of the delta object. */
		final long deltaPos;

		/** Size of the inflated delta stream. */
		final int deltaSize;

		/** Total size of the delta's pack entry header (including base). */
		final int hdrLen;

		/** Offset of the base object this delta applies onto. */
		final long basePos;

		Delta(Delta next, long ofs, int sz, int hdrLen, long baseOffset) {
			this.next = next;
			this.deltaPos = ofs;
			this.deltaSize = sz;
			this.hdrLen = hdrLen;
			this.basePos = baseOffset;
		}
	}
	byte[] getDeltaHeader(DfsReader wc, long pos)
			throws IOException, DataFormatException {
		// The delta stream starts as two variable length integers. If we
		// assume they are 64 bits each, we need 16 bytes to encode them,
		// plus 2 extra bytes for the variable length overhead. So 18 is
		// the longest delta instruction header.
		//
		final byte[] hdr = new byte[32];
		wc.inflate(this, pos, hdr, true /* header only */);
		return hdr;
	}
	int getObjectType(DfsReader ctx, long pos) throws IOException {
		final byte[] ib = ctx.tempId;
		for (;;) {
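			// Delta entries do not record their final type; follow the chain
			// of bases until a whole (non-delta) object is reached.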
			readFully(pos, ib, 0, 20, ctx);
			int c = ib[0] & 0xff;
			final int type = (c >> 4) & 7;

			switch (type) {
			case Constants.OBJ_COMMIT:
			case Constants.OBJ_TREE:
			case Constants.OBJ_BLOB:
			case Constants.OBJ_TAG:
				return type;

			case Constants.OBJ_OFS_DELTA: {
				int p = 1;
				while ((c & 0x80) != 0)
					c = ib[p++] & 0xff;
				c = ib[p++] & 0xff;
				long ofs = c & 127;
				while ((c & 128) != 0) {
					ofs += 1;
					c = ib[p++] & 0xff;
					ofs <<= 7;
					ofs += (c & 127);
				}
				pos = pos - ofs;
				continue;
			}

			case Constants.OBJ_REF_DELTA: {
				int p = 1;
				while ((c & 0x80) != 0)
					c = ib[p++] & 0xff;
				readFully(pos + p, ib, 0, 20, ctx);
				pos = findDeltaBase(ctx, ObjectId.fromRaw(ib));
				continue;
			}

			default:
				throw new IOException(MessageFormat.format(
						JGitText.get().unknownObjectType, Integer.valueOf(type)));
			}
		}
	}
	long getObjectSize(DfsReader ctx, AnyObjectId id) throws IOException {
		final long offset = idx(ctx).findOffset(id);
		return 0 < offset ? getObjectSize(ctx, offset) : -1;
	}

	long getObjectSize(DfsReader ctx, long pos)
			throws IOException {
		final byte[] ib = ctx.tempId;
		readFully(pos, ib, 0, 20, ctx);
		int c = ib[0] & 0xff;
		final int type = (c >> 4) & 7;
		long sz = c & 15;
		int shift = 4;
		int p = 1;
		while ((c & 0x80) != 0) {
			c = ib[p++] & 0xff;
			sz += ((long) (c & 0x7f)) << shift;
			shift += 7;
		}

		long deltaAt;
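		// For whole objects sz is already the inflated size. For deltas it is
		// the size of the delta stream; the result size is read from the
		// delta header below.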
		switch (type) {
		case Constants.OBJ_COMMIT:
		case Constants.OBJ_TREE:
		case Constants.OBJ_BLOB:
		case Constants.OBJ_TAG:
			return sz;

		case Constants.OBJ_OFS_DELTA:
			c = ib[p++] & 0xff;
			while ((c & 128) != 0)
				c = ib[p++] & 0xff;
			deltaAt = pos + p;
			break;

		case Constants.OBJ_REF_DELTA:
			deltaAt = pos + p + 20;
			break;

		default:
			throw new IOException(MessageFormat.format(
					JGitText.get().unknownObjectType, Integer.valueOf(type)));
		}

		try {
			return BinaryDelta.getResultSize(getDeltaHeader(ctx, deltaAt));
		} catch (DataFormatException dfe) {
			CorruptObjectException coe = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
							getPackName()));
			coe.initCause(dfe);
			throw coe;
		}
	}
	void representation(DfsObjectRepresentation r, final long pos,
			DfsReader ctx, PackReverseIndex rev)
			throws IOException {
		r.offset = pos;
		final byte[] ib = ctx.tempId;
		readFully(pos, ib, 0, 20, ctx);
		int c = ib[0] & 0xff;
		int p = 1;
		final int typeCode = (c >> 4) & 7;
		while ((c & 0x80) != 0)
			c = ib[p++] & 0xff;
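		// On-disk length of this entry, measured to the next object's offset
		// (or to the 20-byte pack trailer) via the reverse index.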
		long len = rev.findNextOffset(pos, length - 20) - pos;
		switch (typeCode) {
		case Constants.OBJ_COMMIT:
		case Constants.OBJ_TREE:
		case Constants.OBJ_BLOB:
		case Constants.OBJ_TAG:
			r.format = StoredObjectRepresentation.PACK_WHOLE;
			r.baseId = null;
			r.length = len - p;
			return;

		case Constants.OBJ_OFS_DELTA: {
			c = ib[p++] & 0xff;
			long ofs = c & 127;
			while ((c & 128) != 0) {
				ofs += 1;
				c = ib[p++] & 0xff;
				ofs <<= 7;
				ofs += (c & 127);
			}
			r.format = StoredObjectRepresentation.PACK_DELTA;
			r.baseId = rev.findObject(pos - ofs);
			r.length = len - p;
			return;
		}

		case Constants.OBJ_REF_DELTA: {
			readFully(pos + p, ib, 0, 20, ctx);
			r.format = StoredObjectRepresentation.PACK_DELTA;
			r.baseId = ObjectId.fromRaw(ib);
			r.length = len - p - 20;
			return;
		}

		default:
			throw new IOException(MessageFormat.format(
					JGitText.get().unknownObjectType, Integer.valueOf(typeCode)));
		}
	}
	private boolean isCorrupt(long offset) {
		LongList list = corruptObjects;
		if (list == null)
			return false;
		synchronized (list) {
			return list.contains(offset);
		}
	}

	private void setCorrupt(long offset) {
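		// Lazily allocate the list under initLock, then record the offset
		// while holding the list's own monitor.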
		LongList list = corruptObjects;
		if (list == null) {
			synchronized (initLock) {
				list = corruptObjects;
				if (list == null) {
					list = new LongList();
					corruptObjects = list;
				}
			}
		}

		synchronized (list) {
			list.add(offset);
		}
	}
}