
DfsPackFile.java

/*
 * Copyright (C) 2008-2011, Google Inc.
 * Copyright (C) 2007, Robin Rosenberg <robin.rosenberg@dewire.com>
 * Copyright (C) 2006-2008, Shawn O. Pearce <spearce@spearce.org>
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 * names of its contributors may be used to endorse or promote
 * products derived from this software without specific prior
 * written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;

import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;

import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.text.MessageFormat;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.PackInvalidException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.file.PackBitmapIndex;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
import org.eclipse.jgit.internal.storage.pack.BinaryDelta;
import org.eclipse.jgit.internal.storage.pack.PackOutputStream;
import org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.Repository;
import org.eclipse.jgit.util.LongList;

/**
 * A Git version 2 pack file representation. A pack file contains Git objects
 * in delta packed format yielding high compression of lots of objects where
 * some objects are similar.
 */
public final class DfsPackFile extends BlockBasedFile {
	private static final int REC_SIZE = Constants.OBJECT_ID_LENGTH + 8;

	/**
	 * Lock for initialization of {@link #index} and {@link #corruptObjects}.
	 * <p>
	 * This lock ensures only one thread can perform the initialization work.
	 */
	private final Object initLock = new Object();

	/** Index mapping {@link ObjectId} to position within the pack stream. */
	private volatile DfsBlockCache.Ref<PackIndex> index;

	/** Reverse version of {@link #index} mapping position to {@link ObjectId}. */
	private volatile DfsBlockCache.Ref<PackReverseIndex> reverseIndex;

	/** Index of compressed bitmap mapping entire object graph. */
	private volatile DfsBlockCache.Ref<PackBitmapIndex> bitmapIndex;

	/**
	 * Objects we have tried to read, and discovered to be corrupt.
	 * <p>
	 * The list is allocated after the first corruption is found, and filled in
	 * as more entries are discovered. Typically this list is never used, as
	 * pack files do not usually contain corrupt objects.
	 */
	private volatile LongList corruptObjects;

	/**
	 * Construct a reader for an existing packfile.
	 *
	 * @param cache
	 *            cache that owns the pack data.
	 * @param desc
	 *            description of the pack within the DFS.
	 */
	DfsPackFile(DfsBlockCache cache, DfsPackDescription desc) {
		super(cache, desc, PACK);

		int bs = desc.getBlockSize(PACK);
		if (bs > 0) {
			setBlockSize(bs);
		}

		long sz = desc.getFileSize(PACK);
		length = sz > 0 ? sz : -1;
	}
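
	// Illustrative usage sketch (hedged, not part of this file): instances are
	// normally handed out by the owning DfsObjDatabase, and every read needs a
	// DfsReader for I/O and caching. Assuming `db` is a DfsObjDatabase and
	// `id` an ObjectId, a lookup would go roughly like:
	//
	//   try (DfsReader ctx = (DfsReader) db.newReader()) {
	//       for (DfsPackFile pack : db.getPacks()) {
	//           if (pack.hasObject(ctx, id)) {
	//               // the object is stored in this pack
	//           }
	//       }
	//   }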

	/**
	 * Get description that was originally used to configure this pack file.
	 *
	 * @return description that was originally used to configure this pack file.
	 */
	public DfsPackDescription getPackDescription() {
		return desc;
	}

	/**
	 * Whether the pack index file is loaded and cached in memory.
	 *
	 * @return whether the pack index file is loaded and cached in memory.
	 */
	public boolean isIndexLoaded() {
		DfsBlockCache.Ref<PackIndex> idxref = index;
		return idxref != null && idxref.has();
	}

	void setPackIndex(PackIndex idx) {
		long objCnt = idx.getObjectCount();
		int recSize = Constants.OBJECT_ID_LENGTH + 8;
		long sz = objCnt * recSize;
		index = cache.putRef(desc.getStreamKey(INDEX), sz, idx);
	}

	/**
	 * Get the PackIndex for this PackFile.
	 *
	 * @param ctx
	 *            reader context to support reading from the backing store if
	 *            the index is not already loaded in memory.
	 * @return the PackIndex.
	 * @throws java.io.IOException
	 *             the pack index is not available, or is corrupt.
	 */
	public PackIndex getPackIndex(DfsReader ctx) throws IOException {
		return idx(ctx);
	}

	private PackIndex idx(DfsReader ctx) throws IOException {
		DfsBlockCache.Ref<PackIndex> idxref = index;
		if (idxref != null) {
			PackIndex idx = idxref.get();
			if (idx != null) {
				return idx;
			}
		}

		if (invalid) {
			throw new PackInvalidException(getFileName());
		}

		Repository.getGlobalListenerList()
				.dispatch(new BeforeDfsPackIndexLoadedEvent(this));

		synchronized (initLock) {
			idxref = index;
			if (idxref != null) {
				PackIndex idx = idxref.get();
				if (idx != null) {
					return idx;
				}
			}

			DfsStreamKey idxKey = desc.getStreamKey(INDEX);
			try {
				idxref = cache.getOrLoadRef(idxKey, () -> {
					try {
						ctx.stats.readIdx++;
						long start = System.nanoTime();
						try (ReadableChannel rc = ctx.db.openFile(desc,
								INDEX)) {
							InputStream in = Channels.newInputStream(rc);
							int wantSize = 8192;
							int bs = rc.blockSize();
							if (0 < bs && bs < wantSize) {
								bs = (wantSize / bs) * bs;
							} else if (bs <= 0) {
								bs = wantSize;
							}
							PackIndex idx = PackIndex
									.read(new BufferedInputStream(in, bs));
							int sz = (int) Math.min(
									idx.getObjectCount() * REC_SIZE,
									Integer.MAX_VALUE);
							ctx.stats.readIdxBytes += rc.position();
							return new DfsBlockCache.Ref<>(idxKey, 0, sz, idx);
						} finally {
							ctx.stats.readIdxMicros += elapsedMicros(start);
						}
					} catch (EOFException e) {
						throw new IOException(MessageFormat.format(
								DfsText.get().shortReadOfIndex,
								desc.getFileName(INDEX)), e);
					} catch (IOException e) {
						throw new IOException(MessageFormat.format(
								DfsText.get().cannotReadIndex,
								desc.getFileName(INDEX)), e);
					}
				});
			} catch (IOException e) {
				invalid = true;
				throw e;
			}
			PackIndex idx = idxref.get();
			if (idx != null) {
				index = idxref;
			}
			return idx;
		}
	}

	final boolean isGarbage() {
		return desc.getPackSource() == UNREACHABLE_GARBAGE;
	}

	PackBitmapIndex getBitmapIndex(DfsReader ctx) throws IOException {
		if (invalid || isGarbage() || !desc.hasFileExt(BITMAP_INDEX)) {
			return null;
		}

		DfsBlockCache.Ref<PackBitmapIndex> idxref = bitmapIndex;
		if (idxref != null) {
			PackBitmapIndex bmidx = idxref.get();
			if (bmidx != null) {
				return bmidx;
			}
		}

		synchronized (initLock) {
			idxref = bitmapIndex;
			if (idxref != null) {
				PackBitmapIndex bmidx = idxref.get();
				if (bmidx != null) {
					return bmidx;
				}
			}

			PackIndex idx = idx(ctx);
			PackReverseIndex revidx = getReverseIdx(ctx);
			DfsStreamKey bitmapKey = desc.getStreamKey(BITMAP_INDEX);
			idxref = cache.getOrLoadRef(bitmapKey, () -> {
				ctx.stats.readBitmap++;
				long start = System.nanoTime();
				try (ReadableChannel rc = ctx.db.openFile(desc, BITMAP_INDEX)) {
					long size;
					PackBitmapIndex bmidx;
					try {
						InputStream in = Channels.newInputStream(rc);
						int wantSize = 8192;
						int bs = rc.blockSize();
						if (0 < bs && bs < wantSize) {
							bs = (wantSize / bs) * bs;
						} else if (bs <= 0) {
							bs = wantSize;
						}
						in = new BufferedInputStream(in, bs);
						bmidx = PackBitmapIndex.read(in, idx, revidx);
					} finally {
						size = rc.position();
						ctx.stats.readIdxBytes += size;
						ctx.stats.readIdxMicros += elapsedMicros(start);
					}
					int sz = (int) Math.min(size, Integer.MAX_VALUE);
					return new DfsBlockCache.Ref<>(bitmapKey, 0, sz, bmidx);
				} catch (EOFException e) {
					throw new IOException(
							MessageFormat.format(DfsText.get().shortReadOfIndex,
									desc.getFileName(BITMAP_INDEX)),
							e);
				} catch (IOException e) {
					throw new IOException(
							MessageFormat.format(DfsText.get().cannotReadIndex,
									desc.getFileName(BITMAP_INDEX)),
							e);
				}
			});
			PackBitmapIndex bmidx = idxref.get();
			if (bmidx != null) {
				bitmapIndex = idxref;
			}
			return bmidx;
		}
	}

	PackReverseIndex getReverseIdx(DfsReader ctx) throws IOException {
		DfsBlockCache.Ref<PackReverseIndex> revref = reverseIndex;
		if (revref != null) {
			PackReverseIndex revidx = revref.get();
			if (revidx != null) {
				return revidx;
			}
		}

		synchronized (initLock) {
			revref = reverseIndex;
			if (revref != null) {
				PackReverseIndex revidx = revref.get();
				if (revidx != null) {
					return revidx;
				}
			}

			PackIndex idx = idx(ctx);
			DfsStreamKey revKey = new DfsStreamKey.ForReverseIndex(
					desc.getStreamKey(INDEX));
			revref = cache.getOrLoadRef(revKey, () -> {
				PackReverseIndex revidx = new PackReverseIndex(idx);
				int sz = (int) Math.min(idx.getObjectCount() * 8,
						Integer.MAX_VALUE);
				return new DfsBlockCache.Ref<>(revKey, 0, sz, revidx);
			});
			PackReverseIndex revidx = revref.get();
			if (revidx != null) {
				reverseIndex = revref;
			}
			return revidx;
		}
	}

	/**
	 * Check if an object is stored within this pack.
	 *
	 * @param ctx
	 *            reader context to support reading from the backing store if
	 *            the index is not already loaded in memory.
	 * @param id
	 *            object to be located.
	 * @return true if the object exists in this pack; false if it does not.
	 * @throws java.io.IOException
	 *             the pack index is not available, or is corrupt.
	 */
	public boolean hasObject(DfsReader ctx, AnyObjectId id) throws IOException {
		final long offset = idx(ctx).findOffset(id);
		return 0 < offset && !isCorrupt(offset);
	}

	/**
	 * Get an object from this pack.
	 *
	 * @param ctx
	 *            temporary working space associated with the calling thread.
	 * @param id
	 *            the object to obtain from the pack. Must not be null.
	 * @return the object loader for the requested object if it is contained in
	 *         this pack; null if the object was not found.
	 * @throws IOException
	 *             the pack file or the index could not be read.
	 */
	ObjectLoader get(DfsReader ctx, AnyObjectId id)
			throws IOException {
		long offset = idx(ctx).findOffset(id);
		return 0 < offset && !isCorrupt(offset) ? load(ctx, offset) : null;
	}

	long findOffset(DfsReader ctx, AnyObjectId id) throws IOException {
		return idx(ctx).findOffset(id);
	}

	void resolve(DfsReader ctx, Set<ObjectId> matches, AbbreviatedObjectId id,
			int matchLimit) throws IOException {
		idx(ctx).resolve(matches, id, matchLimit);
	}

	/**
	 * Obtain the total number of objects available in this pack. This method
	 * relies on the pack index, giving the number of effectively available
	 * objects.
	 *
	 * @param ctx
	 *            current reader for the calling thread.
	 * @return number of objects in the index of this pack, likewise in this pack
	 * @throws IOException
	 *             the index file cannot be loaded into memory.
	 */
	long getObjectCount(DfsReader ctx) throws IOException {
		return idx(ctx).getObjectCount();
	}

	private byte[] decompress(long position, int sz, DfsReader ctx)
			throws IOException, DataFormatException {
		byte[] dstbuf;
		try {
			dstbuf = new byte[sz];
		} catch (OutOfMemoryError noMemory) {
			// The size may be larger than our heap allows, return null to
			// let the caller know allocation isn't possible and it should
			// use the large object streaming approach instead.
			//
			// For example, this can occur when sz is 640 MB, and JRE
			// maximum heap size is only 256 MB. Even if the JRE has
			// 200 MB free, it cannot allocate a 640 MB byte array.
			return null;
		}

		if (ctx.inflate(this, position, dstbuf, false) != sz) {
			throw new EOFException(MessageFormat.format(
					JGitText.get().shortCompressedStreamAt,
					Long.valueOf(position)));
		}
		return dstbuf;
	}

	void copyPackAsIs(PackOutputStream out, DfsReader ctx) throws IOException {
		// If the length hasn't been determined yet, pin to set it.
		if (length == -1) {
			ctx.pin(this, 0);
			ctx.unpin();
		}
		try (ReadableChannel rc = ctx.db.openFile(desc, PACK)) {
			int sz = ctx.getOptions().getStreamPackBufferSize();
			if (sz > 0) {
				rc.setReadAheadBytes(sz);
			}
			if (cache.shouldCopyThroughCache(length)) {
				copyPackThroughCache(out, ctx, rc);
			} else {
				copyPackBypassCache(out, rc);
			}
		}
	}
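
	// The pack stream layout drives the arithmetic in the two copy helpers
	// below: a pack begins with a 12 byte header ("PACK", a 4 byte version,
	// a 4 byte object count) and ends with a 20 byte SHA-1 trailer, so copying
	// starts at position 12 and covers length - (12 + 20) bytes of object data.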
	private void copyPackThroughCache(PackOutputStream out, DfsReader ctx,
			ReadableChannel rc) throws IOException {
		long position = 12;
		long remaining = length - (12 + 20);
		while (0 < remaining) {
			DfsBlock b = cache.getOrLoad(this, position, ctx, () -> rc);
			int ptr = (int) (position - b.start);
			if (b.size() <= ptr) {
				throw packfileIsTruncated();
			}
			int n = (int) Math.min(b.size() - ptr, remaining);
			b.write(out, position, n);
			position += n;
			remaining -= n;
		}
	}

	private long copyPackBypassCache(PackOutputStream out, ReadableChannel rc)
			throws IOException {
		ByteBuffer buf = newCopyBuffer(out, rc);
		long position = 12;
		long remaining = length - (12 + 20);
		boolean packHeadSkipped = false;
		while (0 < remaining) {
			DfsBlock b = cache.get(key, alignToBlock(position));
			if (b != null) {
				int ptr = (int) (position - b.start);
				if (b.size() <= ptr) {
					throw packfileIsTruncated();
				}
				int n = (int) Math.min(b.size() - ptr, remaining);
				b.write(out, position, n);
				position += n;
				remaining -= n;
				rc.position(position);
				packHeadSkipped = true;
				continue;
			}

			// Need to skip the 'PACK' header for the first read
			int ptr = packHeadSkipped ? 0 : 12;
			buf.position(0);
			int bufLen = read(rc, buf);
			if (bufLen <= ptr) {
				throw packfileIsTruncated();
			}
			int n = (int) Math.min(bufLen - ptr, remaining);
			out.write(buf.array(), ptr, n);
			position += n;
			remaining -= n;
			packHeadSkipped = true;
		}
		return position;
	}

	private ByteBuffer newCopyBuffer(PackOutputStream out, ReadableChannel rc) {
		int bs = blockSize(rc);
		byte[] copyBuf = out.getCopyBuffer();
		if (bs > copyBuf.length) {
			copyBuf = new byte[bs];
		}
		return ByteBuffer.wrap(copyBuf, 0, bs);
	}

	void copyAsIs(PackOutputStream out, DfsObjectToPack src,
			boolean validate, DfsReader ctx) throws IOException,
			StoredObjectRepresentationNotAvailableException {
		final CRC32 crc1 = validate ? new CRC32() : null;
		final CRC32 crc2 = validate ? new CRC32() : null;
		final byte[] buf = out.getCopyBuffer();

		// Rip apart the header so we can discover the size.
		//
		try {
			readFully(src.offset, buf, 0, 20, ctx);
		} catch (IOException ioError) {
			throw new StoredObjectRepresentationNotAvailableException(src,
					ioError);
		}
		int c = buf[0] & 0xff;
		final int typeCode = (c >> 4) & 7;
		long inflatedLength = c & 15;
		int shift = 4;
		int headerCnt = 1;
		while ((c & 0x80) != 0) {
			c = buf[headerCnt++] & 0xff;
			inflatedLength += ((long) (c & 0x7f)) << shift;
			shift += 7;
		}
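		// Worked example of this size encoding: a 1000 byte blob carries the
		// header bytes 0xB8 0x3E. 0xB8 = continuation bit | (OBJ_BLOB << 4) |
		// low 4 bits of the size (8); 0x3E supplies the remaining bits
		// (62 << 4 = 992), so inflatedLength = 8 + 992 = 1000.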
		if (typeCode == Constants.OBJ_OFS_DELTA) {
			do {
				c = buf[headerCnt++] & 0xff;
			} while ((c & 128) != 0);
			if (validate) {
				assert (crc1 != null && crc2 != null);
				crc1.update(buf, 0, headerCnt);
				crc2.update(buf, 0, headerCnt);
			}
		} else if (typeCode == Constants.OBJ_REF_DELTA) {
			if (validate) {
				assert (crc1 != null && crc2 != null);
				crc1.update(buf, 0, headerCnt);
				crc2.update(buf, 0, headerCnt);
			}

			readFully(src.offset + headerCnt, buf, 0, 20, ctx);
			if (validate) {
				assert (crc1 != null && crc2 != null);
				crc1.update(buf, 0, 20);
				crc2.update(buf, 0, 20);
			}
			headerCnt += 20;
		} else if (validate) {
			assert (crc1 != null && crc2 != null);
			crc1.update(buf, 0, headerCnt);
			crc2.update(buf, 0, headerCnt);
		}

		final long dataOffset = src.offset + headerCnt;
		final long dataLength = src.length;
		final long expectedCRC;
		final DfsBlock quickCopy;

		// Verify the object isn't corrupt before sending. If it is,
		// we report it missing instead.
		//
		try {
			quickCopy = ctx.quickCopy(this, dataOffset, dataLength);

			if (validate && idx(ctx).hasCRC32Support()) {
				assert (crc1 != null);
				// Index has the CRC32 code cached, validate the object.
				//
				expectedCRC = idx(ctx).findCRC32(src);
				if (quickCopy != null) {
					quickCopy.crc32(crc1, dataOffset, (int) dataLength);
				} else {
					long pos = dataOffset;
					long cnt = dataLength;
					while (cnt > 0) {
						final int n = (int) Math.min(cnt, buf.length);
						readFully(pos, buf, 0, n, ctx);
						crc1.update(buf, 0, n);
						pos += n;
						cnt -= n;
					}
				}
				if (crc1.getValue() != expectedCRC) {
					setCorrupt(src.offset);
					throw new CorruptObjectException(MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getFileName()));
				}
			} else if (validate) {
				assert (crc1 != null);
				// We don't have a CRC32 code in the index, so compute it
				// now while inflating the raw data to get zlib to tell us
				// whether or not the data is safe.
				//
				Inflater inf = ctx.inflater();
				byte[] tmp = new byte[1024];
				if (quickCopy != null) {
					quickCopy.check(inf, tmp, dataOffset, (int) dataLength);
				} else {
					long pos = dataOffset;
					long cnt = dataLength;
					while (cnt > 0) {
						final int n = (int) Math.min(cnt, buf.length);
						readFully(pos, buf, 0, n, ctx);
						crc1.update(buf, 0, n);
						inf.setInput(buf, 0, n);
						while (inf.inflate(tmp, 0, tmp.length) > 0) {
							continue;
						}
						pos += n;
						cnt -= n;
					}
				}
				if (!inf.finished() || inf.getBytesRead() != dataLength) {
					setCorrupt(src.offset);
					throw new EOFException(MessageFormat.format(
							JGitText.get().shortCompressedStreamAt,
							Long.valueOf(src.offset)));
				}
				expectedCRC = crc1.getValue();
			} else {
				expectedCRC = -1;
			}
		} catch (DataFormatException dataFormat) {
			setCorrupt(src.offset);

			CorruptObjectException corruptObject = new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getFileName()),
					dataFormat);

			throw new StoredObjectRepresentationNotAvailableException(src,
					corruptObject);
		} catch (IOException ioError) {
			throw new StoredObjectRepresentationNotAvailableException(src,
					ioError);
		}

		if (quickCopy != null) {
			// The entire object fits into a single byte array window slice,
			// and we have it pinned. Write this out without copying.
			//
			out.writeHeader(src, inflatedLength);
			quickCopy.write(out, dataOffset, (int) dataLength);
		} else if (dataLength <= buf.length) {
			// Tiny optimization: Lots of objects are very small deltas or
			// deflated commits that are likely to fit in the copy buffer.
			//
			if (!validate) {
				long pos = dataOffset;
				long cnt = dataLength;
				while (cnt > 0) {
					final int n = (int) Math.min(cnt, buf.length);
					readFully(pos, buf, 0, n, ctx);
					pos += n;
					cnt -= n;
				}
			}
			out.writeHeader(src, inflatedLength);
			out.write(buf, 0, (int) dataLength);
		} else {
			// Now we are committed to sending the object. As we spool it out,
			// check its CRC32 code to make sure there wasn't corruption between
			// the verification we did above, and us actually outputting it.
			//
			out.writeHeader(src, inflatedLength);
			long pos = dataOffset;
			long cnt = dataLength;
			while (cnt > 0) {
				final int n = (int) Math.min(cnt, buf.length);
				readFully(pos, buf, 0, n, ctx);
				if (validate) {
					assert (crc2 != null);
					crc2.update(buf, 0, n);
				}
				out.write(buf, 0, n);
				pos += n;
				cnt -= n;
			}
			if (validate) {
				assert (crc2 != null);
				if (crc2.getValue() != expectedCRC) {
					throw new CorruptObjectException(MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream,
							Long.valueOf(src.offset), getFileName()));
				}
			}
		}
	}

	private IOException packfileIsTruncated() {
		invalid = true;
		return new IOException(MessageFormat.format(
				JGitText.get().packfileIsTruncated, getFileName()));
	}

	private void readFully(long position, byte[] dstbuf, int dstoff, int cnt,
			DfsReader ctx) throws IOException {
		if (ctx.copy(this, position, dstbuf, dstoff, cnt) != cnt)
			throw new EOFException();
	}

	ObjectLoader load(DfsReader ctx, long pos)
			throws IOException {
		try {
			final byte[] ib = ctx.tempId;
			Delta delta = null;
			byte[] data = null;
			int type = Constants.OBJ_BAD;
			boolean cached = false;

			SEARCH: for (;;) {
				readFully(pos, ib, 0, 20, ctx);
				int c = ib[0] & 0xff;
				final int typeCode = (c >> 4) & 7;
				long sz = c & 15;
				int shift = 4;
				int p = 1;
				while ((c & 0x80) != 0) {
					c = ib[p++] & 0xff;
					sz += ((long) (c & 0x7f)) << shift;
					shift += 7;
				}

				switch (typeCode) {
				case Constants.OBJ_COMMIT:
				case Constants.OBJ_TREE:
				case Constants.OBJ_BLOB:
				case Constants.OBJ_TAG: {
					if (delta != null) {
						data = decompress(pos + p, (int) sz, ctx);
						type = typeCode;
						break SEARCH;
					}

					if (sz < ctx.getStreamFileThreshold()) {
						data = decompress(pos + p, (int) sz, ctx);
						if (data != null) {
							return new ObjectLoader.SmallObject(typeCode, data);
						}
					}
					return new LargePackedWholeObject(typeCode, sz, pos, p, this, ctx.db);
				}

				case Constants.OBJ_OFS_DELTA: {
					c = ib[p++] & 0xff;
					long base = c & 127;
					while ((c & 128) != 0) {
						base += 1;
						c = ib[p++] & 0xff;
						base <<= 7;
						base += (c & 127);
					}
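					// The loop above decodes git's offset encoding, which adds
					// 1 to the accumulator for every continuation byte so that
					// multi-byte forms never overlap shorter ones. For example,
					// the bytes 0x91 0x2E decode to ((0x11 + 1) << 7) + 0x2E
					// = 2350, i.e. the base lies 2350 bytes before this delta.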
					base = pos - base;
					delta = new Delta(delta, pos, (int) sz, p, base);
					if (sz != delta.deltaSize) {
						break SEARCH;
					}

					DeltaBaseCache.Entry e = ctx.getDeltaBaseCache().get(key, base);
					if (e != null) {
						type = e.type;
						data = e.data;
						cached = true;
						break SEARCH;
					}
					pos = base;
					continue SEARCH;
				}

				case Constants.OBJ_REF_DELTA: {
					readFully(pos + p, ib, 0, 20, ctx);
					long base = findDeltaBase(ctx, ObjectId.fromRaw(ib));
					delta = new Delta(delta, pos, (int) sz, p + 20, base);
					if (sz != delta.deltaSize) {
						break SEARCH;
					}

					DeltaBaseCache.Entry e = ctx.getDeltaBaseCache().get(key, base);
					if (e != null) {
						type = e.type;
						data = e.data;
						cached = true;
						break SEARCH;
					}
					pos = base;
					continue SEARCH;
				}

				default:
					throw new IOException(MessageFormat.format(
							JGitText.get().unknownObjectType, Integer.valueOf(typeCode)));
				}
			}

			// At this point there is at least one delta to apply to data.
			// (Whole objects with no deltas to apply return early above.)
			if (data == null)
				throw new LargeObjectException();

			assert (delta != null);
			do {
				// Cache only the base immediately before desired object.
				if (cached) {
					cached = false;
				} else if (delta.next == null) {
					ctx.getDeltaBaseCache().put(key, delta.basePos, type, data);
				}

				pos = delta.deltaPos;

				byte[] cmds = decompress(pos + delta.hdrLen, delta.deltaSize, ctx);
				if (cmds == null) {
					data = null; // Discard base in case of OutOfMemoryError
					throw new LargeObjectException();
				}

				final long sz = BinaryDelta.getResultSize(cmds);
				if (Integer.MAX_VALUE <= sz) {
					throw new LargeObjectException.ExceedsByteArrayLimit();
				}

				final byte[] result;
				try {
					result = new byte[(int) sz];
				} catch (OutOfMemoryError tooBig) {
					data = null; // Discard base in case of OutOfMemoryError
					cmds = null;
					throw new LargeObjectException.OutOfMemory(tooBig);
				}

				BinaryDelta.apply(data, cmds, result);
				data = result;
				delta = delta.next;
			} while (delta != null);

			return new ObjectLoader.SmallObject(type, data);

		} catch (DataFormatException dfe) {
			throw new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
							getFileName()),
					dfe);
		}
	}

	private long findDeltaBase(DfsReader ctx, ObjectId baseId)
			throws IOException, MissingObjectException {
		long ofs = idx(ctx).findOffset(baseId);
		if (ofs < 0) {
			throw new MissingObjectException(baseId,
					JGitText.get().missingDeltaBase);
		}
		return ofs;
	}

	private static class Delta {
		/** Child that applies onto this object. */
		final Delta next;

		/** Offset of the delta object. */
		final long deltaPos;

		/** Size of the inflated delta stream. */
		final int deltaSize;

		/** Total size of the delta's pack entry header (including base). */
		final int hdrLen;

		/** Offset of the base object this delta applies onto. */
		final long basePos;

		Delta(Delta next, long ofs, int sz, int hdrLen, long baseOffset) {
			this.next = next;
			this.deltaPos = ofs;
			this.deltaSize = sz;
			this.hdrLen = hdrLen;
			this.basePos = baseOffset;
		}
	}

	byte[] getDeltaHeader(DfsReader wc, long pos)
			throws IOException, DataFormatException {
		// The delta stream starts as two variable length integers. If we
		// assume they are 64 bits each, we need 16 bytes to encode them,
		// plus 2 extra bytes for the variable length overhead. So 18 is
		// the longest delta instruction header.
		//
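		// Those two integers are the inflated size of the delta base followed
		// by the size of the result; callers such as getObjectSize() hand this
		// buffer to BinaryDelta.getResultSize() to learn an object's inflated
		// size without applying the delta.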
		final byte[] hdr = new byte[32];
		wc.inflate(this, pos, hdr, true /* header only */);
		return hdr;
	}

	int getObjectType(DfsReader ctx, long pos) throws IOException {
		final byte[] ib = ctx.tempId;
		for (;;) {
			readFully(pos, ib, 0, 20, ctx);
			int c = ib[0] & 0xff;
			final int type = (c >> 4) & 7;

			switch (type) {
			case Constants.OBJ_COMMIT:
			case Constants.OBJ_TREE:
			case Constants.OBJ_BLOB:
			case Constants.OBJ_TAG:
				return type;

			case Constants.OBJ_OFS_DELTA: {
				int p = 1;
				while ((c & 0x80) != 0) {
					c = ib[p++] & 0xff;
				}
				c = ib[p++] & 0xff;
				long ofs = c & 127;
				while ((c & 128) != 0) {
					ofs += 1;
					c = ib[p++] & 0xff;
					ofs <<= 7;
					ofs += (c & 127);
				}
				pos = pos - ofs;
				continue;
			}

			case Constants.OBJ_REF_DELTA: {
				int p = 1;
				while ((c & 0x80) != 0) {
					c = ib[p++] & 0xff;
				}
				readFully(pos + p, ib, 0, 20, ctx);
				pos = findDeltaBase(ctx, ObjectId.fromRaw(ib));
				continue;
			}

			default:
				throw new IOException(MessageFormat.format(
						JGitText.get().unknownObjectType, Integer.valueOf(type)));
			}
		}
	}

	long getObjectSize(DfsReader ctx, AnyObjectId id) throws IOException {
		final long offset = idx(ctx).findOffset(id);
		return 0 < offset ? getObjectSize(ctx, offset) : -1;
	}

	long getObjectSize(DfsReader ctx, long pos)
			throws IOException {
		final byte[] ib = ctx.tempId;
		readFully(pos, ib, 0, 20, ctx);
		int c = ib[0] & 0xff;
		final int type = (c >> 4) & 7;
		long sz = c & 15;
		int shift = 4;
		int p = 1;
		while ((c & 0x80) != 0) {
			c = ib[p++] & 0xff;
			sz += ((long) (c & 0x7f)) << shift;
			shift += 7;
		}

		long deltaAt;
		switch (type) {
		case Constants.OBJ_COMMIT:
		case Constants.OBJ_TREE:
		case Constants.OBJ_BLOB:
		case Constants.OBJ_TAG:
			return sz;

		case Constants.OBJ_OFS_DELTA:
			c = ib[p++] & 0xff;
			while ((c & 128) != 0) {
				c = ib[p++] & 0xff;
			}
			deltaAt = pos + p;
			break;

		case Constants.OBJ_REF_DELTA:
			deltaAt = pos + p + 20;
			break;

		default:
			throw new IOException(MessageFormat.format(
					JGitText.get().unknownObjectType, Integer.valueOf(type)));
		}

		try {
			return BinaryDelta.getResultSize(getDeltaHeader(ctx, deltaAt));
		} catch (DataFormatException dfe) {
			throw new CorruptObjectException(
					MessageFormat.format(
							JGitText.get().objectAtHasBadZlibStream, Long.valueOf(pos),
							getFileName()),
					dfe);
		}
	}

	void representation(DfsObjectRepresentation r, final long pos,
			DfsReader ctx, PackReverseIndex rev)
			throws IOException {
		r.offset = pos;
		final byte[] ib = ctx.tempId;
		readFully(pos, ib, 0, 20, ctx);
		int c = ib[0] & 0xff;
		int p = 1;
		final int typeCode = (c >> 4) & 7;
		while ((c & 0x80) != 0) {
			c = ib[p++] & 0xff;
		}

		long len = rev.findNextOffset(pos, length - 20) - pos;
		switch (typeCode) {
		case Constants.OBJ_COMMIT:
		case Constants.OBJ_TREE:
		case Constants.OBJ_BLOB:
		case Constants.OBJ_TAG:
			r.format = StoredObjectRepresentation.PACK_WHOLE;
			r.baseId = null;
			r.length = len - p;
			return;

		case Constants.OBJ_OFS_DELTA: {
			c = ib[p++] & 0xff;
			long ofs = c & 127;
			while ((c & 128) != 0) {
				ofs += 1;
				c = ib[p++] & 0xff;
				ofs <<= 7;
				ofs += (c & 127);
			}
			r.format = StoredObjectRepresentation.PACK_DELTA;
			r.baseId = rev.findObject(pos - ofs);
			r.length = len - p;
			return;
		}

		case Constants.OBJ_REF_DELTA: {
			readFully(pos + p, ib, 0, 20, ctx);
			r.format = StoredObjectRepresentation.PACK_DELTA;
			r.baseId = ObjectId.fromRaw(ib);
			r.length = len - p - 20;
			return;
		}

		default:
			throw new IOException(MessageFormat.format(
					JGitText.get().unknownObjectType, Integer.valueOf(typeCode)));
		}
	}

	boolean isCorrupt(long offset) {
		LongList list = corruptObjects;
		if (list == null) {
			return false;
		}
		synchronized (list) {
			return list.contains(offset);
		}
	}

	private void setCorrupt(long offset) {
		LongList list = corruptObjects;
		if (list == null) {
			synchronized (initLock) {
				list = corruptObjects;
				if (list == null) {
					list = new LongList();
					corruptObjects = list;
				}
			}
		}
		synchronized (list) {
			list.add(offset);
		}
	}
}