PackInserter.java
/*
 * Copyright (C) 2017, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.eclipse.jgit.internal.storage.file;

import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;

import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.channels.Channels;
import java.text.MessageFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;

import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.InflaterCache;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdOwnerMap;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.ObjectStream;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.transport.PackParser;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.util.BlockList;
import org.eclipse.jgit.util.FileUtils;
import org.eclipse.jgit.util.IO;
import org.eclipse.jgit.util.NB;
import org.eclipse.jgit.util.io.CountingOutputStream;
import org.eclipse.jgit.util.sha1.SHA1;
/**
 * Object inserter that inserts one pack per call to {@link #flush()}, and never
 * inserts loose objects.
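 * <p>
 * Minimal usage sketch (assumes {@code db} is the repository's
 * {@link ObjectDirectory} and that the caller is in a package-private context
 * that can construct the inserter directly):
 *
 * <pre>
 * byte[] data = Constants.encode("hello");
 * try (PackInserter ins = new PackInserter(db)) {
 *     ObjectId id = ins.insert(Constants.OBJ_BLOB, data);
 *     ins.flush(); // one pack (plus index) written per flush; nothing loose
 * }
 * </pre>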
 */
public class PackInserter extends ObjectInserter {
    /** Always produce version 2 indexes, to get CRC data. */
    private static final int INDEX_VERSION = 2;

    private final ObjectDirectory db;

    private List<PackedObjectInfo> objectList;
    private ObjectIdOwnerMap<PackedObjectInfo> objectMap;

    private boolean rollback;
    private boolean checkExisting = true;
    private int compression = Deflater.BEST_COMPRESSION;

    private File tmpPack;
    private PackStream packOut;
    private Inflater cachedInflater;

    private PackConfig pconfig;

    PackInserter(ObjectDirectory db) {
        this.db = db;
        this.pconfig = new PackConfig(db.getConfig());
    }

    /**
     * Whether to check if objects exist in the repo
     *
     * @param check
     *            if {@code false}, will write out possibly-duplicate objects
     *            without first checking whether they exist in the repo; default
     *            is true.
     */
    public void checkExisting(boolean check) {
        checkExisting = check;
    }

    /**
     * Set compression level for zlib deflater.
     *
     * @param compression
     *            compression level for zlib deflater.
     */
    public void setCompressionLevel(int compression) {
        this.compression = compression;
    }

    int getBufferSize() {
        return buffer().length;
    }

    /** {@inheritDoc} */
    @Override
    public ObjectId insert(int type, byte[] data, int off, int len)
            throws IOException {
        ObjectId id = idFor(type, data, off, len);
        if (objectMap != null && objectMap.contains(id)) {
            return id;
        }
        // Ignore loose objects, which are potentially unreachable.
        if (checkExisting && db.hasPackedObject(id)) {
            return id;
        }

        long offset = beginObject(type, len);
        packOut.compress.write(data, off, len);
        packOut.compress.finish();
        return endObject(id, offset);
    }

    /** {@inheritDoc} */
    @Override
    public ObjectId insert(int type, long len, InputStream in)
            throws IOException {
        byte[] buf = buffer();
        if (len <= buf.length) {
            IO.readFully(in, buf, 0, (int) len);
            return insert(type, buf, 0, (int) len);
        }

        long offset = beginObject(type, len);
        SHA1 md = digest();
        md.update(Constants.encodedTypeString(type));
        md.update((byte) ' ');
        md.update(Constants.encodeASCII(len));
        md.update((byte) 0);

        while (0 < len) {
            int n = in.read(buf, 0, (int) Math.min(buf.length, len));
            if (n <= 0) {
                throw new EOFException();
            }
            md.update(buf, 0, n);
            packOut.compress.write(buf, 0, n);
            len -= n;
        }
        packOut.compress.finish();
        return endObject(md.toObjectId(), offset);
    }

    private long beginObject(int type, long len) throws IOException {
        if (packOut == null) {
            beginPack();
        }
        long offset = packOut.getOffset();
        packOut.beginObject(type, len);
        return offset;
    }

    private ObjectId endObject(ObjectId id, long offset) {
        PackedObjectInfo obj = new PackedObjectInfo(id);
        obj.setOffset(offset);
        obj.setCRC((int) packOut.crc32.getValue());
        objectList.add(obj);
        objectMap.addIfAbsent(obj);
        return id;
    }

    private static File idxFor(File packFile) {
        String p = packFile.getName();
        return new File(
                packFile.getParentFile(),
                p.substring(0, p.lastIndexOf('.')) + ".idx"); //$NON-NLS-1$
    }

    private void beginPack() throws IOException {
        objectList = new BlockList<>();
        objectMap = new ObjectIdOwnerMap<>();
        rollback = true;

        tmpPack = File.createTempFile("insert_", ".pack", db.getDirectory()); //$NON-NLS-1$ //$NON-NLS-2$
        packOut = new PackStream(tmpPack);

        // Write the header as though it were a single object pack.
        packOut.write(packOut.hdrBuf, 0, writePackHeader(packOut.hdrBuf, 1));
    }

    private static int writePackHeader(byte[] buf, int objectCount) {
        System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4);
        NB.encodeInt32(buf, 4, 2); // Always use pack version 2.
        NB.encodeInt32(buf, 8, objectCount);
        return 12;
    }

    /** {@inheritDoc} */
    @Override
    public PackParser newPackParser(InputStream in) {
        throw new UnsupportedOperationException();
    }

    /** {@inheritDoc} */
    @Override
    public ObjectReader newReader() {
        return new Reader();
    }

    /** {@inheritDoc} */
    @Override
    public void flush() throws IOException {
        if (tmpPack == null) {
            return;
        }

        if (packOut == null) {
            throw new IOException();
        }

        byte[] packHash;
        try {
            packHash = packOut.finishPack();
        } finally {
            packOut = null;
        }

        Collections.sort(objectList);
        File tmpIdx = idxFor(tmpPack); // TODO(nasserg) Use PackFile?
        writePackIndex(tmpIdx, packHash, objectList);

        PackFile realPack = new PackFile(db.getPackDirectory(),
                computeName(objectList), PackExt.PACK);
        db.closeAllPackHandles(realPack);
        tmpPack.setReadOnly();
        FileUtils.rename(tmpPack, realPack, ATOMIC_MOVE);

        PackFile realIdx = realPack.create(PackExt.INDEX);
        tmpIdx.setReadOnly();
        try {
            FileUtils.rename(tmpIdx, realIdx, ATOMIC_MOVE);
        } catch (IOException e) {
            File newIdx = new File(
                    realIdx.getParentFile(), realIdx.getName() + ".new"); //$NON-NLS-1$
            try {
                FileUtils.rename(tmpIdx, newIdx, ATOMIC_MOVE);
            } catch (IOException e2) {
                newIdx = tmpIdx;
                e = e2;
            }
            throw new IOException(MessageFormat.format(
                    JGitText.get().panicCantRenameIndexFile, newIdx,
                    realIdx), e);
        }

        boolean interrupted = false;
        try {
            FileSnapshot snapshot = FileSnapshot.save(realPack);
            if (pconfig.doWaitPreventRacyPack(snapshot.size())) {
                snapshot.waitUntilNotRacy();
            }
        } catch (InterruptedException e) {
            interrupted = true;
        }
        try {
            db.openPack(realPack);
            rollback = false;
        } finally {
            clear();
            if (interrupted) {
                // Re-set interrupted flag
                Thread.currentThread().interrupt();
            }
        }
    }

    private static void writePackIndex(File idx, byte[] packHash,
            List<PackedObjectInfo> list) throws IOException {
        try (OutputStream os = new FileOutputStream(idx)) {
            PackIndexWriter w = PackIndexWriter.createVersion(os, INDEX_VERSION);
            w.write(list, packHash);
        }
    }

    private ObjectId computeName(List<PackedObjectInfo> list) {
        SHA1 md = digest().reset();
        byte[] buf = buffer();
        for (PackedObjectInfo otp : list) {
            otp.copyRawTo(buf, 0);
            md.update(buf, 0, OBJECT_ID_LENGTH);
        }
        return ObjectId.fromRaw(md.digest());
    }

    /** {@inheritDoc} */
    @Override
    public void close() {
        try {
            if (packOut != null) {
                try {
                    packOut.close();
                } catch (IOException err) {
                    // Ignore a close failure, the pack should be removed.
                }
            }
            if (rollback && tmpPack != null) {
                try {
                    FileUtils.delete(tmpPack);
                } catch (IOException e) {
                    // Still delete idx.
                }
                try {
                    FileUtils.delete(idxFor(tmpPack));
                } catch (IOException e) {
                    // Ignore error deleting temp idx.
                }
                rollback = false;
            }
        } finally {
            clear();
            try {
                InflaterCache.release(cachedInflater);
            } finally {
                cachedInflater = null;
            }
        }
    }

    private void clear() {
        objectList = null;
        objectMap = null;
        tmpPack = null;
        packOut = null;
    }

    private Inflater inflater() {
        if (cachedInflater == null) {
            cachedInflater = InflaterCache.get();
        } else {
            cachedInflater.reset();
        }
        return cachedInflater;
    }

    /**
     * Stream that writes to a pack file.
     * <p>
     * Backed by two views of the same open file descriptor: a random-access file,
     * and an output stream. Seeking in the file causes subsequent writes to the
     * output stream to occur wherever the file pointer is pointing, so we need to
     * take care to always seek to the end of the file before writing a new
     * object.
     * <p>
     * Callers should always use {@link #seek(long)} to seek, rather than reaching
     * into the file member. As long as this contract is followed, calls to {@link
     * #write(byte[], int, int)} are guaranteed to write at the end of the file,
     * even if there have been intermediate seeks.
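     * <p>
     * A sketch of that pattern, as used by the enclosing inserter's reader when
     * it reads back a buffered object ({@code obj}, {@code buf}, {@code hdr} and
     * {@code n} are placeholders):
     *
     * <pre>
     * packOut.seek(obj.getOffset()); // read back an already written object
     * packOut.file.read(buf, 0, 20);
     * packOut.write(hdr, 0, n);      // a later write still lands at end of file
     * </pre>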
     */
    private class PackStream extends OutputStream {
        final byte[] hdrBuf;
        final CRC32 crc32;
        final DeflaterOutputStream compress;

        private final RandomAccessFile file;
        private final CountingOutputStream out;
        private final Deflater deflater;

        private boolean atEnd;

        PackStream(File pack) throws IOException {
            file = new RandomAccessFile(pack, "rw"); //$NON-NLS-1$
            out = new CountingOutputStream(new FileOutputStream(file.getFD()));
            deflater = new Deflater(compression);
            compress = new DeflaterOutputStream(this, deflater, 8192);
            hdrBuf = new byte[32];
            crc32 = new CRC32();
            atEnd = true;
        }

        long getOffset() {
            // This value is accurate as long as we only ever write to the end of the
            // file, and don't seek back to overwrite any previous segments. Although
            // this is subtle, storing the stream counter this way is still preferable
            // to returning file.length() here, as it avoids a syscall and possible
            // IOException.
            return out.getCount();
        }

        void seek(long offset) throws IOException {
            file.seek(offset);
            atEnd = false;
        }

        void beginObject(int objectType, long length) throws IOException {
            crc32.reset();
            deflater.reset();
            write(hdrBuf, 0, encodeTypeSize(objectType, length));
        }

        private int encodeTypeSize(int type, long rawLength) {
            long nextLength = rawLength >>> 4;
            hdrBuf[0] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4) | (rawLength & 0x0F));
            rawLength = nextLength;
            int n = 1;
            while (rawLength > 0) {
                nextLength >>>= 7;
                hdrBuf[n++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (rawLength & 0x7F));
                rawLength = nextLength;
            }
            return n;
        }

        @Override
        public void write(int b) throws IOException {
            hdrBuf[0] = (byte) b;
            write(hdrBuf, 0, 1);
        }

        @Override
        public void write(byte[] data, int off, int len) throws IOException {
            crc32.update(data, off, len);
            if (!atEnd) {
                file.seek(file.length());
                atEnd = true;
            }
            out.write(data, off, len);
        }

        byte[] finishPack() throws IOException {
            // Overwrite placeholder header with actual object count, then hash. This
            // method intentionally uses direct seek/write calls rather than the
            // wrappers which keep track of atEnd. This leaves atEnd, the file
            // pointer, and out's counter in an inconsistent state; that's ok, since
            // this method closes the file anyway.
            try {
                file.seek(0);
                out.write(hdrBuf, 0, writePackHeader(hdrBuf, objectList.size()));

                byte[] buf = buffer();
                SHA1 md = digest().reset();
                file.seek(0);
                while (true) {
                    int r = file.read(buf);
                    if (r < 0) {
                        break;
                    }
                    md.update(buf, 0, r);
                }
                byte[] packHash = md.digest();
                out.write(packHash, 0, packHash.length);
                return packHash;
            } finally {
                close();
            }
        }

        @Override
        public void close() throws IOException {
            deflater.end();
            try {
                out.close();
            } finally {
                file.close();
            }
        }

        byte[] inflate(long filePos, int len) throws IOException, DataFormatException {
            byte[] dstbuf;
            try {
                dstbuf = new byte[len];
            } catch (OutOfMemoryError noMemory) {
                return null; // Caller will switch to large object streaming.
            }

            byte[] srcbuf = buffer();
            Inflater inf = inflater();
            filePos += setInput(filePos, inf, srcbuf);

            for (int dstoff = 0;;) {
                int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
                dstoff += n;
                if (inf.finished()) {
                    return dstbuf;
                }
                if (inf.needsInput()) {
                    filePos += setInput(filePos, inf, srcbuf);
                } else if (n == 0) {
                    throw new DataFormatException();
                }
            }
        }

        private int setInput(long filePos, Inflater inf, byte[] buf)
                throws IOException {
            if (file.getFilePointer() != filePos) {
                seek(filePos);
            }
            int n = file.read(buf);
            if (n < 0) {
                throw new EOFException(JGitText.get().unexpectedEofInPack);
            }
            inf.setInput(buf, 0, n);
            return n;
        }
    }

    private class Reader extends ObjectReader {
        private final ObjectReader ctx;

        private Reader() {
            ctx = db.newReader();
            setStreamFileThreshold(ctx.getStreamFileThreshold());
        }

        @Override
        public ObjectReader newReader() {
            return db.newReader();
        }

        @Override
        public ObjectInserter getCreatedFromInserter() {
            return PackInserter.this;
        }

        @Override
        public Collection<ObjectId> resolve(AbbreviatedObjectId id)
                throws IOException {
            Collection<ObjectId> stored = ctx.resolve(id);
            if (objectList == null) {
                return stored;
            }
            Set<ObjectId> r = new HashSet<>(stored.size() + 2);
            r.addAll(stored);
            for (PackedObjectInfo obj : objectList) {
                if (id.prefixCompare(obj) == 0) {
                    r.add(obj.copy());
                }
            }
            return r;
        }

        @Override
        public ObjectLoader open(AnyObjectId objectId, int typeHint)
                throws MissingObjectException, IncorrectObjectTypeException,
                IOException {
            if (objectMap == null) {
                return ctx.open(objectId, typeHint);
            }

            PackedObjectInfo obj = objectMap.get(objectId);
            if (obj == null) {
                return ctx.open(objectId, typeHint);
            }

            byte[] buf = buffer();
            packOut.seek(obj.getOffset());
            int cnt = packOut.file.read(buf, 0, 20);
            if (cnt <= 0) {
                throw new EOFException(JGitText.get().unexpectedEofInPack);
            }

            int c = buf[0] & 0xff;
            int type = (c >> 4) & 7;
            if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
                throw new IOException(MessageFormat.format(
                        JGitText.get().cannotReadBackDelta, Integer.toString(type)));
            }
            if (typeHint != OBJ_ANY && type != typeHint) {
                throw new IncorrectObjectTypeException(objectId.copy(), typeHint);
            }

            long sz = c & 0x0f;
            int ptr = 1;
            int shift = 4;
            while ((c & 0x80) != 0) {
                if (ptr >= cnt) {
                    throw new EOFException(JGitText.get().unexpectedEofInPack);
                }
                c = buf[ptr++] & 0xff;
                sz += ((long) (c & 0x7f)) << shift;
                shift += 7;
            }

            long zpos = obj.getOffset() + ptr;
            if (sz < getStreamFileThreshold()) {
                byte[] data = inflate(obj, zpos, (int) sz);
                if (data != null) {
                    return new ObjectLoader.SmallObject(type, data);
                }
            }
            return new StreamLoader(type, sz, zpos);
        }

        private byte[] inflate(PackedObjectInfo obj, long zpos, int sz)
                throws IOException, CorruptObjectException {
            try {
                return packOut.inflate(zpos, sz);
            } catch (DataFormatException dfe) {
                throw new CorruptObjectException(
                        MessageFormat.format(
                                JGitText.get().objectAtHasBadZlibStream,
                                Long.valueOf(obj.getOffset()),
                                tmpPack.getAbsolutePath()),
                        dfe);
            }
        }

        @Override
        public Set<ObjectId> getShallowCommits() throws IOException {
            return ctx.getShallowCommits();
        }

        @Override
        public void close() {
            ctx.close();
        }

        private class StreamLoader extends ObjectLoader {
            private final int type;
            private final long size;
            private final long pos;

            StreamLoader(int type, long size, long pos) {
                this.type = type;
                this.size = size;
                this.pos = pos;
            }

            @Override
            public ObjectStream openStream()
                    throws MissingObjectException, IOException {
                int bufsz = buffer().length;
                packOut.seek(pos);

                InputStream fileStream = new FilterInputStream(
                        Channels.newInputStream(packOut.file.getChannel())) {
                    // atEnd was already set to false by the previous seek, but it's
                    // technically possible for a caller to call insert on the
                    // inserter in the middle of reading from this stream. Behavior is
                    // undefined in this case, so it would arguably be ok to ignore,
                    // but it's not hard to at least make an attempt to not corrupt
                    // the data.
                    @Override
                    public int read() throws IOException {
                        packOut.atEnd = false;
                        return super.read();
                    }

                    @Override
                    public int read(byte[] b) throws IOException {
                        packOut.atEnd = false;
                        return super.read(b);
                    }

                    @Override
                    public int read(byte[] b, int off, int len) throws IOException {
                        packOut.atEnd = false;
                        return super.read(b, off, len);
                    }

                    @Override
                    public void close() {
                        // Never close underlying RandomAccessFile, which lasts the
                        // lifetime of the enclosing PackStream.
                    }
                };
                return new ObjectStream.Filter(
                        type, size,
                        new BufferedInputStream(
                                new InflaterInputStream(fileStream, inflater(), bufsz), bufsz));
            }

            @Override
            public int getType() {
                return type;
            }

            @Override
            public long getSize() {
                return size;
            }

            @Override
            public byte[] getCachedBytes() throws LargeObjectException {
                throw new LargeObjectException.ExceedsLimit(
                        getStreamFileThreshold(), size);
            }
        }
    }
}