
DfsInserter.java 19KB

/*
 * Copyright (C) 2011, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;

import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;

import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.text.MessageFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;

import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdOwnerMap;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.ObjectStream;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.util.BlockList;
import org.eclipse.jgit.util.IO;
import org.eclipse.jgit.util.NB;
import org.eclipse.jgit.util.TemporaryBuffer;
import org.eclipse.jgit.util.io.CountingOutputStream;
import org.eclipse.jgit.util.sha1.SHA1;

/** Inserts objects into the DFS. */
public class DfsInserter extends ObjectInserter {
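    // Typical usage (a minimal sketch, not from this file; "repo" is assumed
    // to be a Repository backed by a DfsObjDatabase, e.g. an
    // InMemoryRepository):
    //
    //   try (ObjectInserter ins = repo.newObjectInserter()) {
    //       ObjectId id = ins.insert(Constants.OBJ_BLOB, data);
    //       ins.flush(); // objects become visible to readers only after flush
    //   }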
    /** Always produce version 2 indexes, to get CRC data. */
    private static final int INDEX_VERSION = 2;

    final DfsObjDatabase db;
    int compression = Deflater.BEST_COMPRESSION;

    List<PackedObjectInfo> objectList;
    ObjectIdOwnerMap<PackedObjectInfo> objectMap;

    DfsBlockCache cache;
    DfsPackKey packKey;
    DfsPackDescription packDsc;
    PackStream packOut;
    private boolean rollback;
    private boolean checkExisting = true;

    /**
     * Initialize a new inserter.
     *
     * @param db
     *            database the inserter writes to.
     */
    protected DfsInserter(DfsObjDatabase db) {
        this.db = db;
    }

    /**
     * @param check
     *            if false, will write out possibly-duplicate objects without
     *            first checking whether they exist in the repo; default is true.
     */
    public void checkExisting(boolean check) {
        checkExisting = check;
    }

    void setCompressionLevel(int compression) {
        this.compression = compression;
    }

    @Override
    public DfsPackParser newPackParser(InputStream in) throws IOException {
        return new DfsPackParser(db, this, in);
    }

    @Override
    public ObjectReader newReader() {
        return new Reader();
    }

    @Override
    public ObjectId insert(int type, byte[] data, int off, int len)
            throws IOException {
        ObjectId id = idFor(type, data, off, len);
        if (objectMap != null && objectMap.contains(id))
            return id;
        // Ignore unreachable (garbage) objects here.
        if (checkExisting && db.has(id, true))
            return id;

        long offset = beginObject(type, len);
        packOut.compress.write(data, off, len);
        packOut.compress.finish();
        return endObject(id, offset);
    }

    @Override
    public ObjectId insert(int type, long len, InputStream in)
            throws IOException {
        byte[] buf = insertBuffer(len);
        if (len <= buf.length) {
            IO.readFully(in, buf, 0, (int) len);
            return insert(type, buf, 0, (int) len);
        }

        long offset = beginObject(type, len);
        SHA1 md = digest();
        md.update(Constants.encodedTypeString(type));
        md.update((byte) ' ');
        md.update(Constants.encodeASCII(len));
        md.update((byte) 0);

        while (0 < len) {
            int n = in.read(buf, 0, (int) Math.min(buf.length, len));
            if (n <= 0)
                throw new EOFException();
            md.update(buf, 0, n);
            packOut.compress.write(buf, 0, n);
            len -= n;
        }
        packOut.compress.finish();
        return endObject(md.toObjectId(), offset);
    }
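    // The SHA-1 above is seeded with the canonical object header
    // "<type> <length>\0" before the content bytes, which is exactly how Git
    // defines object ids; for a stream the id is therefore known only after
    // the entire input has been consumed, hence endObject() at the end.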
    private byte[] insertBuffer(long len) {
        byte[] buf = buffer();
        if (len <= buf.length)
            return buf;
        if (len < db.getReaderOptions().getStreamFileThreshold()) {
            try {
                return new byte[(int) len];
            } catch (OutOfMemoryError noMem) {
                return buf;
            }
        }
        return buf;
    }

    @Override
    public void flush() throws IOException {
        if (packDsc == null)
            return;
        if (packOut == null)
            throw new IOException();

        byte[] packHash = packOut.writePackFooter();
        packDsc.addFileExt(PACK);
        packDsc.setFileSize(PACK, packOut.getCount());
        packOut.close();
        packOut = null;

        sortObjectsById();
        PackIndex index = writePackIndex(packDsc, packHash, objectList);
        db.commitPack(Collections.singletonList(packDsc), null);
        rollback = false;

        DfsPackFile p = cache.getOrCreate(packDsc, packKey);
        if (index != null)
            p.setPackIndex(index);
        db.addPack(p);
        clear();
    }

    @Override
    public void close() {
        if (packOut != null) {
            try {
                packOut.close();
            } catch (IOException err) {
                // Ignore a close failure, the pack should be removed.
            } finally {
                packOut = null;
            }
        }
        if (rollback && packDsc != null) {
            try {
                db.rollbackPack(Collections.singletonList(packDsc));
            } finally {
                packDsc = null;
                rollback = false;
            }
        }
        clear();
    }

    private void clear() {
        objectList = null;
        objectMap = null;
        packKey = null;
        packDsc = null;
    }

    private long beginObject(int type, long len) throws IOException {
        if (packOut == null)
            beginPack();
        long offset = packOut.getCount();
        packOut.beginObject(type, len);
        return offset;
    }

    private ObjectId endObject(ObjectId id, long offset) {
        PackedObjectInfo obj = new PackedObjectInfo(id);
        obj.setOffset(offset);
        obj.setCRC((int) packOut.crc32.getValue());
        objectList.add(obj);
        objectMap.addIfAbsent(obj);
        return id;
    }

    private void beginPack() throws IOException {
        objectList = new BlockList<>();
        objectMap = new ObjectIdOwnerMap<>();
        cache = DfsBlockCache.getInstance();

        rollback = true;
        packDsc = db.newPack(DfsObjDatabase.PackSource.INSERT);
        packOut = new PackStream(db.writeFile(packDsc, PACK));
        packKey = new DfsPackKey();

        // Write the header as though it were a single object pack.
        byte[] buf = packOut.hdrBuf;
        System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4);
        NB.encodeInt32(buf, 4, 2); // Always use pack version 2.
        NB.encodeInt32(buf, 8, 1); // Always assume 1 object.
        packOut.write(buf, 0, 12);
    }
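    // Layout of the 12 header bytes written above:
    //
    //   50 41 43 4B   the "PACK" signature
    //   00 00 00 02   pack stream version 2
    //   00 00 00 01   object count, deliberately left at 1; readers of these
    //                 packs are expected to go through the pack index, which
    //                 flush() writes with the real object count.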
    private void sortObjectsById() {
        Collections.sort(objectList);
    }

    PackIndex writePackIndex(DfsPackDescription pack, byte[] packHash,
            List<PackedObjectInfo> list) throws IOException {
        pack.setIndexVersion(INDEX_VERSION);
        pack.setObjectCount(list.size());

        // If there are fewer than 58,000 objects, the entire index fits in
        // under 2 MiB. Callers will probably need the index immediately, so
        // buffer the index in process and load from the buffer.
        TemporaryBuffer.Heap buf = null;
        PackIndex packIndex = null;
        if (list.size() <= 58000) {
            buf = new TemporaryBuffer.Heap(2 << 20);
            index(buf, packHash, list);
            packIndex = PackIndex.read(buf.openInputStream());
        }

        DfsOutputStream os = db.writeFile(pack, INDEX);
        try (CountingOutputStream cnt = new CountingOutputStream(os)) {
            if (buf != null)
                buf.writeTo(cnt, null);
            else
                index(cnt, packHash, list);
            pack.addFileExt(INDEX);
            pack.setFileSize(INDEX, cnt.getCount());
        } finally {
            if (buf != null) {
                buf.close();
            }
        }
        return packIndex;
    }
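    // Rough arithmetic behind the 58,000 cutoff: a version 2 index costs
    // about 28 bytes per object (20-byte SHA-1, 4-byte CRC-32, 4-byte
    // offset) plus a fixed 1 KiB fan-out table and two trailing 20-byte
    // checksums, so 58,000 objects come to roughly 1.6 MiB, comfortably
    // inside the 2 MiB heap buffer allocated above.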
    private static void index(OutputStream out, byte[] packHash,
            List<PackedObjectInfo> list) throws IOException {
        PackIndexWriter.createVersion(out, INDEX_VERSION).write(list, packHash);
    }

    private class PackStream extends OutputStream {
        private final DfsOutputStream out;
        private final MessageDigest md;
        final byte[] hdrBuf;
        private final Deflater deflater;
        private final int blockSize;

        private long currPos; // Position of currBuf[0] in the output stream.
        private int currPtr; // Number of bytes in currBuf.
        private byte[] currBuf;

        final CRC32 crc32;
        final DeflaterOutputStream compress;

        PackStream(DfsOutputStream out) {
            this.out = out;

            hdrBuf = new byte[32];
            md = Constants.newMessageDigest();
            crc32 = new CRC32();
            deflater = new Deflater(compression);
            compress = new DeflaterOutputStream(this, deflater, 8192);

            int size = out.blockSize();
            if (size <= 0)
                size = cache.getBlockSize();
            else if (size < cache.getBlockSize())
                size = (cache.getBlockSize() / size) * size;
            blockSize = size;
            currBuf = new byte[blockSize];
        }

        long getCount() {
            return currPos + currPtr;
        }

        void beginObject(int objectType, long length) throws IOException {
            crc32.reset();
            deflater.reset();
            write(hdrBuf, 0, encodeTypeSize(objectType, length));
        }

        private int encodeTypeSize(int type, long rawLength) {
            long nextLength = rawLength >>> 4;
            hdrBuf[0] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4) | (rawLength & 0x0F));
            rawLength = nextLength;
            int n = 1;
            while (rawLength > 0) {
                nextLength >>>= 7;
                hdrBuf[n++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (rawLength & 0x7F));
                rawLength = nextLength;
            }
            return n;
        }
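        // Worked example of the encoding above: type OBJ_BLOB (3) with
        // length 100 (binary 110 0100) emits two bytes:
        //
        //   0xB4 = 1 (more) | 011 (type 3) | 0100 (low 4 bits of length)
        //   0x06 = 0 (end)  | 0000110      (remaining bits, 100 >>> 4 == 6)
        //
        // Reader.open() below performs the inverse: (6 << 4) + 4 == 100.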
        @Override
        public void write(final int b) throws IOException {
            hdrBuf[0] = (byte) b;
            write(hdrBuf, 0, 1);
        }

        @Override
        public void write(byte[] data, int off, int len) throws IOException {
            crc32.update(data, off, len);
            md.update(data, off, len);
            writeNoHash(data, off, len);
        }

        private void writeNoHash(byte[] data, int off, int len)
                throws IOException {
            while (0 < len) {
                int n = Math.min(len, currBuf.length - currPtr);
                if (n == 0) {
                    flushBlock();
                    currBuf = new byte[blockSize];
                    continue;
                }
                System.arraycopy(data, off, currBuf, currPtr, n);
                off += n;
                len -= n;
                currPtr += n;
            }
        }

        private void flushBlock() throws IOException {
            out.write(currBuf, 0, currPtr);

            byte[] buf;
            if (currPtr == currBuf.length)
                buf = currBuf;
            else
                buf = copyOf(currBuf, 0, currPtr);
            cache.put(new DfsBlock(packKey, currPos, buf));

            currPos += currPtr;
            currPtr = 0;
            currBuf = null;
        }

        private byte[] copyOf(byte[] src, int ptr, int cnt) {
            byte[] dst = new byte[cnt];
            System.arraycopy(src, ptr, dst, 0, cnt);
            return dst;
        }

        byte[] writePackFooter() throws IOException {
            byte[] packHash = md.digest();
            writeNoHash(packHash, 0, packHash.length);
            if (currPtr != 0)
                flushBlock();
            return packHash;
        }

        int read(long pos, byte[] dst, int ptr, int cnt) throws IOException {
            int r = 0;
            while (pos < currPos && r < cnt) {
                DfsBlock b = getOrLoadBlock(pos);
                int n = b.copy(pos, dst, ptr + r, cnt - r);
                pos += n;
                r += n;
            }
            if (currPos <= pos && r < cnt) {
                int s = (int) (pos - currPos);
                int n = Math.min(currPtr - s, cnt - r);
                System.arraycopy(currBuf, s, dst, ptr + r, n);
                r += n;
            }
            return r;
        }
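        // read() lets this inserter's Reader serve back objects that were
        // written but not yet flushed: already-completed blocks come from
        // the DfsBlockCache (or are re-read from the underlying
        // DfsOutputStream in getOrLoadBlock), while the tail still sitting
        // in currBuf is copied out directly.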
        byte[] inflate(DfsReader ctx, long pos, int len) throws IOException,
                DataFormatException {
            byte[] dstbuf;
            try {
                dstbuf = new byte[len];
            } catch (OutOfMemoryError noMemory) {
                return null; // Caller will switch to large object streaming.
            }

            Inflater inf = ctx.inflater();
            pos += setInput(pos, inf);
            for (int dstoff = 0;;) {
                int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
                dstoff += n;
                if (inf.finished())
                    return dstbuf;
                if (inf.needsInput())
                    pos += setInput(pos, inf);
                else if (n == 0)
                    throw new DataFormatException();
            }
        }

        private int setInput(long pos, Inflater inf)
                throws IOException, DataFormatException {
            if (pos < currPos)
                return getOrLoadBlock(pos).setInput(pos, inf);
            if (pos < currPos + currPtr) {
                int s = (int) (pos - currPos);
                int n = currPtr - s;
                inf.setInput(currBuf, s, n);
                return n;
            }
            throw new EOFException(DfsText.get().unexpectedEofInPack);
        }

        private DfsBlock getOrLoadBlock(long pos) throws IOException {
            long s = toBlockStart(pos);
            DfsBlock b = cache.get(packKey, s);
            if (b != null)
                return b;

            byte[] d = new byte[blockSize];
            for (int p = 0; p < blockSize;) {
                int n = out.read(s + p, ByteBuffer.wrap(d, p, blockSize - p));
                if (n <= 0)
                    throw new EOFException(DfsText.get().unexpectedEofInPack);
                p += n;
            }
            b = new DfsBlock(packKey, s, d);
            cache.put(b);
            return b;
        }

        private long toBlockStart(long pos) {
            return (pos / blockSize) * blockSize;
        }

        @Override
        public void close() throws IOException {
            deflater.end();
            out.close();
        }
    }

    private class Reader extends ObjectReader {
        private final DfsReader ctx = db.newReader();

        @Override
        public ObjectReader newReader() {
            return db.newReader();
        }

        @Override
        public Collection<ObjectId> resolve(AbbreviatedObjectId id)
                throws IOException {
            Collection<ObjectId> stored = ctx.resolve(id);
            if (objectList == null)
                return stored;

            Set<ObjectId> r = new HashSet<>(stored.size() + 2);
            r.addAll(stored);
            for (PackedObjectInfo obj : objectList) {
                if (id.prefixCompare(obj) == 0)
                    r.add(obj.copy());
            }
            return r;
        }

        @Override
        public ObjectLoader open(AnyObjectId objectId, int typeHint)
                throws IOException {
            if (objectMap == null)
                return ctx.open(objectId, typeHint);

            PackedObjectInfo obj = objectMap.get(objectId);
            if (obj == null)
                return ctx.open(objectId, typeHint);

            byte[] buf = buffer();
            int cnt = packOut.read(obj.getOffset(), buf, 0, 20);
            if (cnt <= 0)
                throw new EOFException(DfsText.get().unexpectedEofInPack);

            int c = buf[0] & 0xff;
            int type = (c >> 4) & 7;
            if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA)
                throw new IOException(MessageFormat.format(
                        DfsText.get().cannotReadBackDelta, Integer.toString(type)));
            if (typeHint != OBJ_ANY && type != typeHint) {
                throw new IncorrectObjectTypeException(objectId.copy(), typeHint);
            }

            long sz = c & 0x0f;
            int ptr = 1;
            int shift = 4;
            while ((c & 0x80) != 0) {
                if (ptr >= cnt)
                    throw new EOFException(DfsText.get().unexpectedEofInPack);
                c = buf[ptr++] & 0xff;
                sz += ((long) (c & 0x7f)) << shift;
                shift += 7;
            }
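            // sz now holds the object's inflated size, decoded from the same
            // variable-length header that PackStream.encodeTypeSize wrote.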
            long zpos = obj.getOffset() + ptr;
            if (sz < ctx.getStreamFileThreshold()) {
                byte[] data = inflate(obj, zpos, (int) sz);
                if (data != null)
                    return new ObjectLoader.SmallObject(type, data);
            }
            return new StreamLoader(obj.copy(), type, sz, packKey, zpos);
        }

        private byte[] inflate(PackedObjectInfo obj, long zpos, int sz)
                throws IOException, CorruptObjectException {
            try {
                return packOut.inflate(ctx, zpos, sz);
            } catch (DataFormatException dfe) {
                CorruptObjectException coe = new CorruptObjectException(
                        MessageFormat.format(
                                JGitText.get().objectAtHasBadZlibStream,
                                Long.valueOf(obj.getOffset()),
                                packDsc.getFileName(PackExt.PACK)));
                coe.initCause(dfe);
                throw coe;
            }
        }

        @Override
        public Set<ObjectId> getShallowCommits() throws IOException {
            return ctx.getShallowCommits();
        }

        @Override
        public ObjectInserter getCreatedFromInserter() {
            return DfsInserter.this;
        }

        @Override
        public void close() {
            ctx.close();
        }
    }

    private class StreamLoader extends ObjectLoader {
        private final ObjectId id;
        private final int type;
        private final long size;

        private final DfsPackKey srcPack;
        private final long pos;

        StreamLoader(ObjectId id, int type, long sz,
                DfsPackKey key, long pos) {
            this.id = id;
            this.type = type;
            this.size = sz;
            this.srcPack = key;
            this.pos = pos;
        }

        @Override
        public ObjectStream openStream() throws IOException {
            final DfsReader ctx = db.newReader();
            if (srcPack != packKey) {
                try {
                    // Post DfsInserter.flush() use the normal code path.
                    // The newly created pack is registered in the cache.
                    return ctx.open(id, type).openStream();
                } finally {
                    ctx.close();
                }
            }

            int bufsz = 8192;
            final Inflater inf = ctx.inflater();
            return new ObjectStream.Filter(type,
                    size, new BufferedInputStream(new InflaterInputStream(
                            new ReadBackStream(pos), inf, bufsz), bufsz)) {
                @Override
                public void close() throws IOException {
                    ctx.close();
                    super.close();
                }
            };
        }

        @Override
        public int getType() {
            return type;
        }

        @Override
        public long getSize() {
            return size;
        }

        @Override
        public boolean isLarge() {
            return true;
        }

        @Override
        public byte[] getCachedBytes() throws LargeObjectException {
            throw new LargeObjectException.ExceedsLimit(
                    db.getReaderOptions().getStreamFileThreshold(), size);
        }
    }

    private final class ReadBackStream extends InputStream {
        private long pos;

        ReadBackStream(long offset) {
            pos = offset;
        }

        @Override
        public int read() throws IOException {
            byte[] b = new byte[1];
            int n = read(b);
            return n == 1 ? b[0] & 0xff : -1;
        }

        @Override
        public int read(byte[] buf, int ptr, int len) throws IOException {
            int n = packOut.read(pos, buf, ptr, len);
            if (n > 0) {
                pos += n;
            }
            return n;
        }
    }
}