You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

DfsReader.java 22KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800
  1. /*
  2. * Copyright (C) 2008-2011, Google Inc.
  3. * Copyright (C) 2006-2008, Shawn O. Pearce <spearce@spearce.org>
  4. * and other copyright owners as documented in the project's IP log.
  5. *
  6. * This program and the accompanying materials are made available
  7. * under the terms of the Eclipse Distribution License v1.0 which
  8. * accompanies this distribution, is reproduced below, and is
  9. * available at http://www.eclipse.org/org/documents/edl-v10.php
  10. *
  11. * All rights reserved.
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials provided
  23. * with the distribution.
  24. *
  25. * - Neither the name of the Eclipse Foundation, Inc. nor the
  26. * names of its contributors may be used to endorse or promote
  27. * products derived from this software without specific prior
  28. * written permission.
  29. *
  30. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  31. * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
  32. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  33. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  34. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  35. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  36. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  37. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  38. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  39. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  40. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  41. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  42. * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43. */
  44. package org.eclipse.jgit.storage.dfs;
  45. import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
  46. import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
  47. import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
  48. import static org.eclipse.jgit.storage.pack.PackExt.PACK;
  49. import java.io.IOException;
  50. import java.io.InterruptedIOException;
  51. import java.security.MessageDigest;
  52. import java.text.MessageFormat;
  53. import java.util.ArrayList;
  54. import java.util.Arrays;
  55. import java.util.Collection;
  56. import java.util.Collections;
  57. import java.util.Comparator;
  58. import java.util.HashSet;
  59. import java.util.Iterator;
  60. import java.util.LinkedList;
  61. import java.util.List;
  62. import java.util.Set;
  63. import java.util.concurrent.ExecutionException;
  64. import java.util.zip.DataFormatException;
  65. import java.util.zip.Inflater;
  66. import org.eclipse.jgit.errors.IncorrectObjectTypeException;
  67. import org.eclipse.jgit.errors.MissingObjectException;
  68. import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
  69. import org.eclipse.jgit.internal.JGitText;
  70. import org.eclipse.jgit.lib.AbbreviatedObjectId;
  71. import org.eclipse.jgit.lib.AnyObjectId;
  72. import org.eclipse.jgit.lib.AsyncObjectLoaderQueue;
  73. import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
  74. import org.eclipse.jgit.lib.Constants;
  75. import org.eclipse.jgit.lib.InflaterCache;
  76. import org.eclipse.jgit.lib.ObjectId;
  77. import org.eclipse.jgit.lib.ObjectLoader;
  78. import org.eclipse.jgit.lib.ObjectReader;
  79. import org.eclipse.jgit.lib.ProgressMonitor;
  80. import org.eclipse.jgit.revwalk.ObjectWalk;
  81. import org.eclipse.jgit.revwalk.RevCommit;
  82. import org.eclipse.jgit.revwalk.RevWalk;
  83. import org.eclipse.jgit.storage.pack.CachedPack;
  84. import org.eclipse.jgit.storage.pack.ObjectReuseAsIs;
  85. import org.eclipse.jgit.storage.pack.ObjectToPack;
  86. import org.eclipse.jgit.storage.pack.PackOutputStream;
  87. import org.eclipse.jgit.storage.pack.PackWriter;
  88. import org.eclipse.jgit.util.BlockList;
  89. /**
  90. * Reader to access repository content through.
  91. * <p>
  92. * See the base {@link ObjectReader} documentation for details. Notably, a
  93. * reader is not thread safe.
  94. */
public final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
	/** Temporary buffer large enough for at least one raw object id. */
	final byte[] tempId = new byte[OBJECT_ID_LENGTH];

	/** Database this reader loads objects from. */
	final DfsObjDatabase db;

	/** Lazily obtained inflater; returned to the InflaterCache on release(). */
	private Inflater inf;

	/** Most recently pinned pack block; reused when the next read hits it. */
	private DfsBlock block;

	/** Lazily created per-reader cache of inflated delta bases. */
	private DeltaBaseCache baseCache;

	/** Pack that satisfied the last lookup; probed first on the next lookup. */
	private DfsPackFile last;

	/** When true, pack access paths may schedule asynchronous read-ahead. */
	private boolean wantReadAhead;

	/** Outstanding read-ahead requests; {@code null} when none are pending. */
	private List<ReadAheadTask.BlockFuture> pendingReadAhead;

	DfsReader(DfsObjDatabase db) {
		this.db = db;
	}

	/** @return configured options of this reader's database. */
	DfsReaderOptions getOptions() {
		return db.getReaderOptions();
	}

	/** @return the delta base cache, creating it on first use. */
	DeltaBaseCache getDeltaBaseCache() {
		if (baseCache == null)
			baseCache = new DeltaBaseCache(this);
		return baseCache;
	}

	/** @return size in bytes beyond which objects are streamed, not buffered. */
	int getStreamFileThreshold() {
		return getOptions().getStreamFileThreshold();
	}

	@Override
	public ObjectReader newReader() {
		return new DfsReader(db);
	}

	@Override
	public Collection<ObjectId> resolve(AbbreviatedObjectId id)
			throws IOException {
		if (id.isComplete())
			return Collections.singleton(id.toObjectId());
		HashSet<ObjectId> matches = new HashSet<ObjectId>(4);
		for (DfsPackFile pack : db.getPacks()) {
			pack.resolve(this, matches, id, 256);
			// 256 candidates is more than enough to report the
			// abbreviation as ambiguous; stop scanning packs early.
			if (256 <= matches.size())
				break;
		}
		return matches;
	}

	@Override
	public boolean has(AnyObjectId objectId) throws IOException {
		// Check the pack that answered the previous query first; lookups
		// tend to cluster in the same pack.
		if (last != null && last.hasObject(this, objectId))
			return true;
		for (DfsPackFile pack : db.getPacks()) {
			if (last == pack)
				continue;
			if (pack.hasObject(this, objectId)) {
				last = pack;
				return true;
			}
		}
		return false;
	}

	@Override
	public ObjectLoader open(AnyObjectId objectId, int typeHint)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Probe the most recently successful pack before a full scan.
		if (last != null) {
			ObjectLoader ldr = last.get(this, objectId);
			if (ldr != null)
				return ldr;
		}
		for (DfsPackFile pack : db.getPacks()) {
			if (pack == last)
				continue;
			ObjectLoader ldr = pack.get(this, objectId);
			if (ldr != null) {
				last = pack;
				return ldr;
			}
		}
		if (typeHint == OBJ_ANY)
			throw new MissingObjectException(objectId.copy(), "unknown");
		throw new MissingObjectException(objectId.copy(), typeHint);
	}

	@Override
	public Set<ObjectId> getShallowCommits() {
		// DFS repositories do not support shallow clones.
		return Collections.emptySet();
	}

	/** Orders found objects by pack, then by offset within the pack. */
	private static final Comparator<FoundObject<?>> FOUND_OBJECT_SORT = new Comparator<FoundObject<?>>() {
		public int compare(FoundObject<?> a, FoundObject<?> b) {
			int cmp = a.packIndex - b.packIndex;
			if (cmp == 0)
				cmp = Long.signum(a.offset - b.offset);
			return cmp;
		}
	};

	/** Location of an object resolved by {@link #findAll(Iterable)}. */
	private static class FoundObject<T extends ObjectId> {
		final T id;
		final DfsPackFile pack;
		final long offset;
		final int packIndex;

		FoundObject(T objectId, int packIdx, DfsPackFile pack, long offset) {
			this.id = objectId;
			this.pack = pack;
			this.offset = offset;
			this.packIndex = packIdx;
		}

		// Marks an object that was not found in any pack (pack == null).
		FoundObject(T objectId) {
			this.id = objectId;
			this.pack = null;
			this.offset = 0;
			this.packIndex = 0;
		}
	}

	/**
	 * Locate every requested object, returning results sorted by pack and
	 * offset so subsequent reads sweep each pack sequentially. Objects that
	 * cannot be found are still included, with a null pack, so callers can
	 * report them missing in order.
	 */
	private <T extends ObjectId> Iterable<FoundObject<T>> findAll(
			Iterable<T> objectIds) throws IOException {
		ArrayList<FoundObject<T>> r = new ArrayList<FoundObject<T>>();
		DfsPackFile[] packList = db.getPacks();
		if (packList.length == 0) {
			// No packs at all; every object is missing.
			for (T t : objectIds)
				r.add(new FoundObject<T>(t));
			return r;
		}

		// Remember which pack matched last; consecutive ids usually live
		// in the same pack, so try it first for each id.
		int lastIdx = 0;
		DfsPackFile lastPack = packList[lastIdx];

		OBJECT_SCAN: for (T t : objectIds) {
			try {
				long p = lastPack.findOffset(this, t);
				if (0 < p) {
					r.add(new FoundObject<T>(t, lastIdx, lastPack, p));
					continue;
				}
			} catch (IOException e) {
				// Fall through and try to examine other packs.
			}

			for (int i = 0; i < packList.length; i++) {
				if (i == lastIdx)
					continue;
				DfsPackFile pack = packList[i];
				try {
					long p = pack.findOffset(this, t);
					if (0 < p) {
						r.add(new FoundObject<T>(t, i, pack, p));
						lastIdx = i;
						lastPack = pack;
						continue OBJECT_SCAN;
					}
				} catch (IOException e) {
					// Examine other packs.
				}
			}

			// Not present in any pack; record as missing.
			r.add(new FoundObject<T>(t));
		}

		Collections.sort(r, FOUND_OBJECT_SORT);
		last = lastPack;
		return r;
	}

	@Override
	public <T extends ObjectId> AsyncObjectLoaderQueue<T> open(
			Iterable<T> objectIds, final boolean reportMissing) {
		wantReadAhead = true;

		// Resolve all ids up front; a failure here is deferred and
		// re-thrown from next() after the successfully found objects
		// have been consumed.
		Iterable<FoundObject<T>> order;
		IOException error = null;
		try {
			order = findAll(objectIds);
		} catch (IOException e) {
			order = Collections.emptyList();
			error = e;
		}

		final Iterator<FoundObject<T>> idItr = order.iterator();
		final IOException findAllError = error;
		return new AsyncObjectLoaderQueue<T>() {
			private FoundObject<T> cur;

			public boolean next() throws MissingObjectException, IOException {
				if (idItr.hasNext()) {
					cur = idItr.next();
					return true;
				} else if (findAllError != null) {
					throw findAllError;
				} else {
					cancelReadAhead();
					return false;
				}
			}

			public T getCurrent() {
				return cur.id;
			}

			public ObjectId getObjectId() {
				return cur.id;
			}

			public ObjectLoader open() throws IOException {
				if (cur.pack == null)
					throw new MissingObjectException(cur.id, "unknown");
				return cur.pack.load(DfsReader.this, cur.offset);
			}

			public boolean cancel(boolean mayInterruptIfRunning) {
				cancelReadAhead();
				return true;
			}

			public void release() {
				cancelReadAhead();
			}
		};
	}

	@Override
	public <T extends ObjectId> AsyncObjectSizeQueue<T> getObjectSize(
			Iterable<T> objectIds, final boolean reportMissing) {
		wantReadAhead = true;

		// Same deferred-error strategy as open(Iterable, boolean) above.
		Iterable<FoundObject<T>> order;
		IOException error = null;
		try {
			order = findAll(objectIds);
		} catch (IOException e) {
			order = Collections.emptyList();
			error = e;
		}

		final Iterator<FoundObject<T>> idItr = order.iterator();
		final IOException findAllError = error;
		return new AsyncObjectSizeQueue<T>() {
			private FoundObject<T> cur;

			private long sz;

			public boolean next() throws MissingObjectException, IOException {
				if (idItr.hasNext()) {
					cur = idItr.next();
					if (cur.pack == null)
						throw new MissingObjectException(cur.id, "unknown");
					sz = cur.pack.getObjectSize(DfsReader.this, cur.offset);
					return true;
				} else if (findAllError != null) {
					throw findAllError;
				} else {
					cancelReadAhead();
					return false;
				}
			}

			public T getCurrent() {
				return cur.id;
			}

			public ObjectId getObjectId() {
				return cur.id;
			}

			public long getSize() {
				return sz;
			}

			public boolean cancel(boolean mayInterruptIfRunning) {
				cancelReadAhead();
				return true;
			}

			public void release() {
				cancelReadAhead();
			}
		};
	}

	@Override
	public void walkAdviceBeginCommits(RevWalk walk, Collection<RevCommit> roots) {
		// A commit walk is about to start; allow read-ahead of pack data.
		wantReadAhead = true;
	}

	@Override
	public void walkAdviceBeginTrees(ObjectWalk ow, RevCommit min, RevCommit max) {
		wantReadAhead = true;
	}

	@Override
	public void walkAdviceEnd() {
		cancelReadAhead();
	}

	@Override
	public long getObjectSize(AnyObjectId objectId, int typeHint)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Same MRU pack probing as has()/open().
		if (last != null) {
			long sz = last.getObjectSize(this, objectId);
			if (0 <= sz)
				return sz;
		}
		for (DfsPackFile pack : db.getPacks()) {
			if (pack == last)
				continue;
			long sz = pack.getObjectSize(this, objectId);
			if (0 <= sz) {
				last = pack;
				return sz;
			}
		}
		if (typeHint == OBJ_ANY)
			throw new MissingObjectException(objectId.copy(), "unknown");
		throw new MissingObjectException(objectId.copy(), typeHint);
	}

	public DfsObjectToPack newObjectToPack(AnyObjectId objectId, int type) {
		return new DfsObjectToPack(objectId, type);
	}

	/** Orders representations by pack, then by offset within the pack. */
	private static final Comparator<DfsObjectRepresentation> REPRESENTATION_SORT = new Comparator<DfsObjectRepresentation>() {
		public int compare(DfsObjectRepresentation a, DfsObjectRepresentation b) {
			int cmp = a.packIndex - b.packIndex;
			if (cmp == 0)
				cmp = Long.signum(a.offset - b.offset);
			return cmp;
		}
	};

	/**
	 * Find every pack copy of every object and offer each representation to
	 * the packer so it can pick the cheapest one to reuse. Progress is split
	 * roughly 50/50 between the lookup pass and the selection pass.
	 */
	public void selectObjectRepresentation(PackWriter packer,
			ProgressMonitor monitor, Iterable<ObjectToPack> objects)
			throws IOException, MissingObjectException {
		DfsPackFile[] packList = db.getPacks();
		if (packList.length == 0) {
			Iterator<ObjectToPack> itr = objects.iterator();
			if (itr.hasNext())
				throw new MissingObjectException(itr.next(), "unknown");
			return;
		}

		int objectCount = 0;
		int updated = 0;
		int posted = 0;
		List<DfsObjectRepresentation> all = new BlockList<DfsObjectRepresentation>();
		for (ObjectToPack otp : objects) {
			boolean found = false;
			// Unlike findAll(), collect every pack that has the object,
			// not just the first hit: each copy is a candidate for reuse.
			for (int packIndex = 0; packIndex < packList.length; packIndex++) {
				DfsPackFile pack = packList[packIndex];
				long p = pack.findOffset(this, otp);
				if (0 < p) {
					DfsObjectRepresentation r = new DfsObjectRepresentation(otp);
					r.pack = pack;
					r.packIndex = packIndex;
					r.offset = p;
					all.add(r);
					found = true;
				}
			}
			if (!found)
				throw new MissingObjectException(otp, otp.getType());
			if ((++updated & 1) == 1) {
				monitor.update(1); // Update by 50%, the other 50% is below.
				posted++;
			}
			objectCount++;
		}
		// Sorting by pack/offset makes the representation pass below a
		// sequential sweep of each pack.
		Collections.sort(all, REPRESENTATION_SORT);

		try {
			wantReadAhead = true;
			for (DfsObjectRepresentation r : all) {
				r.pack.representation(this, r);
				packer.select(r.object, r);
				if ((++updated & 1) == 1 && posted < objectCount) {
					monitor.update(1);
					posted++;
				}
			}
		} finally {
			cancelReadAhead();
		}
		// Flush any progress still owed due to the alternating updates.
		if (posted < objectCount)
			monitor.update(objectCount - posted);
	}

	public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp,
			boolean validate) throws IOException,
			StoredObjectRepresentationNotAvailableException {
		DfsObjectToPack src = (DfsObjectToPack) otp;
		src.pack.copyAsIs(out, src, validate, this);
	}

	/** Orders objects for writing by pack, then by offset within the pack. */
	private static final Comparator<ObjectToPack> WRITE_SORT = new Comparator<ObjectToPack>() {
		public int compare(ObjectToPack o1, ObjectToPack o2) {
			DfsObjectToPack a = (DfsObjectToPack) o1;
			DfsObjectToPack b = (DfsObjectToPack) o2;
			int cmp = a.packIndex - b.packIndex;
			if (cmp == 0)
				cmp = Long.signum(a.offset - b.offset);
			return cmp;
		}
	};

	public void writeObjects(PackOutputStream out, List<ObjectToPack> list)
			throws IOException {
		if (list.isEmpty())
			return;

		// Sorting objects by order in the current packs is usually
		// worthwhile. Most packs are going to be OFS_DELTA style,
		// where the base must appear before the deltas. If both base
		// and delta are to be reused, this ensures the base writes in
		// the output first without the recursive write-base-first logic
		// used by PackWriter to ensure OFS_DELTA can be used.
		//
		// Sorting by pack also ensures newer objects go first, which
		// typically matches the desired order.
		//
		// Only do this sorting for OBJ_TREE and OBJ_BLOB. Commits
		// are very likely to already be sorted in a good order in the
		// incoming list, and if they aren't, JGit's PackWriter has fixed
		// the order to be more optimal for readers, so honor that.
		switch (list.get(0).getType()) {
		case OBJ_TREE:
		case OBJ_BLOB:
			Collections.sort(list, WRITE_SORT);
		}

		try {
			wantReadAhead = true;
			for (ObjectToPack otp : list)
				out.writeObject(otp);
		} finally {
			cancelReadAhead();
		}
	}

	public Collection<CachedPack> getCachedPacks() throws IOException {
		DfsPackFile[] packList = db.getPacks();
		List<CachedPack> cached = new ArrayList<CachedPack>(packList.length);
		for (DfsPackFile pack : packList) {
			DfsPackDescription desc = pack.getPackDescription();
			if (canBeCachedPack(desc))
				cached.add(new DfsCachedPack(pack));
		}
		return cached;
	}

	/** A pack qualifies as cached only if it records its tip commits. */
	private static boolean canBeCachedPack(DfsPackDescription desc) {
		return desc.getTips() != null && !desc.getTips().isEmpty();
	}

	public void copyPackAsIs(PackOutputStream out, CachedPack pack,
			boolean validate) throws IOException {
		try {
			wantReadAhead = true;
			((DfsCachedPack) pack).copyAsIs(out, validate, this);
		} finally {
			cancelReadAhead();
		}
	}

	/**
	 * Copy bytes from the window to a caller supplied buffer.
	 *
	 * @param pack
	 *            the file the desired window is stored within.
	 * @param position
	 *            position within the file to read from.
	 * @param dstbuf
	 *            destination buffer to copy into.
	 * @param dstoff
	 *            offset within <code>dstbuf</code> to start copying into.
	 * @param cnt
	 *            number of bytes to copy. This value may exceed the number of
	 *            bytes remaining in the window starting at offset
	 *            <code>pos</code>.
	 * @return number of bytes actually copied; this may be less than
	 *         <code>cnt</code> if <code>cnt</code> exceeded the number of bytes
	 *         available.
	 * @throws IOException
	 *             this cursor does not match the provider or id and the proper
	 *             window could not be acquired through the provider's cache.
	 */
	int copy(DfsPackFile pack, long position, byte[] dstbuf, int dstoff, int cnt)
			throws IOException {
		if (cnt == 0)
			return 0;

		// A negative pack length means it is not yet known; in that case
		// skip the bounds check and discover the length as blocks load.
		long length = pack.length;
		if (0 <= length && length <= position)
			return 0;

		int need = cnt;
		do {
			pin(pack, position);
			int r = block.copy(position, dstbuf, dstoff, need);
			position += r;
			dstoff += r;
			need -= r;
			if (length < 0)
				length = pack.length;
		} while (0 < need && position < length);
		return cnt - need;
	}

	/**
	 * Stream an entire pack (minus its 12 byte header and 20 byte trailing
	 * checksum) to the output. When {@code validate} is true the content is
	 * re-hashed and compared against the trailing checksum, marking the pack
	 * invalid on mismatch or truncation.
	 */
	void copyPackAsIs(DfsPackFile pack, long length, boolean validate,
			PackOutputStream out) throws IOException {
		MessageDigest md = null;
		if (validate) {
			md = Constants.newMessageDigest();
			byte[] buf = out.getCopyBuffer();
			pin(pack, 0);
			// The 12 byte pack header is hashed but not forwarded.
			if (block.copy(0, buf, 0, 12) != 12) {
				pack.setInvalid();
				throw new IOException(JGitText.get().packfileIsTruncated);
			}
			md.update(buf, 0, 12);
		}

		long position = 12;
		long remaining = length - (12 + 20);
		while (0 < remaining) {
			pin(pack, position);

			int ptr = (int) (position - block.start);
			int n = (int) Math.min(block.size() - ptr, remaining);
			block.write(out, position, n, md);
			position += n;
			remaining -= n;
		}

		if (md != null) {
			// Compare the computed hash against the pack's stored trailer.
			byte[] buf = new byte[20];
			byte[] actHash = md.digest();

			pin(pack, position);
			if (block.copy(position, buf, 0, 20) != 20) {
				pack.setInvalid();
				throw new IOException(JGitText.get().packfileIsTruncated);
			}
			if (!Arrays.equals(actHash, buf)) {
				pack.setInvalid();
				throw new IOException(MessageFormat.format(
						JGitText.get().packfileCorruptionDetected,
						pack.getPackDescription().getFileName(PACK)));
			}
		}
	}

	/**
	 * Inflate a region of the pack starting at {@code position}.
	 *
	 * @param pack
	 *            the file the desired window is stored within.
	 * @param position
	 *            position within the file to read from.
	 * @param dstbuf
	 *            destination buffer the inflater should output decompressed
	 *            data to.
	 * @param headerOnly
	 *            if true the caller wants only {@code dstbuf.length} bytes.
	 * @return updated <code>dstoff</code> based on the number of bytes
	 *         successfully inflated into <code>dstbuf</code>.
	 * @throws IOException
	 *             this cursor does not match the provider or id and the proper
	 *             window could not be acquired through the provider's cache.
	 * @throws DataFormatException
	 *             the inflater encountered an invalid chunk of data. Data
	 *             stream corruption is likely.
	 */
	int inflate(DfsPackFile pack, long position, byte[] dstbuf,
			boolean headerOnly) throws IOException, DataFormatException {
		prepareInflater();
		pin(pack, position);
		int dstoff = 0;
		for (;;) {
			dstoff = block.inflate(inf, position, dstbuf, dstoff);

			if (headerOnly && dstoff == dstbuf.length)
				return dstoff;
			if (inf.needsInput()) {
				// Advance past the consumed portion and pin the next block.
				position += block.remaining(position);
				pin(pack, position);
			} else if (inf.finished())
				return dstoff;
			else
				throw new DataFormatException();
		}
	}

	/**
	 * Return the current block directly when the requested range lies fully
	 * inside it, allowing a zero-copy write; otherwise return null.
	 */
	DfsBlock quickCopy(DfsPackFile p, long pos, long cnt)
			throws IOException {
		pin(p, pos);
		if (block.contains(p.key, pos + (cnt - 1)))
			return block;
		return null;
	}

	/** @return this reader's inflater, reset and ready for use. */
	Inflater inflater() {
		prepareInflater();
		return inf;
	}

	private void prepareInflater() {
		if (inf == null)
			inf = InflaterCache.get();
		else
			inf.reset();
	}

	/**
	 * Ensure {@link #block} holds the block of {@code pack} that contains
	 * {@code position}, loading it if necessary.
	 */
	void pin(DfsPackFile pack, long position) throws IOException {
		DfsBlock b = block;
		if (b == null || !b.contains(pack.key, position)) {
			// If memory is low, we may need what is in our window field to
			// be cleaned up by the GC during the get for the next window.
			// So we always clear it, even though we are just going to set
			// it again.
			//
			block = null;

			// If a read-ahead task already covers this block, wait for it
			// rather than issuing a duplicate load.
			if (pendingReadAhead != null)
				waitForBlock(pack.key, position);
			block = pack.getOrLoadBlock(position, this);
		}
	}

	/** @return true when callers have signaled a sequential access pattern. */
	boolean wantReadAhead() {
		return wantReadAhead;
	}

	/** Record read-ahead futures so pin() can wait on them. */
	void startedReadAhead(List<ReadAheadTask.BlockFuture> blocks) {
		if (pendingReadAhead == null)
			pendingReadAhead = new LinkedList<ReadAheadTask.BlockFuture>();
		pendingReadAhead.addAll(blocks);
	}

	private void cancelReadAhead() {
		if (pendingReadAhead != null) {
			for (ReadAheadTask.BlockFuture f : pendingReadAhead)
				f.cancel(true);
			pendingReadAhead = null;
		}
		wantReadAhead = false;
	}

	private void waitForBlock(DfsPackKey key, long position)
			throws InterruptedIOException {
		Iterator<ReadAheadTask.BlockFuture> itr = pendingReadAhead.iterator();
		while (itr.hasNext()) {
			ReadAheadTask.BlockFuture f = itr.next();
			if (f.contains(key, position)) {
				try {
					f.get();
				} catch (InterruptedException e) {
					throw new InterruptedIOException();
				} catch (ExecutionException e) {
					// Exceptions should never be thrown by get(). Ignore
					// this and let the normal load paths identify any error.
				}
				itr.remove();
				if (pendingReadAhead.isEmpty())
					pendingReadAhead = null;
				break;
			}
		}
	}

	/** Release the current window cursor. */
	@Override
	public void release() {
		cancelReadAhead();
		last = null;
		block = null;
		baseCache = null;
		try {
			InflaterCache.release(inf);
		} finally {
			inf = null;
		}
	}
}