
DfsReader.java

/*
 * Copyright (C) 2008-2011, Google Inc.
 * Copyright (C) 2006-2008, Shawn O. Pearce <spearce@spearce.org>
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.eclipse.jgit.internal.storage.dfs;

import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackList;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
import org.eclipse.jgit.internal.storage.file.BitmapIndexImpl;
import org.eclipse.jgit.internal.storage.file.PackBitmapIndex;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
import org.eclipse.jgit.internal.storage.pack.CachedPack;
import org.eclipse.jgit.internal.storage.pack.ObjectReuseAsIs;
import org.eclipse.jgit.internal.storage.pack.ObjectToPack;
import org.eclipse.jgit.internal.storage.pack.PackOutputStream;
import org.eclipse.jgit.internal.storage.pack.PackWriter;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.AsyncObjectLoaderQueue;
import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
import org.eclipse.jgit.lib.BitmapIndex;
import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
import org.eclipse.jgit.lib.InflaterCache;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.util.BlockList;

/**
 * Reader to access repository content through a {@link DfsObjDatabase}.
 * <p>
 * See the base {@link ObjectReader} documentation for details. Notably, a
 * reader is not thread safe.
 */
public final class DfsReader extends ObjectReader implements ObjectReuseAsIs {
	private static final int MAX_RESOLVE_MATCHES = 256;

	/** Temporary buffer large enough for at least one raw object id. */
	final byte[] tempId = new byte[OBJECT_ID_LENGTH];

	/** Database this reader loads objects from. */
	final DfsObjDatabase db;

	/** IO statistics gathered while this reader is in use. */
	final DfsReaderIoStats.Accumulator stats = new DfsReaderIoStats.Accumulator();

	/** Lazily allocated inflater used to decompress object data. */
	private Inflater inf;

	/** Currently pinned pack block, reused across adjacent reads. */
	private DfsBlock block;

	/** Cache of recently inflated delta bases, allocated on demand. */
	private DeltaBaseCache baseCache;

	/** Pack that most recently satisfied a lookup; it is checked first next time. */
	private DfsPackFile last;

	/** When true, objects stored only in garbage packs are treated as missing. */
	private boolean avoidUnreachable;
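
	/**
	 * Initialize a reader that loads objects from the given database.
	 *
	 * @param db
	 *            the object database to read from.
	 */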
	DfsReader(DfsObjDatabase db) {
		this.db = db;
		this.streamFileThreshold = db.getReaderOptions().getStreamFileThreshold();
	}

	DfsReaderOptions getOptions() {
		return db.getReaderOptions();
	}
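
	/** @return cache of recently used delta bases, created on first use. */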
	DeltaBaseCache getDeltaBaseCache() {
		if (baseCache == null)
			baseCache = new DeltaBaseCache(this);
		return baseCache;
	}

	@Override
	public ObjectReader newReader() {
		return new DfsReader(db);
	}

	@Override
	public void setAvoidUnreachableObjects(boolean avoid) {
		avoidUnreachable = avoid;
	}

	@Override
	public BitmapIndex getBitmapIndex() throws IOException {
		for (DfsPackFile pack : db.getPacks()) {
			PackBitmapIndex bitmapIndex = pack.getBitmapIndex(this);
			if (bitmapIndex != null)
				return new BitmapIndexImpl(bitmapIndex);
		}
		return null;
	}

	@Override
	public Collection<CachedPack> getCachedPacksAndUpdate(
			BitmapBuilder needBitmap) throws IOException {
		for (DfsPackFile pack : db.getPacks()) {
			PackBitmapIndex bitmapIndex = pack.getBitmapIndex(this);
			if (needBitmap.removeAllOrNone(bitmapIndex))
				return Collections.<CachedPack> singletonList(
						new DfsCachedPack(pack));
		}
		return Collections.emptyList();
	}

	@Override
	public Collection<ObjectId> resolve(AbbreviatedObjectId id)
			throws IOException {
		if (id.isComplete())
			return Collections.singleton(id.toObjectId());
		HashSet<ObjectId> matches = new HashSet<>(4);
		PackList packList = db.getPackList();
		resolveImpl(packList, id, matches);
		if (matches.size() < MAX_RESOLVE_MATCHES && packList.dirty()) {
			stats.scanPacks++;
			resolveImpl(db.scanPacks(packList), id, matches);
		}
		return matches;
	}
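
	/**
	 * Search the packs of the given list for objects matching the
	 * abbreviation, stopping once MAX_RESOLVE_MATCHES candidates have been
	 * collected. Garbage packs are skipped when unreachable objects are being
	 * avoided.
	 */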
	private void resolveImpl(PackList packList, AbbreviatedObjectId id,
			HashSet<ObjectId> matches) throws IOException {
		for (DfsPackFile pack : packList.packs) {
			if (skipGarbagePack(pack)) {
				continue;
			}
			pack.resolve(this, matches, id, MAX_RESOLVE_MATCHES);
			if (matches.size() >= MAX_RESOLVE_MATCHES) {
				break;
			}
		}
	}

	@Override
	public boolean has(AnyObjectId objectId) throws IOException {
		if (last != null
				&& !skipGarbagePack(last)
				&& last.hasObject(this, objectId))
			return true;
		PackList packList = db.getPackList();
		if (hasImpl(packList, objectId)) {
			return true;
		} else if (packList.dirty()) {
			stats.scanPacks++;
			return hasImpl(db.scanPacks(packList), objectId);
		}
		return false;
	}

	private boolean hasImpl(PackList packList, AnyObjectId objectId)
			throws IOException {
		for (DfsPackFile pack : packList.packs) {
			if (pack == last || skipGarbagePack(pack))
				continue;
			if (pack.hasObject(this, objectId)) {
				last = pack;
				return true;
			}
		}
		return false;
	}

	@Override
	public ObjectLoader open(AnyObjectId objectId, int typeHint)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		ObjectLoader ldr;
		if (last != null && !skipGarbagePack(last)) {
			ldr = last.get(this, objectId);
			if (ldr != null) {
				return checkType(ldr, objectId, typeHint);
			}
		}

		PackList packList = db.getPackList();
		ldr = openImpl(packList, objectId);
		if (ldr != null) {
			return checkType(ldr, objectId, typeHint);
		}
		if (packList.dirty()) {
			stats.scanPacks++;
			ldr = openImpl(db.scanPacks(packList), objectId);
			if (ldr != null) {
				return checkType(ldr, objectId, typeHint);
			}
		}

		if (typeHint == OBJ_ANY)
			throw new MissingObjectException(objectId.copy(),
					JGitText.get().unknownObjectType2);
		throw new MissingObjectException(objectId.copy(), typeHint);
	}

	private static ObjectLoader checkType(ObjectLoader ldr, AnyObjectId id,
			int typeHint) throws IncorrectObjectTypeException {
		if (typeHint != OBJ_ANY && ldr.getType() != typeHint) {
			throw new IncorrectObjectTypeException(id.copy(), typeHint);
		}
		return ldr;
	}
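
	/**
	 * Open the object from the first pack in the given list that contains it,
	 * remembering that pack for the next lookup.
	 */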
	private ObjectLoader openImpl(PackList packList, AnyObjectId objectId)
			throws IOException {
		for (DfsPackFile pack : packList.packs) {
			if (pack == last || skipGarbagePack(pack)) {
				continue;
			}
			ObjectLoader ldr = pack.get(this, objectId);
			if (ldr != null) {
				last = pack;
				return ldr;
			}
		}
		return null;
	}

	@Override
	public Set<ObjectId> getShallowCommits() {
		return Collections.emptySet();
	}
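
	/** Sorts found objects by pack, then by offset within the pack. */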
	private static final Comparator<FoundObject<?>> FOUND_OBJECT_SORT = new Comparator<FoundObject<?>>() {
		@Override
		public int compare(FoundObject<?> a, FoundObject<?> b) {
			int cmp = a.packIndex - b.packIndex;
			if (cmp == 0)
				cmp = Long.signum(a.offset - b.offset);
			return cmp;
		}
	};

	private static class FoundObject<T extends ObjectId> {
		final T id;
		final DfsPackFile pack;
		final long offset;
		final int packIndex;

		FoundObject(T objectId, int packIdx, DfsPackFile pack, long offset) {
			this.id = objectId;
			this.pack = pack;
			this.offset = offset;
			this.packIndex = packIdx;
		}

		FoundObject(T objectId) {
			this.id = objectId;
			this.pack = null;
			this.offset = 0;
			this.packIndex = 0;
		}
	}
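
	/**
	 * Locate every requested object, returning results sorted by pack and
	 * offset so subsequent reads access each pack sequentially. Objects that
	 * could not be found are returned with a null pack.
	 */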
	private <T extends ObjectId> Iterable<FoundObject<T>> findAll(
			Iterable<T> objectIds) throws IOException {
		Collection<T> pending = new LinkedList<>();
		for (T id : objectIds) {
			pending.add(id);
		}

		PackList packList = db.getPackList();
		List<FoundObject<T>> r = new ArrayList<>();
		findAllImpl(packList, pending, r);
		if (!pending.isEmpty() && packList.dirty()) {
			stats.scanPacks++;
			findAllImpl(db.scanPacks(packList), pending, r);
		}
		for (T t : pending) {
			r.add(new FoundObject<>(t));
		}
		Collections.sort(r, FOUND_OBJECT_SORT);
		return r;
	}

	private <T extends ObjectId> void findAllImpl(PackList packList,
			Collection<T> pending, List<FoundObject<T>> r) {
		DfsPackFile[] packs = packList.packs;
		if (packs.length == 0) {
			return;
		}
		int lastIdx = 0;
		DfsPackFile lastPack = packs[lastIdx];

		OBJECT_SCAN: for (Iterator<T> it = pending.iterator(); it.hasNext();) {
			T t = it.next();
			if (!skipGarbagePack(lastPack)) {
				try {
					long p = lastPack.findOffset(this, t);
					if (0 < p) {
						r.add(new FoundObject<>(t, lastIdx, lastPack, p));
						it.remove();
						continue;
					}
				} catch (IOException e) {
					// Fall through and try to examine other packs.
				}
			}

			for (int i = 0; i < packs.length; i++) {
				if (i == lastIdx)
					continue;
				DfsPackFile pack = packs[i];
				if (skipGarbagePack(pack))
					continue;
				try {
					long p = pack.findOffset(this, t);
					if (0 < p) {
						r.add(new FoundObject<>(t, i, pack, p));
						it.remove();
						lastIdx = i;
						lastPack = pack;
						continue OBJECT_SCAN;
					}
				} catch (IOException e) {
					// Examine other packs.
				}
			}
		}

		last = lastPack;
	}
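
	/**
	 * @return true if unreachable objects are being avoided and this pack only
	 *         contains garbage.
	 */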
	private boolean skipGarbagePack(DfsPackFile pack) {
		return avoidUnreachable && pack.isGarbage();
	}

	@Override
	public <T extends ObjectId> AsyncObjectLoaderQueue<T> open(
			Iterable<T> objectIds, final boolean reportMissing) {
		Iterable<FoundObject<T>> order;
		IOException error = null;
		try {
			order = findAll(objectIds);
		} catch (IOException e) {
			order = Collections.emptyList();
			error = e;
		}

		final Iterator<FoundObject<T>> idItr = order.iterator();
		final IOException findAllError = error;
		return new AsyncObjectLoaderQueue<T>() {
			private FoundObject<T> cur;

			@Override
			public boolean next() throws MissingObjectException, IOException {
				if (idItr.hasNext()) {
					cur = idItr.next();
					return true;
				} else if (findAllError != null) {
					throw findAllError;
				} else {
					return false;
				}
			}

			@Override
			public T getCurrent() {
				return cur.id;
			}

			@Override
			public ObjectId getObjectId() {
				return cur.id;
			}

			@Override
			public ObjectLoader open() throws IOException {
				if (cur.pack == null)
					throw new MissingObjectException(cur.id,
							JGitText.get().unknownObjectType2);
				return cur.pack.load(DfsReader.this, cur.offset);
			}

			@Override
			public boolean cancel(boolean mayInterruptIfRunning) {
				return true;
			}

			@Override
			public void release() {
				// Nothing to clean up.
			}
		};
	}

	@Override
	public <T extends ObjectId> AsyncObjectSizeQueue<T> getObjectSize(
			Iterable<T> objectIds, final boolean reportMissing) {
		Iterable<FoundObject<T>> order;
		IOException error = null;
		try {
			order = findAll(objectIds);
		} catch (IOException e) {
			order = Collections.emptyList();
			error = e;
		}

		final Iterator<FoundObject<T>> idItr = order.iterator();
		final IOException findAllError = error;
		return new AsyncObjectSizeQueue<T>() {
			private FoundObject<T> cur;

			private long sz;

			@Override
			public boolean next() throws MissingObjectException, IOException {
				if (idItr.hasNext()) {
					cur = idItr.next();
					if (cur.pack == null)
						throw new MissingObjectException(cur.id,
								JGitText.get().unknownObjectType2);
					sz = cur.pack.getObjectSize(DfsReader.this, cur.offset);
					return true;
				} else if (findAllError != null) {
					throw findAllError;
				} else {
					return false;
				}
			}

			@Override
			public T getCurrent() {
				return cur.id;
			}

			@Override
			public ObjectId getObjectId() {
				return cur.id;
			}

			@Override
			public long getSize() {
				return sz;
			}

			@Override
			public boolean cancel(boolean mayInterruptIfRunning) {
				return true;
			}

			@Override
			public void release() {
				// Nothing to clean up.
			}
		};
	}

	@Override
	public long getObjectSize(AnyObjectId objectId, int typeHint)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		if (last != null && !skipGarbagePack(last)) {
			long sz = last.getObjectSize(this, objectId);
			if (0 <= sz) {
				return sz;
			}
		}

		PackList packList = db.getPackList();
		long sz = getObjectSizeImpl(packList, objectId);
		if (0 <= sz) {
			return sz;
		}
		if (packList.dirty()) {
			stats.scanPacks++;
			sz = getObjectSizeImpl(db.scanPacks(packList), objectId);
			if (0 <= sz) {
				return sz;
			}
		}

		if (typeHint == OBJ_ANY) {
			throw new MissingObjectException(objectId.copy(),
					JGitText.get().unknownObjectType2);
		}
		throw new MissingObjectException(objectId.copy(), typeHint);
	}
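
	/**
	 * Look up the object's size in the given pack list.
	 *
	 * @return size of the object, or -1 if no pack in the list contains it.
	 */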
	private long getObjectSizeImpl(PackList packList, AnyObjectId objectId)
			throws IOException {
		for (DfsPackFile pack : packList.packs) {
			if (pack == last || skipGarbagePack(pack)) {
				continue;
			}
			long sz = pack.getObjectSize(this, objectId);
			if (0 <= sz) {
				last = pack;
				return sz;
			}
		}
		return -1;
	}

	@Override
	public DfsObjectToPack newObjectToPack(AnyObjectId objectId, int type) {
		return new DfsObjectToPack(objectId, type);
	}

	private static final Comparator<DfsObjectToPack> OFFSET_SORT = new Comparator<DfsObjectToPack>() {
		@Override
		public int compare(DfsObjectToPack a, DfsObjectToPack b) {
			return Long.signum(a.getOffset() - b.getOffset());
		}
	};

	@Override
	public void selectObjectRepresentation(PackWriter packer,
			ProgressMonitor monitor, Iterable<ObjectToPack> objects)
			throws IOException, MissingObjectException {
		// Don't check dirty bit on PackList; assume ObjectToPacks all came
		// from the current list.
		for (DfsPackFile pack : sortPacksForSelectRepresentation()) {
			List<DfsObjectToPack> tmp = findAllFromPack(pack, objects);
			if (tmp.isEmpty())
				continue;
			Collections.sort(tmp, OFFSET_SORT);
			PackReverseIndex rev = pack.getReverseIdx(this);
			DfsObjectRepresentation rep = new DfsObjectRepresentation(pack);
			for (DfsObjectToPack otp : tmp) {
				pack.representation(rep, otp.getOffset(), this, rev);
				otp.setOffset(0);
				packer.select(otp, rep);
				if (!otp.isFound()) {
					otp.setFound();
					monitor.update(1);
				}
			}
		}
	}

	private static final Comparator<DfsPackFile> PACK_SORT_FOR_REUSE = new Comparator<DfsPackFile>() {
		@Override
		public int compare(DfsPackFile af, DfsPackFile bf) {
			DfsPackDescription ad = af.getPackDescription();
			DfsPackDescription bd = bf.getPackDescription();
			PackSource as = ad.getPackSource();
			PackSource bs = bd.getPackSource();
			if (as != null && as == bs && DfsPackDescription.isGC(as)) {
				// Push smaller GC files last; these likely have higher quality
				// delta compression and the contained representation should be
				// favored over other files.
				return Long.signum(bd.getFileSize(PACK) - ad.getFileSize(PACK));
			}

			// DfsPackDescription.compareTo already did a reasonable sort.
			// Rely on Arrays.sort being stable, leaving equal elements
			// in their current order.
			return 0;
		}
	};
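
	/**
	 * Copy the current pack list and order it for representation selection,
	 * pushing smaller packs of the same GC source toward the end.
	 */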
	private DfsPackFile[] sortPacksForSelectRepresentation()
			throws IOException {
		DfsPackFile[] packs = db.getPacks();
		DfsPackFile[] sorted = new DfsPackFile[packs.length];
		System.arraycopy(packs, 0, sorted, 0, packs.length);
		Arrays.sort(sorted, PACK_SORT_FOR_REUSE);
		return sorted;
	}
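
	/**
	 * Collect the objects that exist in the given pack, recording each
	 * object's offset; entries at corrupt offsets are skipped.
	 */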
	private List<DfsObjectToPack> findAllFromPack(DfsPackFile pack,
			Iterable<ObjectToPack> objects) throws IOException {
		List<DfsObjectToPack> tmp = new BlockList<>();
		PackIndex idx = pack.getPackIndex(this);
		for (ObjectToPack otp : objects) {
			long p = idx.findOffset(otp);
			if (0 < p && !pack.isCorrupt(p)) {
				otp.setOffset(p);
				tmp.add((DfsObjectToPack) otp);
			}
		}
		return tmp;
	}

	@Override
	public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp,
			boolean validate) throws IOException,
			StoredObjectRepresentationNotAvailableException {
		DfsObjectToPack src = (DfsObjectToPack) otp;
		src.pack.copyAsIs(out, src, validate, this);
	}

	@Override
	public void writeObjects(PackOutputStream out, List<ObjectToPack> list)
			throws IOException {
		for (ObjectToPack otp : list)
			out.writeObject(otp);
	}

	@Override
	public void copyPackAsIs(PackOutputStream out, CachedPack pack)
			throws IOException {
		((DfsCachedPack) pack).copyAsIs(out, this);
	}

	/**
	 * Copy bytes from the window to a caller supplied buffer.
	 *
	 * @param pack
	 *            the file the desired window is stored within.
	 * @param position
	 *            position within the file to read from.
	 * @param dstbuf
	 *            destination buffer to copy into.
	 * @param dstoff
	 *            offset within <code>dstbuf</code> to start copying into.
	 * @param cnt
	 *            number of bytes to copy. This value may exceed the number of
	 *            bytes remaining in the window starting at offset
	 *            <code>position</code>.
	 * @return number of bytes actually copied; this may be less than
	 *         <code>cnt</code> if <code>cnt</code> exceeded the number of
	 *         bytes available.
	 * @throws IOException
	 *             this cursor does not match the provider or id and the proper
	 *             window could not be acquired through the provider's cache.
	 */
	int copy(DfsPackFile pack, long position, byte[] dstbuf, int dstoff, int cnt)
			throws IOException {
		if (cnt == 0)
			return 0;

		long length = pack.length;
		if (0 <= length && length <= position)
			return 0;

		int need = cnt;
		do {
			pin(pack, position);
			int r = block.copy(position, dstbuf, dstoff, need);
			position += r;
			dstoff += r;
			need -= r;
			if (length < 0)
				length = pack.length;
		} while (0 < need && position < length);
		return cnt - need;
	}

	/**
	 * Inflate a region of the pack starting at {@code position}.
	 *
	 * @param pack
	 *            the file the desired window is stored within.
	 * @param position
	 *            position within the file to read from.
	 * @param dstbuf
	 *            destination buffer the inflater should output decompressed
	 *            data to. Must be large enough to store the entire stream,
	 *            unless headerOnly is true.
	 * @param headerOnly
	 *            if true the caller wants only {@code dstbuf.length} bytes.
	 * @return number of bytes inflated into <code>dstbuf</code>.
	 * @throws IOException
	 *             this cursor does not match the provider or id and the proper
	 *             window could not be acquired through the provider's cache.
	 * @throws DataFormatException
	 *             the inflater encountered an invalid chunk of data. Data
	 *             stream corruption is likely.
	 */
	int inflate(DfsPackFile pack, long position, byte[] dstbuf,
			boolean headerOnly) throws IOException, DataFormatException {
		prepareInflater();
		pin(pack, position);
		position += block.setInput(position, inf);
		for (int dstoff = 0;;) {
			int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
			dstoff += n;
			if (inf.finished() || (headerOnly && dstoff == dstbuf.length)) {
				stats.inflatedBytes += dstoff;
				return dstoff;
			} else if (inf.needsInput()) {
				pin(pack, position);
				position += block.setInput(position, inf);
			} else if (n == 0)
				throw new DataFormatException();
		}
	}
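
	/**
	 * @return the single block covering the requested region, or null if the
	 *         region spans more than one block.
	 */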
	DfsBlock quickCopy(DfsPackFile p, long pos, long cnt)
			throws IOException {
		pin(p, pos);
		if (block.contains(p.key, pos + (cnt - 1)))
			return block;
		return null;
	}

	Inflater inflater() {
		prepareInflater();
		return inf;
	}

	private void prepareInflater() {
		if (inf == null)
			inf = InflaterCache.get();
		else
			inf.reset();
	}
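
	/** Load and hold the pack block containing the given file position. */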
	void pin(DfsPackFile pack, long position) throws IOException {
		DfsBlock b = block;
		if (b == null || !b.contains(pack.key, position)) {
			// If memory is low, we may need what is in our window field to
			// be cleaned up by the GC during the get for the next window.
			// So we always clear it, even though we are just going to set
			// it again.
			block = null;
			block = pack.getOrLoadBlock(position, this);
		}
	}
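
	/** Release the currently pinned block so it may be garbage collected. */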
	void unpin() {
		block = null;
	}

	/** @return IO statistics accumulated by this reader. */
	public DfsReaderIoStats getIoStats() {
		return new DfsReaderIoStats(stats);
	}

	/** Release the current window cursor. */
	@Override
	public void close() {
		last = null;
		block = null;
		baseCache = null;
		try {
			InflaterCache.release(inf);
		} finally {
			inf = null;
		}
	}
}