
DhtPackParser.java 39KB

/*
 * Copyright (C) 2011, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 * names of its contributors may be used to endorse or promote
 * products derived from this software without specific prior
 * written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.storage.dht;

import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;
import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
import static org.eclipse.jgit.storage.dht.ChunkInfo.OBJ_MIXED;

import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeoutException;

import org.eclipse.jgit.generated.storage.dht.proto.GitStore;
import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo;
import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.MutableObjectId;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdSubclassMap;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.storage.dht.spi.Context;
import org.eclipse.jgit.storage.dht.spi.Database;
import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
import org.eclipse.jgit.storage.file.PackLock;
import org.eclipse.jgit.transport.PackParser;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.eclipse.jgit.util.LongList;

import com.google.protobuf.ByteString;

/** Parses the pack stream into chunks, and indexes the chunks for lookup. */
public class DhtPackParser extends PackParser {
	private final DhtObjDatabase objdb;

	private final RepositoryKey repo;

	private final Database db;

	private final DhtInserterOptions options;

	private final MessageDigest chunkKeyDigest;

	/** Number of objects to write to the global index at once. */
	private final int linkBatchSize;

	private Boolean saveAsCachedPack;

	private WriteBuffer dbWriteBuffer;

	/** Chunk writers for the 4 major object types, keyed by object type code. */
	private ChunkFormatter[] openChunks;

	/** Edges for current chunks. */
	private Edges[] openEdges;

	/** Prior chunks that were written, keyed by object type code. */
	private List<ChunkKey>[] chunkByOrder;

	/** Information on chunks already written out. */
	private Map<ChunkKey, ChunkInfo> infoByKey;

	/** Information on chunks already written out. */
	private Map<ChunkKey, ChunkMeta> chunkMeta;

	/** ChunkMeta that needs to be written out again, as it was modified. */
	private Map<ChunkKey, ChunkMeta> dirtyMeta;

	private Map<ChunkKey, Edges> chunkEdges;

	// Correlated lists, sorted by object stream position.
	private LongList objStreamPos;
	private LongList objChunkPtrs;

	/** Formatter handling the current object's data stream. */
	private ChunkFormatter currChunk;

	/** Current type of the object, if known. */
	private int currType;

	/** Position of the current object in the chunks we create. */
	private long currChunkPtr;

	/** If using OFS_DELTA, location of the base object in chunk space. */
	private long currBasePtr;

	/** Starting byte of the object data (aka end of the object header). */
	private int currDataPos;

	/** Total number of bytes in the object representation. */
	private long currPackedSize;

	/** Total number of bytes in the entire inflated object. */
	private long currInflatedSize;

	/** If the current object is fragmented, the list of chunks holding it. */
	private List<ChunkKey> currFragments;

	/** Previously written chunk that is being re-read during delta resolution. */
	private PackChunk dbChunk;

	/** Current read position in {@link #dbChunk}. */
	private int dbPtr;

	/** Recent chunks that were written, or recently read. */
	private LinkedHashMap<ChunkKey, PackChunk> chunkReadBackCache;

	/** Objects parsed from the stream, sorted by SHA-1. */
	private List<DhtInfo> objectListByName;

	/** Objects parsed from the stream, sorted by chunk (aka offset). */
	private List<DhtInfo> objectListByChunk;

	/** Iterators to write {@link #objectListByName} into the global index. */
	private ListIterator<DhtInfo>[] linkIterators;

	/** If the pack stream was self-contained, the cached pack info record key. */
	private CachedPackKey cachedPackKey;

	private CanonicalTreeParser treeParser;

	private final MutableObjectId idBuffer;

	private ObjectIdSubclassMap<DhtInfo> objectMap;

	DhtPackParser(DhtObjDatabase objdb, InputStream in) {
		super(objdb, in);
		// Disable collision checking. DhtReader performs some magic to look
		// only at old objects, so a colliding replacement will be ignored until
		// it's removed during garbage collection.
		//
		setCheckObjectCollisions(false);

		this.objdb = objdb;
		this.repo = objdb.getRepository().getRepositoryKey();
		this.db = objdb.getDatabase();
		this.options = objdb.getInserterOptions();
		this.chunkKeyDigest = Constants.newMessageDigest();

		dbWriteBuffer = db.newWriteBuffer();
		openChunks = new ChunkFormatter[5];
		openEdges = new Edges[5];
		chunkByOrder = newListArray(5);
		infoByKey = new HashMap<ChunkKey, ChunkInfo>();
		dirtyMeta = new HashMap<ChunkKey, ChunkMeta>();
		chunkMeta = new HashMap<ChunkKey, ChunkMeta>();
		chunkEdges = new HashMap<ChunkKey, Edges>();
		treeParser = new CanonicalTreeParser();
		idBuffer = new MutableObjectId();
		objectMap = new ObjectIdSubclassMap<DhtInfo>();

		final int max = options.getParserCacheSize();
		chunkReadBackCache = new LinkedHashMap<ChunkKey, PackChunk>(max, 0.75f, true) {
			private static final long serialVersionUID = 1L;

			@Override
			protected boolean removeEldestEntry(Entry<ChunkKey, PackChunk> e) {
				return max < size();
			}
		};

		// The typical WriteBuffer flushes at 512 KiB increments, and
		// the typical ObjectInfo record is around 180 bytes. Use these
		// figures to come up with a rough estimate for how many links
		// to construct in one region of the DHT before moving onto a
		// different region in order to increase parallelism on large
		// object imports.
		//
		linkBatchSize = 512 * 1024 / 180;
	}

	@SuppressWarnings("unchecked")
	private static <T> List<T>[] newListArray(int size) {
		return new List[size];
	}

	/** @return if true, the pack stream is marked as a cached pack. */
	public boolean isSaveAsCachedPack() {
		return saveAsCachedPack != null && saveAsCachedPack.booleanValue();
	}

	/**
	 * Enable saving the pack stream as a cached pack.
	 *
	 * @param save
	 *            if true, the stream is saved.
	 */
	public void setSaveAsCachedPack(boolean save) {
		saveAsCachedPack = Boolean.valueOf(save);
	}

	@Override
	public PackLock parse(ProgressMonitor receiving, ProgressMonitor resolving)
			throws IOException {
		boolean success = false;
		try {
			PackLock lock = super.parse(receiving, resolving);

			chunkReadBackCache = null;
			openChunks = null;
			openEdges = null;
			treeParser = null;

			final int objCnt = getObjectCount();
			if (objCnt == 0) {
				// If no objects were received, no chunks were created. Leaving
				// success set to false and rolling back is a good way to make
				// sure this is true.
				//
				return lock;
			}

			createObjectLists();
			if (isSaveAsCachedPack())
				putCachedPack();
			computeChunkEdges();
			putChunkIndexes();
			putDirtyMeta();

			chunkMeta = null;
			chunkEdges = null;
			dirtyMeta = null;
			objectMap = null;
			objectListByChunk = null;

			dbWriteBuffer.flush();
			putGlobalIndex(resolving);
			dbWriteBuffer.flush();

			success = true;
			return lock;
		} finally {
			openChunks = null;
			openEdges = null;
			objStreamPos = null;
			objChunkPtrs = null;
			currChunk = null;
			currFragments = null;
			dbChunk = null;
			chunkReadBackCache = null;
			infoByKey = null;
			chunkMeta = null;
			chunkEdges = null;
			treeParser = null;

			if (!success)
				rollback();

			chunkByOrder = null;
			objectListByName = null;
			objectListByChunk = null;
			linkIterators = null;
			dbWriteBuffer = null;
		}
	}
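	// Capture the parser's name-sorted object list, then sort a copy by
	// chunk pointer so objects that live in the same chunk are adjacent.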
	@SuppressWarnings("unchecked")
	private void createObjectLists() {
		List objs = getSortedObjectList(null /* by name */);
		objectListByName = objs;

		int cnt = objectListByName.size();
		DhtInfo[] copy = objectListByName.toArray(new DhtInfo[cnt]);
		Arrays.sort(copy, new Comparator<PackedObjectInfo>() {
			public int compare(PackedObjectInfo o1, PackedObjectInfo o2) {
				DhtInfo a = (DhtInfo) o1;
				DhtInfo b = (DhtInfo) o2;
				return Long.signum(a.chunkPtr - b.chunkPtr);
			}
		});
		objectListByChunk = Arrays.asList(copy);
	}
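	// Record the stream as a cached pack. If any referenced object has no
	// chunk pointer it was never in the stream, so the pack is not
	// self-contained and is not cached. Objects that are never referenced
	// by another object in the stream become the pack's tips.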
	private void putCachedPack() throws DhtException {
		CachedPackInfo.Builder info = CachedPackInfo.newBuilder();

		for (DhtInfo obj : objectMap) {
			if (!obj.isInPack())
				return;
			if (!obj.isReferenced())
				info.getTipListBuilder().addObjectName(obj.name());
		}

		MessageDigest version = Constants.newMessageDigest();
		addChunkList(info, version, chunkByOrder[OBJ_TAG]);
		addChunkList(info, version, chunkByOrder[OBJ_COMMIT]);
		addChunkList(info, version, chunkByOrder[OBJ_TREE]);
		addChunkList(info, version, chunkByOrder[OBJ_BLOB]);

		info.setName(computePackName().name());
		info.setVersion(ObjectId.fromRaw(version.digest()).name());

		cachedPackKey = CachedPackKey.fromInfo(info.build());
		for (List<ChunkKey> list : chunkByOrder) {
			if (list == null)
				continue;
			for (ChunkKey key : list) {
				ChunkInfo oldInfo = infoByKey.get(key);
				GitStore.ChunkInfo.Builder b =
						GitStore.ChunkInfo.newBuilder(oldInfo.getData());
				b.setCachedPackKey(cachedPackKey.asString());
				ChunkInfo newInfo = new ChunkInfo(key, b.build());
				infoByKey.put(key, newInfo);
				// A fragment was already put, and has to be re-put.
				// Non-fragments will be put later and are not put now.
				if (newInfo.getData().getIsFragment())
					db.repository().put(repo, newInfo, dbWriteBuffer);
			}
		}

		db.repository().put(repo, info.build(), dbWriteBuffer);
	}

	private void addChunkList(CachedPackInfo.Builder info,
			MessageDigest version, List<ChunkKey> list) {
		if (list == null)
			return;

		long bytesTotal = info.getBytesTotal();
		long objectsTotal = info.getObjectsTotal();
		long objectsDelta = info.getObjectsDelta();

		byte[] buf = new byte[Constants.OBJECT_ID_LENGTH];
		for (ChunkKey key : list) {
			ChunkInfo chunkInfo = infoByKey.get(key);
			GitStore.ChunkInfo c = chunkInfo.getData();
			int len = c.getChunkSize() - ChunkFormatter.TRAILER_SIZE;
			bytesTotal += len;
			objectsTotal += c.getObjectCounts().getTotal();
			objectsDelta += c.getObjectCounts().getOfsDelta();
			objectsDelta += c.getObjectCounts().getRefDelta();
			info.getChunkListBuilder().addChunkKey(
					chunkInfo.getChunkKey().asString());
			chunkInfo.getChunkKey().getChunkHash().copyRawTo(buf, 0);
			version.update(buf);
		}

		info.setBytesTotal(bytesTotal);
		info.setObjectsTotal(objectsTotal);
		info.setObjectsDelta(objectsDelta);
	}

	private ObjectId computePackName() {
		byte[] buf = new byte[Constants.OBJECT_ID_LENGTH];
		MessageDigest md = Constants.newMessageDigest();
		for (DhtInfo otp : objectListByName) {
			otp.copyRawTo(buf, 0);
			md.update(buf);
		}
		return ObjectId.fromRaw(md.digest());
	}
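	// Undo a partially completed import: abort pending writes, then
	// remove the cached pack record, any global index links already
	// written, and every chunk stored so far.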
	private void rollback() throws DhtException {
		try {
			dbWriteBuffer.abort();
			dbWriteBuffer = db.newWriteBuffer();

			if (cachedPackKey != null)
				db.repository().remove(repo, cachedPackKey, dbWriteBuffer);

			if (linkIterators != null) {
				boolean removed = true;
				while (removed) {
					removed = false;
					for (ListIterator<DhtInfo> itr : linkIterators) {
						int cnt = 0;
						while (itr.hasPrevious() && cnt < linkBatchSize) {
							DhtInfo oe = itr.previous();
							db.objectIndex().remove( //
									ObjectIndexKey.create(repo, oe), //
									chunkOf(oe.chunkPtr), //
									dbWriteBuffer);
							cnt++;
						}
						if (0 < cnt)
							removed = true;
					}
				}
			}

			deleteChunks(chunkByOrder[OBJ_COMMIT]);
			deleteChunks(chunkByOrder[OBJ_TREE]);
			deleteChunks(chunkByOrder[OBJ_BLOB]);
			deleteChunks(chunkByOrder[OBJ_TAG]);

			dbWriteBuffer.flush();
		} catch (Throwable err) {
			throw new DhtException(DhtText.get().packParserRollbackFailed, err);
		}
	}

	private void deleteChunks(List<ChunkKey> list) throws DhtException {
		if (list != null) {
			for (ChunkKey key : list) {
				db.chunk().remove(key, dbWriteBuffer);
				db.repository().remove(repo, key, dbWriteBuffer);
			}
		}
	}
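	// Write the object->chunk links into the global index, rotating among
	// up to 32 segments of the name-sorted list in linkBatchSize batches
	// so consecutive writes land in different regions of the DHT.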
	private void putGlobalIndex(ProgressMonitor pm) throws DhtException {
		int objcnt = objectListByName.size();
		pm.beginTask(DhtText.get().recordingObjects, objcnt);

		int segments = Math.max(1, Math.min(objcnt / linkBatchSize, 32));
		linkIterators = newListIteratorArray(segments);

		int objsPerSegment = objcnt / segments;
		int beginIdx = 0;
		for (int i = 0; i < segments - 1; i++) {
			int endIdx = Math.min(beginIdx + objsPerSegment, objcnt);
			linkIterators[i] = objectListByName.subList(beginIdx, endIdx)
					.listIterator();
			beginIdx = endIdx;
		}
		linkIterators[segments - 1] = objectListByName
				.subList(beginIdx, objcnt).listIterator();

		boolean inserted = true;
		while (inserted) {
			inserted = false;
			for (ListIterator<DhtInfo> itr : linkIterators) {
				int cnt = 0;
				while (itr.hasNext() && cnt < linkBatchSize) {
					DhtInfo oe = itr.next();
					db.objectIndex().add( //
							ObjectIndexKey.create(repo, oe), //
							oe.info(chunkOf(oe.chunkPtr)), //
							dbWriteBuffer);
					cnt++;
				}
				if (0 < cnt) {
					pm.update(cnt);
					inserted = true;
				}
			}
		}
		pm.endTask();
	}

	@SuppressWarnings("unchecked")
	private static ListIterator<DhtInfo>[] newListIteratorArray(int size) {
		return new ListIterator[size];
	}
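	// Walk the chunk-sorted object list, slicing out each run of objects
	// that share a chunk and handing the run to computeEdges. A chunk
	// holding more than one object type degrades to OBJ_MIXED.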
	private void computeChunkEdges() throws DhtException {
		List<DhtInfo> objs = objectListByChunk;
		int beginIdx = 0;
		ChunkKey key = chunkOf(objs.get(0).chunkPtr);
		int type = typeOf(objs.get(0).chunkPtr);

		int objIdx = 1;
		for (; objIdx < objs.size(); objIdx++) {
			DhtInfo oe = objs.get(objIdx);
			ChunkKey oeKey = chunkOf(oe.chunkPtr);
			if (!key.equals(oeKey)) {
				computeEdges(objs.subList(beginIdx, objIdx), key, type);
				beginIdx = objIdx;

				key = oeKey;
				type = typeOf(oe.chunkPtr);
			}
			if (type != OBJ_MIXED && type != typeOf(oe.chunkPtr))
				type = OBJ_MIXED;
		}
		computeEdges(objs.subList(beginIdx, objs.size()), key, type);
	}

	private void computeEdges(List<DhtInfo> objs, ChunkKey key, int type)
			throws DhtException {
		Edges edges = chunkEdges.get(key);
		if (edges == null)
			return;

		for (DhtInfo obj : objs)
			edges.remove(obj);

		switch (type) {
		case OBJ_COMMIT:
			edges.commitEdges = toChunkList(edges.commitIds);
			break;
		case OBJ_TREE:
			// TODO prefetch tree edges
			break;
		}

		edges.commitIds = null;
	}

	private List<ChunkKey> toChunkList(Set<DhtInfo> objects)
			throws DhtException {
		if (objects == null || objects.isEmpty())
			return null;

		Map<ChunkKey, ChunkOrderingEntry> map = new HashMap<ChunkKey, ChunkOrderingEntry>();
		for (DhtInfo obj : objects) {
			if (!obj.isInPack())
				continue;
			long chunkPtr = obj.chunkPtr;
			ChunkKey key = chunkOf(chunkPtr);
			ChunkOrderingEntry e = map.get(key);
			if (e == null) {
				e = new ChunkOrderingEntry();
				e.key = key;
				e.order = chunkIdx(chunkPtr);
				map.put(key, e);
			} else {
				e.order = Math.min(e.order, chunkIdx(chunkPtr));
			}
		}

		ChunkOrderingEntry[] tmp = map.values().toArray(
				new ChunkOrderingEntry[map.size()]);
		Arrays.sort(tmp);

		ChunkKey[] out = new ChunkKey[tmp.length];
		for (int i = 0; i < tmp.length; i++)
			out[i] = tmp[i].key;
		return Arrays.asList(out);
	}

	private static final class ChunkOrderingEntry implements
			Comparable<ChunkOrderingEntry> {
		ChunkKey key;

		int order;

		public int compareTo(ChunkOrderingEntry o) {
			return order - o.order;
		}
	}
	private void putChunkIndexes() throws DhtException {
		List<DhtInfo> objs = objectListByChunk;
		int sIdx = 0;
		DhtInfo oe = objs.get(0);
		oe.setOffset(offsetOf(oe.chunkPtr));

		ChunkKey key = chunkOf(oe.chunkPtr);
		int type = typeOf(oe.chunkPtr);

		int objIdx = 1;
		for (; objIdx < objs.size(); objIdx++) {
			oe = objs.get(objIdx);
			oe.setOffset(offsetOf(oe.chunkPtr));

			ChunkKey oeKey = chunkOf(oe.chunkPtr);
			if (!key.equals(oeKey)) {
				putChunkIndex(objs.subList(sIdx, objIdx), key, type);
				sIdx = objIdx;

				key = oeKey;
				type = typeOf(oe.chunkPtr);
			}
			if (type != OBJ_MIXED && type != typeOf(oe.chunkPtr))
				type = OBJ_MIXED;
		}
		putChunkIndex(objs.subList(sIdx, objs.size()), key, type);
	}

	private void putChunkIndex(List<DhtInfo> objectList, ChunkKey key, int type)
			throws DhtException {
		ChunkInfo oldInfo = infoByKey.get(key);
		GitStore.ChunkInfo.Builder info = GitStore.ChunkInfo
				.newBuilder(oldInfo.getData());

		PackChunk.Members builder = new PackChunk.Members();
		builder.setChunkKey(key);

		byte[] index = ChunkIndex.create(objectList);
		info.setIndexSize(index.length);
		builder.setChunkIndex(index);

		ChunkMeta meta = dirtyMeta.remove(key);
		if (meta == null)
			meta = chunkMeta.get(key);

		switch (type) {
		case OBJ_COMMIT: {
			Edges edges = chunkEdges.get(key);
			List<ChunkKey> e = edges != null ? edges.commitEdges : null;
			List<ChunkKey> s = sequentialHint(key, OBJ_COMMIT);
			if (e == null)
				e = Collections.emptyList();
			if (s == null)
				s = Collections.emptyList();
			if (!e.isEmpty() || !s.isEmpty()) {
				ChunkMeta.Builder m = edit(meta);
				ChunkMeta.PrefetchHint.Builder h = m.getCommitPrefetchBuilder();
				for (ChunkKey k : e)
					h.addEdge(k.asString());
				for (ChunkKey k : s)
					h.addSequential(k.asString());
				meta = m.build();
			}
			break;
		}
		case OBJ_TREE: {
			List<ChunkKey> s = sequentialHint(key, OBJ_TREE);
			if (s == null)
				s = Collections.emptyList();
			if (!s.isEmpty()) {
				ChunkMeta.Builder m = edit(meta);
				ChunkMeta.PrefetchHint.Builder h = m.getTreePrefetchBuilder();
				for (ChunkKey k : s)
					h.addSequential(k.asString());
				meta = m.build();
			}
			break;
		}
		}

		if (meta != null) {
			info.setMetaSize(meta.getSerializedSize());
			builder.setMeta(meta);
		}

		ChunkInfo newInfo = new ChunkInfo(key, info.build());
		infoByKey.put(key, newInfo);
		db.repository().put(repo, newInfo, dbWriteBuffer);
		db.chunk().put(builder, dbWriteBuffer);
	}

	private static ChunkMeta.Builder edit(ChunkMeta meta) {
		if (meta != null)
			return ChunkMeta.newBuilder(meta);
		return ChunkMeta.newBuilder();
	}

	private List<ChunkKey> sequentialHint(ChunkKey key, int typeCode) {
		List<ChunkKey> all = chunkByOrder[typeCode];
		if (all == null)
			return null;
		int idx = all.indexOf(key);
		if (0 <= idx) {
			int max = options.getPrefetchDepth();
			int end = Math.min(idx + 1 + max, all.size());
			return all.subList(idx + 1, end);
		}
		return null;
	}

	private void putDirtyMeta() throws DhtException {
		for (Map.Entry<ChunkKey, ChunkMeta> meta : dirtyMeta.entrySet()) {
			PackChunk.Members builder = new PackChunk.Members();
			builder.setChunkKey(meta.getKey());
			builder.setMeta(meta.getValue());
			db.chunk().put(builder, dbWriteBuffer);
		}
	}
	@Override
	protected PackedObjectInfo newInfo(AnyObjectId id, UnresolvedDelta delta,
			ObjectId baseId) {
		DhtInfo obj = objectMap.addIfAbsent(new DhtInfo(id));
		if (delta != null) {
			DhtDelta d = (DhtDelta) delta;
			obj.chunkPtr = d.chunkPtr;
			obj.packedSize = d.packedSize;
			obj.inflatedSize = d.inflatedSize;
			obj.base = baseId;
			obj.setType(d.getType());
			if (d.isFragmented())
				obj.setFragmented();
		}
		return obj;
	}

	@Override
	protected void onPackHeader(long objCnt) throws IOException {
		if (Integer.MAX_VALUE < objCnt) {
			throw new DhtException(MessageFormat.format(
					DhtText.get().tooManyObjectsInPack, Long.valueOf(objCnt)));
		}

		objStreamPos = new LongList((int) objCnt);
		objChunkPtrs = new LongList((int) objCnt);

		if (saveAsCachedPack == null)
			setSaveAsCachedPack(1000 < objCnt);
	}

	@Override
	protected void onBeginWholeObject(long streamPosition, int type,
			long inflatedSize) throws IOException {
		ChunkFormatter w = begin(type);
		if (!w.whole(type, inflatedSize)) {
			endChunk(type);
			w = begin(type);
			if (!w.whole(type, inflatedSize))
				throw panicCannotInsert();
		}

		currType = type;
		currDataPos = w.position();
		currPackedSize = 0;
		currInflatedSize = inflatedSize;
		objStreamPos.add(streamPosition);
	}

	@Override
	protected void onEndWholeObject(PackedObjectInfo info) throws IOException {
		boolean fragmented = currFragments != null;
		endOneObject();

		DhtInfo oe = (DhtInfo) info;
		oe.chunkPtr = currChunkPtr;
		oe.packedSize = currPackedSize;
		oe.inflatedSize = currInflatedSize;
		oe.setType(currType);
		if (fragmented)
			oe.setFragmented();
	}

	private void endOneObject() throws DhtException {
		if (currFragments != null)
			endFragmentedObject();
		objChunkPtrs.add(currChunkPtr);
	}

	@Override
	protected void onBeginOfsDelta(long deltaPos, long basePos,
			long inflatedSize) throws IOException {
		long basePtr = objChunkPtrs.get(findStreamIndex(basePos));
		int type = typeOf(basePtr);

		currType = type;
		currPackedSize = 0;
		currInflatedSize = inflatedSize;
		currBasePtr = basePtr;
		objStreamPos.add(deltaPos);

		ChunkFormatter w = begin(type);
		if (isInCurrentChunk(basePtr)) {
			if (w.ofsDelta(inflatedSize, w.position() - offsetOf(basePtr))) {
				currDataPos = w.position();
				return;
			}

			endChunk(type);
			w = begin(type);
		}

		if (!longOfsDelta(w, inflatedSize, basePtr)) {
			endChunk(type);
			w = begin(type);
			if (!longOfsDelta(w, inflatedSize, basePtr))
				throw panicCannotInsert();
		}

		currDataPos = w.position();
	}
	@Override
	protected void onBeginRefDelta(long deltaPos, AnyObjectId baseId,
			long inflatedSize) throws IOException {
		// Try to get the base type, but only if it was seen before in this
		// pack stream. If not, assume the worst case of BLOB type.
		//
		int typeCode;
		DhtInfo baseInfo = objectMap.get(baseId);
		if (baseInfo != null && baseInfo.isInPack()) {
			typeCode = baseInfo.getType();
			currType = typeCode;
		} else {
			typeCode = OBJ_BLOB;
			currType = -1;
		}

		ChunkFormatter w = begin(typeCode);
		if (!w.refDelta(inflatedSize, baseId)) {
			endChunk(typeCode);
			w = begin(typeCode);
			if (!w.refDelta(inflatedSize, baseId))
				throw panicCannotInsert();
		}

		currDataPos = w.position();
		currPackedSize = 0;
		currInflatedSize = inflatedSize;
		objStreamPos.add(deltaPos);
	}
	@Override
	protected DhtDelta onEndDelta() throws IOException {
		boolean fragmented = currFragments != null;
		endOneObject();

		DhtDelta delta = new DhtDelta();
		delta.chunkPtr = currChunkPtr;
		delta.packedSize = currPackedSize;
		delta.inflatedSize = currInflatedSize;
		if (0 < currType)
			delta.setType(currType);
		if (fragmented)
			delta.setFragmented();
		return delta;
	}

	@Override
	protected void onObjectData(Source src, byte[] raw, int pos, int len)
			throws IOException {
		if (src != Source.INPUT)
			return;

		if (currChunk.append(raw, pos, len)) {
			currPackedSize += len;
			return;
		}
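		// The append failed. If this chunk contains nothing but the
		// current object, the object itself is larger than a chunk and
		// must be fragmented across several chunks.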
		if (currFragments == null && currChunk.getObjectCount() == 1)
			currFragments = new LinkedList<ChunkKey>();
		if (currFragments != null) {
			appendToFragment(raw, pos, len);
			return;
		}

		// Everything between dataPos and dataEnd must be saved.
		//
		final int dataPos = currDataPos;
		final int dataEnd = currChunk.position();
		final int hdrPos = offsetOf(currChunkPtr);
		final int hdrLen = dataPos - hdrPos;
		final int type = typeOf(currChunkPtr);
		byte[] dataOld = currChunk.getRawChunkDataArray();
		final int typeOld = currChunk.getCurrentObjectType();

		currChunk.rollback();
		endChunk(type);

		final ChunkFormatter w = begin(type);
		switch (typeOld) {
		case OBJ_COMMIT:
		case OBJ_BLOB:
		case OBJ_TREE:
		case OBJ_TAG:
		case OBJ_REF_DELTA:
			w.adjustObjectCount(1, typeOld);
			if (!w.append(dataOld, hdrPos, hdrLen))
				throw panicCannotInsert();
			break;

		case OBJ_OFS_DELTA:
			if (!longOfsDelta(w, currInflatedSize, currBasePtr))
				throw panicCannotInsert();
			break;

		default:
			throw new DhtException("Internal programming error: " + typeOld);
		}

		currDataPos = w.position();
		if (dataPos < dataEnd && !w.append(dataOld, dataPos, dataEnd - dataPos))
			throw panicCannotInsert();
		dataOld = null;

		if (w.append(raw, pos, len)) {
			currPackedSize += len;
		} else {
			currFragments = new LinkedList<ChunkKey>();
			appendToFragment(raw, pos, len);
		}
	}
	private boolean longOfsDelta(ChunkFormatter w, long infSize, long basePtr) {
		final int type = typeOf(basePtr);
		final List<ChunkKey> infoList = chunkByOrder[type];
		final int baseIdx = chunkIdx(basePtr);
		final ChunkInfo baseInfo = infoByKey.get(infoList.get(baseIdx));

		// Go backwards to the start of the base's chunk.
		long relativeChunkStart = 0;
		for (int i = infoList.size() - 1; baseIdx <= i; i--) {
			GitStore.ChunkInfo info = infoByKey.get(infoList.get(i)).getData();
			int packSize = info.getChunkSize() - ChunkFormatter.TRAILER_SIZE;
			relativeChunkStart += packSize;
		}

		// Offset to the base goes back to start of our chunk, then start of
		// the base chunk, but slide forward the distance of the base within
		// its own chunk.
		//
		long ofs = w.position() + relativeChunkStart - offsetOf(basePtr);
		if (w.ofsDelta(infSize, ofs)) {
			w.useBaseChunk(relativeChunkStart, baseInfo.getChunkKey());
			return true;
		}
		return false;
	}
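	// Spill data across as many chunks as needed, marking each filled
	// chunk as a fragment and opening a fresh chunk of the same type.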
	private void appendToFragment(byte[] raw, int pos, int len)
			throws DhtException {
		while (0 < len) {
			if (currChunk.free() == 0) {
				int typeCode = typeOf(currChunkPtr);
				currChunk.setFragment();
				currFragments.add(endChunk(typeCode));
				currChunk = openChunk(typeCode);
			}

			int n = Math.min(len, currChunk.free());
			currChunk.append(raw, pos, n);
			currPackedSize += n;
			pos += n;
			len -= n;
		}
	}

	private void endFragmentedObject() throws DhtException {
		currChunk.setFragment();
		ChunkKey lastKey = endChunk(typeOf(currChunkPtr));
		if (lastKey != null)
			currFragments.add(lastKey);

		ChunkMeta.Builder protoBuilder = ChunkMeta.newBuilder();
		for (ChunkKey key : currFragments)
			protoBuilder.addFragment(key.asString());
		ChunkMeta protoMeta = protoBuilder.build();

		for (ChunkKey key : currFragments) {
			ChunkMeta oldMeta = chunkMeta.get(key);
			if (oldMeta != null) {
				ChunkMeta.Builder newMeta = ChunkMeta.newBuilder(oldMeta);
				newMeta.clearFragment();
				newMeta.mergeFrom(protoMeta);
				ChunkMeta meta = newMeta.build();
				dirtyMeta.put(key, meta);
				chunkMeta.put(key, meta);
			} else {
				dirtyMeta.put(key, protoMeta);
				chunkMeta.put(key, protoMeta);
			}
		}
		currFragments = null;
	}
	@Override
	protected void onInflatedObjectData(PackedObjectInfo obj, int typeCode,
			byte[] data) throws IOException {
		DhtInfo info = (DhtInfo) obj;
		info.inflatedSize = data.length;
		info.setType(typeCode);

		switch (typeCode) {
		case OBJ_COMMIT:
			onCommit(info, data);
			break;
		case OBJ_TREE:
			onTree(data);
			break;
		case OBJ_TAG:
			onTag(data);
			break;
		}
	}
	private void onCommit(DhtInfo obj, byte[] raw) throws DhtException {
		Edges edges = edges(obj.chunkPtr);
		edges.remove(obj);

		// TODO compute hints for trees.
		if (isSaveAsCachedPack()) {
			idBuffer.fromString(raw, 5);
			lookupByName(idBuffer).setReferenced();
		}
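		// A canonical commit starts with "tree " + 40 hex digits + '\n'
		// (46 bytes), followed by zero or more 48 byte lines of the form
		// "parent " + 40 hex digits + '\n'.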
		int ptr = 46;
		while (raw[ptr] == 'p') {
			idBuffer.fromString(raw, ptr + 7);
			DhtInfo p = lookupByName(idBuffer);
			p.setReferenced();
			edges.commit(p);
			ptr += 48;
		}
	}
	private void onTree(byte[] data) {
		if (isSaveAsCachedPack()) {
			treeParser.reset(data);
			while (!treeParser.eof()) {
				idBuffer.fromRaw(treeParser.idBuffer(), treeParser.idOffset());
				lookupByName(idBuffer).setReferenced();
				treeParser.next();
			}
		}
	}

	private void onTag(byte[] data) {
		if (isSaveAsCachedPack()) {
			idBuffer.fromString(data, 7); // "object $sha1"
			lookupByName(idBuffer).setReferenced();
		}
	}

	private DhtInfo lookupByName(AnyObjectId obj) {
		DhtInfo info = objectMap.get(obj);
		if (info == null) {
			info = new DhtInfo(obj);
			objectMap.add(info);
		}
		return info;
	}
	private Edges edges(long chunkPtr) throws DhtException {
		if (isInCurrentChunk(chunkPtr)) {
			int type = typeOf(chunkPtr);
			Edges s = openEdges[type];
			if (s == null) {
				s = new Edges();
				openEdges[type] = s;
			}
			return s;
		} else {
			ChunkKey key = chunkOf(chunkPtr);
			Edges s = chunkEdges.get(key);
			if (s == null) {
				s = new Edges();
				chunkEdges.put(key, s);
			}
			return s;
		}
	}

	private static class Edges {
		Set<DhtInfo> commitIds;

		List<ChunkKey> commitEdges;

		void commit(DhtInfo id) {
			if (commitIds == null)
				commitIds = new HashSet<DhtInfo>();
			commitIds.add(id);
		}

		void remove(DhtInfo id) {
			if (commitIds != null)
				commitIds.remove(id);
		}
	}
	@Override
	protected ObjectTypeAndSize seekDatabase(PackedObjectInfo obj,
			ObjectTypeAndSize info) throws IOException {
		return seekDatabase(((DhtInfo) obj).chunkPtr, info);
	}

	@Override
	protected ObjectTypeAndSize seekDatabase(UnresolvedDelta delta,
			ObjectTypeAndSize info) throws IOException {
		return seekDatabase(((DhtDelta) delta).chunkPtr, info);
	}

	private ObjectTypeAndSize seekDatabase(long chunkPtr, ObjectTypeAndSize info)
			throws DhtException {
		seekChunk(chunkOf(chunkPtr), true);
		dbPtr = dbChunk.readObjectTypeAndSize(offsetOf(chunkPtr), info);
		return info;
	}

	@Override
	protected int readDatabase(byte[] dst, int pos, int cnt) throws IOException {
		int n = dbChunk.read(dbPtr, dst, pos, cnt);
		if (0 < n) {
			dbPtr += n;
			return n;
		}
		// Writing of the ChunkMeta for fragments is delayed, so it isn't
		// available on the chunk if the chunk was read back from the
		// database. Use our copy of the ChunkMeta instead of the PackChunk's.
		ChunkMeta meta = chunkMeta.get(dbChunk.getChunkKey());
		if (meta == null)
			return 0;

		ChunkKey next = ChunkMetaUtil.getNextFragment(meta, dbChunk.getChunkKey());
		if (next == null)
			return 0;

		seekChunk(next, false);
		n = dbChunk.read(0, dst, pos, cnt);
		dbPtr = n;
		return n;
	}
	private void seekChunk(ChunkKey key, boolean cache) throws DhtException,
			DhtTimeoutException {
		if (dbChunk == null || !dbChunk.getChunkKey().equals(key)) {
			dbChunk = chunkReadBackCache.get(key);
			if (dbChunk == null) {
				dbWriteBuffer.flush();

				Collection<PackChunk.Members> found;
				Context opt = Context.READ_REPAIR;
				Sync<Collection<PackChunk.Members>> sync = Sync.create();
				db.chunk().get(opt, Collections.singleton(key), sync);
				try {
					found = sync.get(objdb.getReaderOptions().getTimeout());
				} catch (InterruptedException e) {
					throw new DhtTimeoutException(e);
				} catch (TimeoutException e) {
					throw new DhtTimeoutException(e);
				}

				if (found.isEmpty()) {
					throw new DhtException(MessageFormat.format(
							DhtText.get().missingChunk, key));
				}

				dbChunk = found.iterator().next().build();
				if (cache)
					chunkReadBackCache.put(key, dbChunk);
			}
		}
	}
	@Override
	protected boolean onAppendBase(int typeCode, byte[] data,
			PackedObjectInfo info) throws IOException {
		return false; // This implementation does not copy base objects.
	}

	@Override
	protected void onEndThinPack() throws IOException {
		// Do nothing, this event is not relevant.
	}

	@Override
	protected void onPackFooter(byte[] hash) throws IOException {
		// TODO Combine together fractional chunks to reduce overhead.
		// Fractional chunks are common for single-commit pushes since
		// they are broken out by object type.
		//
		// TODO Try to combine the chunk data and its index into a single
		// put call for the last chunk of each type. This would break the
		// read back we do in seekDatabase during delta resolution.
		//
		// If there are deltas to be resolved the pending chunks
		// will need to be reloaded later. Ensure they are stored.
		//
		endChunk(OBJ_COMMIT);
		endChunk(OBJ_TREE);
		endChunk(OBJ_BLOB);
		endChunk(OBJ_TAG);

		// These are only necessary during initial parsing. Drop them now.
		//
		objStreamPos = null;
		objChunkPtrs = null;
	}

	@Override
	protected void onObjectHeader(Source src, byte[] raw, int pos, int len)
			throws IOException {
		// Do nothing, the original stream headers are not used.
	}

	@Override
	protected void onStoreStream(byte[] raw, int pos, int len)
			throws IOException {
		// Do nothing, the stream is being sliced and cannot be stored as-is.
	}

	@Override
	protected boolean checkCRC(int oldCRC) {
		return true; // Don't bother to check CRCs, assume the chunk is OK.
	}
	private ChunkFormatter begin(int typeCode) throws DhtException {
		ChunkFormatter w = openChunk(typeCode);
		currChunk = w;
		currChunkPtr = makeObjectPointer(w, typeCode);
		return w;
	}

	private ChunkFormatter openChunk(int typeCode) throws DhtException {
		if (typeCode == 0)
			throw new DhtException("Invalid internal typeCode 0");

		ChunkFormatter w = openChunks[typeCode];
		if (w == null) {
			w = new ChunkFormatter(repo, options);
			w.setSource(GitStore.ChunkInfo.Source.RECEIVE);
			w.setObjectType(typeCode);
			openChunks[typeCode] = w;
		}
		return w;
	}

	private ChunkKey endChunk(int typeCode) throws DhtException {
		ChunkFormatter w = openChunks[typeCode];
		if (w == null)
			return null;

		openChunks[typeCode] = null;
		currChunk = null;

		if (w.isEmpty())
			return null;

		ChunkKey key = w.end(chunkKeyDigest);
		ChunkInfo info = w.getChunkInfo();

		if (chunkByOrder[typeCode] == null)
			chunkByOrder[typeCode] = new ArrayList<ChunkKey>();
		chunkByOrder[typeCode].add(key);
		infoByKey.put(key, info);

		if (w.getChunkMeta() != null)
			chunkMeta.put(key, w.getChunkMeta());

		Edges e = openEdges[typeCode];
		if (e != null) {
			chunkEdges.put(key, e);
			openEdges[typeCode] = null;
		}

		if (currFragments == null)
			chunkReadBackCache.put(key, w.getPackChunk());

		w.unsafePut(db, dbWriteBuffer);
		return key;
	}
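	// Binary search the recorded stream positions to map an OFS_DELTA
	// base offset back to the index of the object that owns it.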
	private int findStreamIndex(long streamPosition) throws DhtException {
		int high = objStreamPos.size();
		int low = 0;
		do {
			final int mid = (low + high) >>> 1;
			final long pos = objStreamPos.get(mid);
			if (streamPosition < pos)
				high = mid;
			else if (streamPosition == pos)
				return mid;
			else
				low = mid + 1;
		} while (low < high);
		throw new DhtException(MessageFormat.format(
				DhtText.get().noSavedTypeForBase, Long.valueOf(streamPosition)));
	}
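	// An object pointer packs three fields into one long: the object type
	// in the top 3 bits, the chunk's index within chunkByOrder[type] in
	// the next 29 bits, and the byte offset within the chunk in the low
	// 32 bits.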
	private long makeObjectPointer(ChunkFormatter w, int typeCode) {
		List<ChunkKey> list = chunkByOrder[typeCode];
		int idx = list == null ? 0 : list.size();
		int ptr = w.position();
		return (((long) typeCode) << 61) | (((long) idx) << 32) | ptr;
	}

	private static int typeOf(long objectPtr) {
		return (int) (objectPtr >>> 61);
	}

	private static int chunkIdx(long objectPtr) {
		return ((int) ((objectPtr << 3) >>> (32 + 3)));
	}

	private static int offsetOf(long objectPtr) {
		return (int) objectPtr;
	}
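	// A pointer whose chunk index equals the count of chunks already
	// closed for its type refers to the chunk that is still open.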
	private boolean isInCurrentChunk(long objectPtr) {
		List<ChunkKey> list = chunkByOrder[typeOf(objectPtr)];
		if (list == null)
			return chunkIdx(objectPtr) == 0;
		return chunkIdx(objectPtr) == list.size();
	}

	private ChunkKey chunkOf(long objectPtr) throws DhtException {
		List<ChunkKey> list = chunkByOrder[typeOf(objectPtr)];
		int idx = chunkIdx(objectPtr);
		if (list == null || list.size() <= idx) {
			throw new DhtException(MessageFormat.format(
					DhtText.get().packParserInvalidPointer, //
					Constants.typeString(typeOf(objectPtr)), //
					Integer.valueOf(idx), //
					Integer.valueOf(offsetOf(objectPtr))));
		}
		return list.get(idx);
	}

	private static DhtException panicCannotInsert() {
		// This exception should never happen.
		return new DhtException(DhtText.get().cannotInsertObject);
	}
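	// CRCs are never validated here (checkCRC above always succeeds), so
	// PackedObjectInfo's CRC field is reused to store the object type in
	// the low 3 bits plus the REFERENCED and FRAGMENTED flags.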
	static class DhtInfo extends PackedObjectInfo {
		private static final int REFERENCED = 1 << 3;

		static final int FRAGMENTED = 1 << 4;

		long chunkPtr;

		long packedSize;

		long inflatedSize;

		ObjectId base;

		DhtInfo(AnyObjectId id) {
			super(id);
		}

		boolean isInPack() {
			return chunkPtr != 0;
		}

		boolean isReferenced() {
			return (getCRC() & REFERENCED) != 0;
		}

		void setReferenced() {
			setCRC(getCRC() | REFERENCED);
		}

		boolean isFragmented() {
			return (getCRC() & FRAGMENTED) != 0;
		}

		void setFragmented() {
			setCRC(getCRC() | FRAGMENTED);
		}

		int getType() {
			return getCRC() & 7;
		}

		void setType(int type) {
			setCRC((getCRC() & ~7) | type);
		}

		ObjectInfo info(ChunkKey chunkKey) {
			GitStore.ObjectInfo.Builder b = GitStore.ObjectInfo.newBuilder();
			b.setObjectType(GitStore.ObjectInfo.ObjectType.valueOf(getType()));
			b.setOffset(offsetOf(chunkPtr));
			b.setPackedSize(packedSize);
			b.setInflatedSize(inflatedSize);
			if (base != null) {
				byte[] t = new byte[Constants.OBJECT_ID_LENGTH];
				base.copyRawTo(t, 0);
				b.setDeltaBase(ByteString.copyFrom(t));
			}
			if (isFragmented())
				b.setIsFragmented(true);
			return new ObjectInfo(chunkKey, b.build());
		}
	}
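	// Like DhtInfo, a pending delta reuses the CRC field to carry its
	// type and fragmented flag until the delta is resolved.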
	static class DhtDelta extends UnresolvedDelta {
		long chunkPtr;

		long packedSize;

		long inflatedSize;

		int getType() {
			return getCRC() & 7;
		}

		void setType(int type) {
			setCRC((getCRC() & ~7) | type);
		}

		boolean isFragmented() {
			return (getCRC() & DhtInfo.FRAGMENTED) != 0;
		}

		void setFragmented() {
			setCRC(getCRC() | DhtInfo.FRAGMENTED);
		}
	}
}