
ObjectWriter.java 7.7KB

/*
 * Copyright (C) 2011, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.eclipse.jgit.storage.dht;

import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicReference;

import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta;
import org.eclipse.jgit.storage.dht.spi.Context;
import org.eclipse.jgit.util.BlockList;
/**
 * Re-orders objects destined for a pack stream by chunk locality.
 * <p>
 * By re-ordering objects according to chunk locality, and then the original
 * order the PackWriter intended to use, objects can be copied quickly from
 * chunks, and each chunk is visited at most once. A {@link Prefetcher} for the
 * {@link DhtReader} is used to fetch chunks in the order they will be used,
 * improving throughput by reducing the number of round-trips required to the
 * storage system.
 */
final class ObjectWriter {
    private final DhtReader ctx;

    private final Prefetcher prefetch;

    private final int batchSize;

    private final Semaphore metaBatches;

    private final AtomicReference<DhtException> metaError;

    private final LinkedHashMap<ChunkKey, Integer> allVisits;

    private final Map<ChunkKey, ChunkMeta> allMeta;

    private final Set<ChunkKey> metaMissing;

    private Set<ChunkKey> metaToRead;

    private int curVisit;
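
    /**
     * The {@code metaBatches} semaphore starts with {@code batchSize}
     * permits, so at most {@code batchSize} asynchronous chunk-meta
     * reads can be in flight at any time.
     */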
  78. ObjectWriter(DhtReader ctx, Prefetcher prefetch) {
  79. this.ctx = ctx;
  80. this.prefetch = prefetch;
  81. batchSize = ctx.getOptions().getObjectIndexBatchSize();
  82. metaBatches = new Semaphore(batchSize);
  83. metaError = new AtomicReference<DhtException>();
  84. allVisits = new LinkedHashMap<ChunkKey, Integer>();
  85. allMeta = new HashMap<ChunkKey, ChunkMeta>();
  86. metaMissing = new HashSet<ChunkKey>();
  87. metaToRead = new HashSet<ChunkKey>();
  88. curVisit = 1;
  89. }
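
    /**
     * Computes a visit order for every object, loads chunk metadata for
     * fragmented objects (retrying keys the fast pass reported missing),
     * primes the prefetcher with the resulting chunk order, and sorts the
     * list so objects are emitted in that order.
     */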
    void plan(List<DhtObjectToPack> list) throws DhtException {
        try {
            for (DhtObjectToPack obj : list)
                visit(obj);

            if (!metaToRead.isEmpty())
                startBatch(Context.FAST_MISSING_OK);
            awaitPendingBatches();

            synchronized (metaMissing) {
                if (!metaMissing.isEmpty()) {
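                    // awaitPendingBatches() drained every permit above;
                    // restore them so the retry reads can acquire batches.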
                    metaBatches.release(batchSize);
                    resolveMissing();
                    awaitPendingBatches();
                }
            }
        } catch (InterruptedException err) {
            throw new DhtTimeoutException(err);
        }
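
        // Build the final chunk ordering: chunks in first-visit order,
        // with any continuation fragments of a fragmented chunk placed
        // immediately after the chunk that starts the fragment chain.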
        Iterable<ChunkKey> order;
        synchronized (allMeta) {
            if (allMeta.isEmpty()) {
                order = allVisits.keySet();
            } else {
                BlockList<ChunkKey> keys = new BlockList<ChunkKey>();
                for (ChunkKey key : allVisits.keySet()) {
                    keys.add(key);
                    ChunkMeta meta = allMeta.remove(key);
                    if (meta != null) {
                        for (int i = 1; i < meta.getFragmentCount(); i++)
                            keys.add(ChunkKey.fromString(meta.getFragment(i)));
                    }
                }
                order = keys;
            }
        }
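
        // Tell the prefetcher which chunks will be needed, in order, then
        // sort the objects so they are written in chunk-visit order.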
        prefetch.push(order);

        Collections.sort(list, new Comparator<DhtObjectToPack>() {
            public int compare(DhtObjectToPack a, DhtObjectToPack b) {
                return a.visitOrder - b.visitOrder;
            }
        });
    }
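
    /**
     * Assigns {@code obj} a visit order based on the chunk holding it,
     * recursing into the delta base first so OFS_DELTA can reference it.
     */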
    private void visit(DhtObjectToPack obj) throws InterruptedException,
            DhtTimeoutException {
        // Plan the visit to the delta base before the object. This
        // ensures the base is in the stream first, and OFS_DELTA can
        // be used for the delta.
        //
        DhtObjectToPack base = (DhtObjectToPack) obj.getDeltaBase();
        if (base != null && base.visitOrder == 0) {
            // Use the current visit, even if it's wrong. This will
            // prevent infinite recursion when there is a cycle in the
            // delta chain. Cycles are broken during writing, not in
            // the earlier planning phases.
            //
            obj.visitOrder = curVisit;
            visit(base);
        }

        ChunkKey key = obj.chunk;
        if (key != null) {
            Integer i = allVisits.get(key);
            if (i == null) {
                i = Integer.valueOf(1 + allVisits.size());
                allVisits.put(key, i);
            }
            curVisit = i.intValue();
        }

        if (obj.isFragmented()) {
            metaToRead.add(key);
            if (metaToRead.size() == batchSize)
                startBatch(Context.FAST_MISSING_OK);
        }

        obj.visitOrder = curVisit;
    }
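
    /**
     * Retries metadata reads for chunks the FAST_MISSING_OK pass reported
     * missing, using {@link Context#LOCAL} for the second attempt.
     */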
    private void resolveMissing() throws DhtTimeoutException,
            InterruptedException {
        metaToRead = new HashSet<ChunkKey>();
        for (ChunkKey key : metaMissing) {
            metaToRead.add(key);
            if (metaToRead.size() == batchSize)
                startBatch(Context.LOCAL);
        }
        if (!metaToRead.isEmpty())
            startBatch(Context.LOCAL);
    }
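
    /**
     * Starts one asynchronous chunk-meta read for the keys gathered so
     * far, waiting up to the configured timeout for a permit.
     */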
    private void startBatch(Context context) throws InterruptedException,
            DhtTimeoutException {
        Timeout to = ctx.getOptions().getTimeout();
        if (!metaBatches.tryAcquire(1, to.getTime(), to.getUnit()))
            throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta);

        Set<ChunkKey> keys = metaToRead;
        ctx.getDatabase().chunk().getMeta(
                context,
                keys,
                new MetaLoader(context, keys));
        metaToRead = new HashSet<ChunkKey>();
    }
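
    /**
     * Drains every permit to wait for all outstanding reads, then
     * rethrows the first error a callback reported, if any.
     */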
    private void awaitPendingBatches() throws InterruptedException,
            DhtTimeoutException, DhtException {
        Timeout to = ctx.getOptions().getTimeout();
        if (!metaBatches.tryAcquire(batchSize, to.getTime(), to.getUnit()))
            throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta);
        if (metaError.get() != null)
            throw metaError.get();
    }
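
    /** Callback that records returned chunk metadata for one batch. */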
    private class MetaLoader implements AsyncCallback<Map<ChunkKey, ChunkMeta>> {
        private final Context context;

        private final Set<ChunkKey> keys;

        MetaLoader(Context context, Set<ChunkKey> keys) {
            this.context = context;
            this.keys = keys;
        }
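
        /**
         * Stores the metadata that was found; keys left in {@code keys}
         * were missing, and the fast pass queues them for a LOCAL retry.
         */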
        public void onSuccess(Map<ChunkKey, ChunkMeta> result) {
            try {
                synchronized (allMeta) {
                    allMeta.putAll(result);
                    keys.removeAll(result.keySet());
                }
                if (context == Context.FAST_MISSING_OK && !keys.isEmpty()) {
                    synchronized (metaMissing) {
                        metaMissing.addAll(keys);
                    }
                }
            } finally {
                metaBatches.release(1);
            }
        }
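
        /** Remembers the first error and releases this batch's permit. */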
        public void onFailure(DhtException error) {
            metaError.compareAndSet(null, error);
            metaBatches.release(1);
        }
    }
}