Diffstat
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.classpath  7
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.gitignore  2
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.project  28
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.resources.prefs  6
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.runtime.prefs  3
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.core.prefs  320
-rw-r--r--  org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.ui.prefs  63
-rw-r--r--  org.eclipse.jgit.storage.dht.test/META-INF/MANIFEST.MF  19
-rw-r--r--  org.eclipse.jgit.storage.dht.test/build.properties  4
-rw-r--r--  org.eclipse.jgit.storage.dht.test/org.eclipse.jgit.storage.dht--All-Tests.launch  15
-rw-r--r--  org.eclipse.jgit.storage.dht.test/plugin.properties  2
-rw-r--r--  org.eclipse.jgit.storage.dht.test/pom.xml  101
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkIndexTest.java  317
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkKeyTest.java  88
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtPackParserTest.java  343
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtRepositoryBuilderTest.java  96
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/LargeNonDeltaObjectTest.java  117
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ObjectIndexKeyTest.java  76
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/RepositoryKeyTest.java  59
-rw-r--r--  org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/TimeoutTest.java  75
-rw-r--r--  org.eclipse.jgit.storage.dht/.classpath  8
-rw-r--r--  org.eclipse.jgit.storage.dht/.fbprefs  125
-rw-r--r--  org.eclipse.jgit.storage.dht/.gitignore  2
-rw-r--r--  org.eclipse.jgit.storage.dht/.project  28
-rw-r--r--  org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs  3
-rw-r--r--  org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs  3
-rw-r--r--  org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs  334
-rw-r--r--  org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs  62
-rw-r--r--  org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF  24
-rw-r--r--  org.eclipse.jgit.storage.dht/README  89
-rw-r--r--  org.eclipse.jgit.storage.dht/build.properties  5
-rw-r--r--  org.eclipse.jgit.storage.dht/plugin.properties  2
-rw-r--r--  org.eclipse.jgit.storage.dht/pom.xml  155
-rw-r--r--  org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties  35
-rw-r--r--  org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html  1151
-rw-r--r--  org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/git_store.proto  264
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java  74
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java  264
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackInfo.java  212
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java  154
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCache.java  436
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCacheConfig.java  97
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java  461
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java  429
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java  286
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java  173
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMeta.java  391
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java  196
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java  151
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java  61
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java  89
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java  305
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java  223
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java  83
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java  103
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java  89
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java  90
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java  1380
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java  747
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java  294
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java  443
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java  72
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java  199
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java  166
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java  236
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java  91
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java  85
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java  83
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java  158
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java  127
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java  255
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java  257
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java  186
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java  803
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java  434
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java  296
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java  215
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java  91
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefData.java  235
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java  139
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java  125
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java  107
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java  75
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java  78
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java  77
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java  73
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java  199
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java  242
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/TinyProtobuf.java  755
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java  164
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java  60
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java  108
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java  125
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java  116
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java  90
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java  140
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java  79
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java  191
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java  454
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java  153
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java  122
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java  336
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java  113
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java  90
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java  131
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java  165
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java  170
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java  155
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java  133
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java  101
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java  93
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java  81
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java  106
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java  299
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java  135
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java  397
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java  146
-rw-r--r--  org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java  95
-rw-r--r--  org.eclipse.jgit/src/org/eclipse/jgit/lib/ObjectId.java  2
-rw-r--r--  org.eclipse.jgit/src/org/eclipse/jgit/storage/file/ObjectDirectoryPackParser.java  11
-rw-r--r--  org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackOutputStream.java  2
-rw-r--r--  org.eclipse.jgit/src/org/eclipse/jgit/transport/PackParser.java  34
-rw-r--r--  pom.xml  5
123 files changed, 21719 insertions, 4 deletions
diff --git a/org.eclipse.jgit.storage.dht.test/.classpath b/org.eclipse.jgit.storage.dht.test/.classpath
new file mode 100644
index 0000000000..859bd118e8
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.classpath
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="src" path="tst"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/J2SE-1.5"/>
+ <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+ <classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/org.eclipse.jgit.storage.dht.test/.gitignore b/org.eclipse.jgit.storage.dht.test/.gitignore
new file mode 100644
index 0000000000..934e0e06ff
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.gitignore
@@ -0,0 +1,2 @@
+/bin
+/target
diff --git a/org.eclipse.jgit.storage.dht.test/.project b/org.eclipse.jgit.storage.dht.test/.project
new file mode 100644
index 0000000000..0d462aec76
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>org.eclipse.jgit.storage.dht.test</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.ManifestBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.SchemaBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ <nature>org.eclipse.pde.PluginNature</nature>
+ </natures>
+</projectDescription>
diff --git a/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.resources.prefs b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 0000000000..6a9621db1d
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,6 @@
+#Sat Dec 20 21:21:24 CET 2008
+eclipse.preferences.version=1
+encoding//tst-rsrc/org/eclipse/jgit/patch/testGetText_BothISO88591.patch=ISO-8859-1
+encoding//tst-rsrc/org/eclipse/jgit/patch/testGetText_Convert.patch=ISO-8859-1
+encoding//tst-rsrc/org/eclipse/jgit/patch/testGetText_DiffCc.patch=ISO-8859-1
+encoding/<project>=UTF-8
diff --git a/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.runtime.prefs b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.runtime.prefs
new file mode 100644
index 0000000000..9f733eeea7
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.core.runtime.prefs
@@ -0,0 +1,3 @@
+#Mon Mar 24 18:55:56 EDT 2008
+eclipse.preferences.version=1
+line.separator=\n
diff --git a/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.core.prefs b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000000..8bfa5f141a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,320 @@
+#Tue Feb 05 00:01:29 CET 2008
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.5
+org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
+org.eclipse.jdt.core.compiler.compliance=1.5
+org.eclipse.jdt.core.compiler.debug.lineNumber=generate
+org.eclipse.jdt.core.compiler.debug.localVariable=generate
+org.eclipse.jdt.core.compiler.debug.sourceFile=generate
+org.eclipse.jdt.core.compiler.doc.comment.support=enabled
+org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=warning
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.autoboxing=warning
+org.eclipse.jdt.core.compiler.problem.deprecation=warning
+org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled
+org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled
+org.eclipse.jdt.core.compiler.problem.discouragedReference=warning
+org.eclipse.jdt.core.compiler.problem.emptyStatement=warning
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.problem.fallthroughCase=warning
+org.eclipse.jdt.core.compiler.problem.fieldHiding=warning
+org.eclipse.jdt.core.compiler.problem.finalParameterBound=warning
+org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=error
+org.eclipse.jdt.core.compiler.problem.forbiddenReference=error
+org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=error
+org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=warning
+org.eclipse.jdt.core.compiler.problem.incompleteEnumSwitch=warning
+org.eclipse.jdt.core.compiler.problem.indirectStaticAccess=error
+org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTags=enabled
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsDeprecatedRef=enabled
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=enabled
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private
+org.eclipse.jdt.core.compiler.problem.localVariableHiding=warning
+org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error
+org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore
+org.eclipse.jdt.core.compiler.problem.missingJavadocComments=ignore
+org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsOverriding=disabled
+org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsVisibility=public
+org.eclipse.jdt.core.compiler.problem.missingJavadocTags=error
+org.eclipse.jdt.core.compiler.problem.missingJavadocTagsOverriding=disabled
+org.eclipse.jdt.core.compiler.problem.missingJavadocTagsVisibility=private
+org.eclipse.jdt.core.compiler.problem.missingOverrideAnnotation=ignore
+org.eclipse.jdt.core.compiler.problem.missingSerialVersion=warning
+org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
+org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error
+org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore
+org.eclipse.jdt.core.compiler.problem.nullReference=warning
+org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=warning
+org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore
+org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=error
+org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
+org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
+org.eclipse.jdt.core.compiler.problem.redundantNullCheck=warning
+org.eclipse.jdt.core.compiler.problem.specialParameterHidingField=disabled
+org.eclipse.jdt.core.compiler.problem.staticAccessReceiver=error
+org.eclipse.jdt.core.compiler.problem.suppressWarnings=enabled
+org.eclipse.jdt.core.compiler.problem.typeParameterHiding=warning
+org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=warning
+org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=warning
+org.eclipse.jdt.core.compiler.problem.unhandledWarningToken=warning
+org.eclipse.jdt.core.compiler.problem.unnecessaryTypeCheck=error
+org.eclipse.jdt.core.compiler.problem.unqualifiedFieldAccess=ignore
+org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=error
+org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled
+org.eclipse.jdt.core.compiler.problem.unusedImport=error
+org.eclipse.jdt.core.compiler.problem.unusedLabel=error
+org.eclipse.jdt.core.compiler.problem.unusedLocal=error
+org.eclipse.jdt.core.compiler.problem.unusedParameter=warning
+org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled
+org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
+org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error
+org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
+org.eclipse.jdt.core.compiler.source=1.5
+org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_assignment=0
+org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
+org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
+org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
+org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
+org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
+org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_after_package=1
+org.eclipse.jdt.core.formatter.blank_lines_before_field=1
+org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
+org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
+org.eclipse.jdt.core.formatter.blank_lines_before_method=1
+org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
+org.eclipse.jdt.core.formatter.blank_lines_before_package=0
+org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
+org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines=false
+org.eclipse.jdt.core.formatter.comment.format_comments=true
+org.eclipse.jdt.core.formatter.comment.format_header=false
+org.eclipse.jdt.core.formatter.comment.format_html=true
+org.eclipse.jdt.core.formatter.comment.format_source_code=true
+org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true
+org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
+org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
+org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert
+org.eclipse.jdt.core.formatter.comment.line_length=80
+org.eclipse.jdt.core.formatter.compact_else_if=true
+org.eclipse.jdt.core.formatter.continuation_indentation=2
+org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
+org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
+org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_empty_lines=false
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false
+org.eclipse.jdt.core.formatter.indentation.size=4
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
+org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.lineSplit=80
+org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
+org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
+org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
+org.eclipse.jdt.core.formatter.tabulation.char=tab
+org.eclipse.jdt.core.formatter.tabulation.size=4
+org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
diff --git a/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.ui.prefs b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000..df87aaa160
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,63 @@
+#Thu Aug 26 12:30:07 CDT 2010
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_JGit Format
+formatter_settings_version=11
+internal.default.compliance=default
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=java;javax;org;com;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8"?><templates/>
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=false
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=false
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=false
+sp_cleanup.always_use_this_for_non_static_method_access=false
+sp_cleanup.convert_to_enhanced_for_loop=false
+sp_cleanup.correct_indentation=false
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=true
+sp_cleanup.make_local_variable_final=false
+sp_cleanup.make_parameters_final=false
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=false
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=false
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=false
+sp_cleanup.remove_unnecessary_nls_tags=false
+sp_cleanup.remove_unused_imports=false
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_blocks=false
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_parentheses_in_expressions=false
+sp_cleanup.use_this_for_non_static_field_access=false
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=true
+sp_cleanup.use_this_for_non_static_method_access=false
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=true
diff --git a/org.eclipse.jgit.storage.dht.test/META-INF/MANIFEST.MF b/org.eclipse.jgit.storage.dht.test/META-INF/MANIFEST.MF
new file mode 100644
index 0000000000..de49a9ecc9
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/META-INF/MANIFEST.MF
@@ -0,0 +1,19 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: %plugin_name
+Bundle-SymbolicName: org.eclipse.jgit.storage.dht.test
+Bundle-Version: 1.0.0.qualifier
+Bundle-Localization: plugin
+Bundle-Vendor: %provider_name
+Bundle-ActivationPolicy: lazy
+Bundle-RequiredExecutionEnvironment: J2SE-1.5
+Import-Package: org.eclipse.jgit.lib;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.errors;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.storage.dht;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.storage.dht.spi.memory;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.storage.file;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.storage.pack;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.transport;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.util;version="[1.0.0,2.0.0)",
+ org.junit;version="[4.0.0,5.0.0)",
+ org.hamcrest.core;version="[1.1.0,2.0.0)"
diff --git a/org.eclipse.jgit.storage.dht.test/build.properties b/org.eclipse.jgit.storage.dht.test/build.properties
new file mode 100644
index 0000000000..32c717a7dc
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/build.properties
@@ -0,0 +1,4 @@
+source.. = tst/
+bin.includes = META-INF/,\
+ .,\
+ plugin.properties
diff --git a/org.eclipse.jgit.storage.dht.test/org.eclipse.jgit.storage.dht--All-Tests.launch b/org.eclipse.jgit.storage.dht.test/org.eclipse.jgit.storage.dht--All-Tests.launch
new file mode 100644
index 0000000000..039b441a33
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/org.eclipse.jgit.storage.dht--All-Tests.launch
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<launchConfiguration type="org.eclipse.jdt.junit.launchconfig">
+<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
+<listEntry value="/org.eclipse.jgit.storage.dht.test"/>
+</listAttribute>
+<listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
+<listEntry value="4"/>
+</listAttribute>
+<stringAttribute key="org.eclipse.jdt.junit.CONTAINER" value="=org.eclipse.jgit.storage.dht.test"/>
+<booleanAttribute key="org.eclipse.jdt.junit.KEEPRUNNING_ATTR" value="false"/>
+<stringAttribute key="org.eclipse.jdt.junit.TESTNAME" value=""/>
+<stringAttribute key="org.eclipse.jdt.junit.TEST_KIND" value="org.eclipse.jdt.junit.loader.junit4"/>
+<stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value=""/>
+<stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="org.eclipse.jgit.storage.dht.test"/>
+</launchConfiguration>
diff --git a/org.eclipse.jgit.storage.dht.test/plugin.properties b/org.eclipse.jgit.storage.dht.test/plugin.properties
new file mode 100644
index 0000000000..99eb7237ce
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/plugin.properties
@@ -0,0 +1,2 @@
+plugin_name=JGit DHT Storage Tests (Incubation)
+provider_name=Eclipse.org
diff --git a/org.eclipse.jgit.storage.dht.test/pom.xml b/org.eclipse.jgit.storage.dht.test/pom.xml
new file mode 100644
index 0000000000..19a3b05de2
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/pom.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (C) 2011, Google Inc.
+ and other copyright owners as documented in the project's IP log.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Distribution License v1.0 which
+ accompanies this distribution, is reproduced below, and is
+ available at http://www.eclipse.org/org/documents/edl-v10.php
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, are permitted provided that the following
+ conditions are met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ - Neither the name of the Eclipse Foundation, Inc. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit-parent</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>org.eclipse.jgit.storage.dht.test</artifactId>
+ <name>JGit - DHT Storage Tests</name>
+
+ <description>
+ JUnit tests for Git repository storage on a distributed hashtable
+ </description>
+
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit.junit</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit.storage.dht</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <testSourceDirectory>tst/</testSourceDirectory>
+
+ <plugins>
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <argLine>-Xmx256m -Dfile.encoding=UTF-8</argLine>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkIndexTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkIndexTest.java
new file mode 100644
index 0000000000..a5524edcec
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkIndexTest.java
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.eclipse.jgit.lib.MutableObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.transport.PackedObjectInfo;
+import org.eclipse.jgit.util.NB;
+import org.junit.Test;
+
+public class ChunkIndexTest {
+ @Test
+ public void testSingleObject_NotFound() throws DhtException {
+ List<PackedObjectInfo> objs = list(object(1, 1));
+ ChunkIndex idx = index(objs);
+ assertEquals(-1, idx.findOffset(ObjectId.zeroId()));
+ }
+
+ @Test
+ public void testSingleObject_Offset1() throws DhtException {
+ assertEquals(header(0, 1), header(list(object(1, 0))));
+
+ List<PackedObjectInfo> objs = list(object(0x1200, 255));
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(0, 1), header(objs));
+ assertEquals(1, idx.getObjectCount());
+ assertEquals(2 + 20 + 1, idx.getIndexSize());
+
+ assertEquals(objs.get(0), idx.getObjectId(0));
+ assertEquals(objs.get(0).getOffset(), idx.getOffset(0));
+ assertEquals(objs.get(0).getOffset(), idx.findOffset(objs.get(0)));
+ }
+
+ @Test
+ public void testSingleObject_Offset2() throws DhtException {
+ assertEquals(header(0, 2), header(list(object(1, 1 << 8))));
+
+ List<PackedObjectInfo> objs = list(object(0x1200, 0xab34));
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(0, 2), header(objs));
+ assertEquals(1, idx.getObjectCount());
+ assertEquals(2 + 20 + 2, idx.getIndexSize());
+
+ assertEquals(objs.get(0), idx.getObjectId(0));
+ assertEquals(objs.get(0).getOffset(), idx.getOffset(0));
+ assertEquals(objs.get(0).getOffset(), idx.findOffset(objs.get(0)));
+ }
+
+ @Test
+ public void testSingleObject_Offset3() throws DhtException {
+ assertEquals(header(0, 3), header(list(object(1, 1 << 16))));
+
+ List<PackedObjectInfo> objs = list(object(0x1200, 0xab1234));
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(0, 3), header(objs));
+ assertEquals(1, idx.getObjectCount());
+ assertEquals(2 + 20 + 3, idx.getIndexSize());
+
+ assertEquals(objs.get(0), idx.getObjectId(0));
+ assertEquals(objs.get(0).getOffset(), idx.getOffset(0));
+ assertEquals(objs.get(0).getOffset(), idx.findOffset(objs.get(0)));
+ }
+
+ @Test
+ public void testSingleObject_Offset4() throws DhtException {
+ assertEquals(header(0, 4), header(list(object(1, 1 << 24))));
+
+ List<PackedObjectInfo> objs = list(object(0x1200, 0x7bcdef42));
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(0, 4), header(objs));
+ assertEquals(1, idx.getObjectCount());
+ assertEquals(objs.get(0), idx.getObjectId(0));
+
+ assertEquals(2 + 20 + 4, idx.getIndexSize());
+ assertEquals(objs.get(0).getOffset(), idx.getOffset(0));
+ assertEquals(objs.get(0).getOffset(), idx.findOffset(objs.get(0)));
+ }
+
+ @Test
+ public void testObjects3() throws DhtException {
+ List<PackedObjectInfo> objs = objects(2, 3, 1);
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(0, 1), header(objs));
+ assertEquals(3, idx.getObjectCount());
+ assertEquals(2 + 3 * 20 + 3 * 1, idx.getIndexSize());
+ assertTrue(isSorted(objs));
+
+ for (int i = 0; i < objs.size(); i++) {
+ assertEquals(objs.get(i), idx.getObjectId(i));
+ assertEquals(objs.get(i).getOffset(), idx.getOffset(i));
+ assertEquals(objs.get(i).getOffset(), idx.findOffset(objs.get(i)));
+ }
+ }
+
+ @Test
+ public void testObjects255_SameBucket() throws DhtException {
+ int[] ints = new int[255];
+ for (int i = 0; i < 255; i++)
+ ints[i] = i;
+ List<PackedObjectInfo> objs = objects(ints);
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(1, 2), header(objs));
+ assertEquals(255, idx.getObjectCount());
+ assertEquals(2 + 256 + 255 * 20 + 255 * 2 //
+ + 12 + 4 * 256, idx.getIndexSize());
+ assertTrue(isSorted(objs));
+
+ for (int i = 0; i < objs.size(); i++) {
+ assertEquals(objs.get(i), idx.getObjectId(i));
+ assertEquals(objs.get(i).getOffset(), idx.getOffset(i));
+ assertEquals(objs.get(i).getOffset(), idx.findOffset(objs.get(i)));
+ }
+ }
+
+ @Test
+ public void testObjects512_ManyBuckets() throws DhtException {
+ int[] ints = new int[512];
+ for (int i = 0; i < 256; i++) {
+ ints[i] = (i << 8) | 0;
+ ints[i + 256] = (i << 8) | 1;
+ }
+ List<PackedObjectInfo> objs = objects(ints);
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(1, 2), header(objs));
+ assertEquals(512, idx.getObjectCount());
+ assertEquals(2 + 256 + 512 * 20 + 512 * 2 //
+ + 12 + 4 * 256, idx.getIndexSize());
+ assertTrue(isSorted(objs));
+
+ for (int i = 0; i < objs.size(); i++) {
+ assertEquals(objs.get(i), idx.getObjectId(i));
+ assertEquals(objs.get(i).getOffset(), idx.getOffset(i));
+ assertEquals(objs.get(i).getOffset(), idx.findOffset(objs.get(i)));
+ }
+ }
+
+ @Test
+ public void testFanout2() throws DhtException {
+ List<PackedObjectInfo> objs = new ArrayList<PackedObjectInfo>(65280);
+ MutableObjectId idBuf = new MutableObjectId();
+ for (int i = 0; i < 256; i++) {
+ idBuf.setByte(2, i & 0xff);
+ for (int j = 0; j < 255; j++) {
+ idBuf.setByte(3, j & 0xff);
+ PackedObjectInfo oe = new PackedObjectInfo(idBuf);
+ oe.setOffset((i << 8) | j);
+ objs.add(oe);
+ }
+ }
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(2, 2), header(objs));
+ assertEquals(256 * 255, idx.getObjectCount());
+ assertTrue(isSorted(objs));
+
+ for (int i = 0; i < objs.size(); i++) {
+ assertEquals(objs.get(i), idx.getObjectId(i));
+ assertEquals(objs.get(i).getOffset(), idx.getOffset(i));
+ assertEquals(objs.get(i).getOffset(), idx.findOffset(objs.get(i)));
+ }
+ }
+
+ @Test
+ public void testFanout3() throws DhtException {
+ List<PackedObjectInfo> objs = new ArrayList<PackedObjectInfo>(1 << 16);
+ MutableObjectId idBuf = new MutableObjectId();
+ for (int i = 0; i < 256; i++) {
+ idBuf.setByte(2, i & 0xff);
+ for (int j = 0; j < 256; j++) {
+ idBuf.setByte(3, j & 0xff);
+ PackedObjectInfo oe = new PackedObjectInfo(idBuf);
+ oe.setOffset((i << 8) | j);
+ objs.add(oe);
+ }
+ }
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(3, 2), header(objs));
+ assertEquals(256 * 256, idx.getObjectCount());
+ assertTrue(isSorted(objs));
+
+ for (int i = 0; i < objs.size(); i++) {
+ assertEquals(objs.get(i), idx.getObjectId(i));
+ assertEquals(objs.get(i).getOffset(), idx.getOffset(i));
+ assertEquals(objs.get(i).getOffset(), idx.findOffset(objs.get(i)));
+ }
+ }
+
+ @Test
+ public void testObjects65280_ManyBuckets() throws DhtException {
+ List<PackedObjectInfo> objs = new ArrayList<PackedObjectInfo>(65280);
+ MutableObjectId idBuf = new MutableObjectId();
+ for (int i = 0; i < 256; i++) {
+ idBuf.setByte(0, i & 0xff);
+ for (int j = 0; j < 255; j++) {
+ idBuf.setByte(3, j & 0xff);
+ PackedObjectInfo oe = new PackedObjectInfo(idBuf);
+ oe.setOffset((i << 8) | j);
+ objs.add(oe);
+ }
+ }
+ ChunkIndex idx = index(objs);
+
+ assertEquals(header(1, 2), header(objs));
+ assertEquals(65280, idx.getObjectCount());
+ assertTrue(isSorted(objs));
+
+ for (int i = 0; i < objs.size(); i++) {
+ assertEquals(objs.get(i), idx.getObjectId(i));
+ assertEquals(objs.get(i).getOffset(), idx.getOffset(i));
+ assertEquals(objs.get(i).getOffset(), idx.findOffset(objs.get(i)));
+ }
+ }
+
+ private boolean isSorted(List<PackedObjectInfo> objs) {
+ PackedObjectInfo last = objs.get(0);
+ for (int i = 1; i < objs.size(); i++) {
+ PackedObjectInfo oe = objs.get(i);
+ if (oe.compareTo(last) <= 0)
+ return false;
+ last = oe;
+ }
+ return true;
+ }
+
+ private List<PackedObjectInfo> list(PackedObjectInfo... all) {
+ List<PackedObjectInfo> objs = new ArrayList<PackedObjectInfo>();
+ for (PackedObjectInfo o : all)
+ objs.add(o);
+ return objs;
+ }
+
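+ // Expected 2-byte index header as these tests construct it: 0x01 (the
+ // index version) in the high byte, the fanout level packed into bits
+ // 3-7 and the offset field width in bytes into bits 0-2 of the low byte.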
+ private int header(int fanoutTable, int offsetTable) {
+ return (0x01 << 8) | (fanoutTable << 3) | offsetTable;
+ }
+
+ private int header(List<PackedObjectInfo> objs) {
+ byte[] index = ChunkIndex.create(objs);
+ return NB.decodeUInt16(index, 0);
+ }
+
+ private ChunkIndex index(List<PackedObjectInfo> objs) throws DhtException {
+ ChunkKey key = null;
+ byte[] index = ChunkIndex.create(objs);
+ return ChunkIndex.fromBytes(key, index, 0, index.length);
+ }
+
+ private List<PackedObjectInfo> objects(int... values) {
+ List<PackedObjectInfo> objs = new ArrayList<PackedObjectInfo>();
+ for (int i = 0; i < values.length; i++)
+ objs.add(object(values[i], i * 10));
+ return objs;
+ }
+
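+ // Builds a PackedObjectInfo whose first two ObjectId bytes encode id,
+ // recorded at the given pack offset.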
+ private PackedObjectInfo object(int id, int off) {
+ MutableObjectId idBuf = new MutableObjectId();
+ idBuf.setByte(0, (id >>> 8) & 0xff);
+ idBuf.setByte(1, id & 0xff);
+
+ PackedObjectInfo obj = new PackedObjectInfo(idBuf);
+ obj.setOffset(off);
+ return obj;
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkKeyTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkKeyTest.java
new file mode 100644
index 0000000000..63cbf520c0
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ChunkKeyTest.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import org.eclipse.jgit.lib.ObjectId;
+import org.junit.Test;
+
+public class ChunkKeyTest {
+ @Test
+ public void testKey() {
+ RepositoryKey repo1 = RepositoryKey.fromInt(0x41234567);
+ RepositoryKey repo2 = RepositoryKey.fromInt(2);
+ ObjectId id = ObjectId
+ .fromString("3e64b928d51b3a28e89cfe2a3f0eeae35ef07839");
+
+ ChunkKey key1 = ChunkKey.create(repo1, id);
+ assertEquals(repo1.asInt(), key1.getRepositoryId());
+ assertEquals(id, key1.getChunkHash());
+ assertEquals("3e.41234567.3e64b928d51b3a28e89cfe2a3f0eeae35ef07839",
+ key1.asString());
+
+ ChunkKey key2 = ChunkKey.fromBytes(key1.asBytes());
+ assertEquals(repo1.asInt(), key2.getRepositoryId());
+ assertEquals(id, key2.getChunkHash());
+ assertEquals("3e.41234567.3e64b928d51b3a28e89cfe2a3f0eeae35ef07839",
+ key2.asString());
+
+ ChunkKey key3 = ChunkKey.fromString(key1.asString());
+ assertEquals(repo1.asInt(), key3.getRepositoryId());
+ assertEquals(id, key3.getChunkHash());
+ assertEquals("3e.41234567.3e64b928d51b3a28e89cfe2a3f0eeae35ef07839",
+ key3.asString());
+
+ assertEquals(key1, key2);
+ assertEquals(key2, key3);
+
+ ChunkKey key4 = ChunkKey.create(repo2, id);
+ assertFalse("not equal", key2.equals(key4));
+
+ ObjectId id2 = ObjectId
+ .fromString("3e64b928d51b3a28e89cfe2a3f0eeae35ef07840");
+ ChunkKey key5 = ChunkKey.create(repo1, id2);
+ assertFalse("not equal", key2.equals(key5));
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtPackParserTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtPackParserTest.java
new file mode 100644
index 0000000000..6bb09a7c4e
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtPackParserTest.java
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
+import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
+import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;
+import static org.eclipse.jgit.lib.Constants.PACK_SIGNATURE;
+import static org.eclipse.jgit.lib.Constants.newMessageDigest;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.zip.Deflater;
+
+import org.eclipse.jgit.lib.NullProgressMonitor;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.storage.dht.spi.memory.MemoryDatabase;
+import org.eclipse.jgit.storage.file.PackLock;
+import org.eclipse.jgit.storage.pack.DeltaEncoder;
+import org.eclipse.jgit.util.NB;
+import org.eclipse.jgit.util.TemporaryBuffer;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DhtPackParserTest {
+ private MemoryDatabase db;
+
+ @Before
+ public void setUpDatabase() {
+ db = new MemoryDatabase();
+ }
+
+ @Test
+ public void testParse() throws IOException {
+ DhtRepository repo = db.open("test.git");
+ repo.create(true);
+
+ ObjectInserter.Formatter fmt = new ObjectInserter.Formatter();
+ byte[] data0 = new byte[512];
+ Arrays.fill(data0, (byte) 0xf3);
+ ObjectId id0 = fmt.idFor(OBJ_BLOB, data0);
+
+ TemporaryBuffer.Heap pack = new TemporaryBuffer.Heap(64 * 1024);
+ packHeader(pack, 4);
+ objectHeader(pack, OBJ_BLOB, data0.length);
+ deflate(pack, data0);
+
+ byte[] data1 = clone(0x01, data0);
+ byte[] delta1 = delta(data0, data1);
+ ObjectId id1 = fmt.idFor(OBJ_BLOB, data1);
+ objectHeader(pack, OBJ_REF_DELTA, delta1.length);
+ id0.copyRawTo(pack);
+ deflate(pack, delta1);
+
+ byte[] data2 = clone(0x02, data1);
+ byte[] delta2 = delta(data1, data2);
+ ObjectId id2 = fmt.idFor(OBJ_BLOB, data2);
+ objectHeader(pack, OBJ_REF_DELTA, delta2.length);
+ id1.copyRawTo(pack);
+ deflate(pack, delta2);
+
+ byte[] data3 = clone(0x03, data2);
+ byte[] delta3 = delta(data2, data3);
+ ObjectId id3 = fmt.idFor(OBJ_BLOB, data3);
+ objectHeader(pack, OBJ_REF_DELTA, delta3.length);
+ id2.copyRawTo(pack);
+ deflate(pack, delta3);
+ digest(pack);
+
+ ObjectInserter ins = repo.newObjectInserter();
+ try {
+ InputStream is = new ByteArrayInputStream(pack.toByteArray());
+ DhtPackParser p = (DhtPackParser) ins.newPackParser(is);
+ PackLock lock = p.parse(NullProgressMonitor.INSTANCE);
+ assertNull(lock);
+ } finally {
+ ins.release();
+ }
+
+ ObjectReader ctx = repo.newObjectReader();
+ try {
+ assertTrue(ctx.has(id0, OBJ_BLOB));
+ assertTrue(ctx.has(id1, OBJ_BLOB));
+ assertTrue(ctx.has(id2, OBJ_BLOB));
+ assertTrue(ctx.has(id3, OBJ_BLOB));
+ } finally {
+ ctx.release();
+ }
+ }
+
+ @Test
+ public void testLargeFragmentWithRefDelta() throws IOException {
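+ // A tiny chunk size forces the large non-delta object below to be
+ // fragmented across multiple chunks.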
+ DhtInserterOptions insOpt = new DhtInserterOptions().setChunkSize(256);
+ @SuppressWarnings("unchecked")
+ DhtRepository repo = (DhtRepository) new DhtRepositoryBuilder<DhtRepositoryBuilder, DhtRepository, MemoryDatabase>()
+ .setInserterOptions(insOpt).setDatabase(db) //
+ .setRepositoryName("test.git") //
+ .setMustExist(false) //
+ .build();
+ repo.create(true);
+
+ ObjectInserter.Formatter fmt = new ObjectInserter.Formatter();
+ TemporaryBuffer.Heap pack = new TemporaryBuffer.Heap(64 * 1024);
+ packHeader(pack, 3);
+
+ byte[] data3 = new byte[4];
+ Arrays.fill(data3, (byte) 0xf3);
+ ObjectId id3 = fmt.idFor(OBJ_BLOB, data3);
+ objectHeader(pack, OBJ_BLOB, data3.length);
+ deflate(pack, data3);
+
+ byte[] data0 = newArray(insOpt.getChunkSize() * 2);
+ ObjectId id0 = fmt.idFor(OBJ_BLOB, data0);
+ objectHeader(pack, OBJ_BLOB, data0.length);
+ store(pack, data0);
+ assertTrue(pack.length() > insOpt.getChunkSize());
+
+ byte[] data1 = clone(1, data0);
+ ObjectId id1 = fmt.idFor(OBJ_BLOB, data1);
+ byte[] delta1 = delta(data0, data1);
+ objectHeader(pack, OBJ_REF_DELTA, delta1.length);
+ id0.copyRawTo(pack);
+ deflate(pack, delta1);
+
+ digest(pack);
+
+ ObjectInserter ins = repo.newObjectInserter();
+ try {
+ InputStream is = new ByteArrayInputStream(pack.toByteArray());
+ DhtPackParser p = (DhtPackParser) ins.newPackParser(is);
+ PackLock lock = p.parse(NullProgressMonitor.INSTANCE);
+ assertNull(lock);
+ } finally {
+ ins.release();
+ }
+
+ ObjectReader ctx = repo.newObjectReader();
+ try {
+ assertTrue(ctx.has(id0, OBJ_BLOB));
+ assertTrue(ctx.has(id1, OBJ_BLOB));
+ assertTrue(ctx.has(id3, OBJ_BLOB));
+ } finally {
+ ctx.release();
+ }
+ }
+
+ @Test
+ public void testLargeFragmentWithOfsDelta() throws IOException {
+ DhtInserterOptions insOpt = new DhtInserterOptions().setChunkSize(256);
+ @SuppressWarnings("unchecked")
+ DhtRepository repo = (DhtRepository) new DhtRepositoryBuilder<DhtRepositoryBuilder, DhtRepository, MemoryDatabase>()
+ .setInserterOptions(insOpt).setDatabase(db) //
+ .setRepositoryName("test.git") //
+ .setMustExist(false) //
+ .build();
+ repo.create(true);
+
+ ObjectInserter.Formatter fmt = new ObjectInserter.Formatter();
+ TemporaryBuffer.Heap pack = new TemporaryBuffer.Heap(64 * 1024);
+ packHeader(pack, 3);
+
+ byte[] data3 = new byte[4];
+ Arrays.fill(data3, (byte) 0xf3);
+ ObjectId id3 = fmt.idFor(OBJ_BLOB, data3);
+ objectHeader(pack, OBJ_BLOB, data3.length);
+ deflate(pack, data3);
+
+ byte[] data0 = newArray(insOpt.getChunkSize() * 2);
+ ObjectId id0 = fmt.idFor(OBJ_BLOB, data0);
+ long pos0 = pack.length();
+ objectHeader(pack, OBJ_BLOB, data0.length);
+ store(pack, data0);
+ assertTrue(pack.length() > insOpt.getChunkSize());
+
+ byte[] data1 = clone(1, data0);
+ ObjectId id1 = fmt.idFor(OBJ_BLOB, data1);
+ byte[] delta1 = delta(data0, data1);
+ long pos1 = pack.length();
+ objectHeader(pack, OBJ_OFS_DELTA, delta1.length);
+ writeOffset(pack, pos1 - pos0);
+ deflate(pack, delta1);
+
+ digest(pack);
+
+ ObjectInserter ins = repo.newObjectInserter();
+ try {
+ InputStream is = new ByteArrayInputStream(pack.toByteArray());
+ DhtPackParser p = (DhtPackParser) ins.newPackParser(is);
+ PackLock lock = p.parse(NullProgressMonitor.INSTANCE);
+ assertNull(lock);
+ } finally {
+ ins.release();
+ }
+
+ ObjectReader ctx = repo.newObjectReader();
+ try {
+ assertTrue(ctx.has(id0, OBJ_BLOB));
+ assertTrue(ctx.has(id1, OBJ_BLOB));
+ assertTrue(ctx.has(id3, OBJ_BLOB));
+ } finally {
+ ctx.release();
+ }
+ }
+
+ private byte[] newArray(int size) {
+ byte[] r = new byte[size];
+ for (int i = 0; i < r.length; i++)
+ r[i] = (byte) (42 + i);
+ return r;
+ }
+
+ private byte[] clone(int first, byte[] base) {
+ byte[] r = new byte[base.length];
+ System.arraycopy(base, 1, r, 1, r.length - 1);
+ r[0] = (byte) first;
+ return r;
+ }
+
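+ // Builds a minimal delta against base: insert the first byte of dest
+ // literally, then copy the rest from base.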
+ private byte[] delta(byte[] base, byte[] dest) throws IOException {
+ ByteArrayOutputStream tmp = new ByteArrayOutputStream();
+ DeltaEncoder de = new DeltaEncoder(tmp, base.length, dest.length);
+ de.insert(dest, 0, 1);
+ de.copy(1, base.length - 1);
+ return tmp.toByteArray();
+ }
+
+ private void packHeader(TemporaryBuffer.Heap pack, int cnt)
+ throws IOException {
+ final byte[] hdr = new byte[8];
+ NB.encodeInt32(hdr, 0, 2);
+ NB.encodeInt32(hdr, 4, cnt);
+ pack.write(PACK_SIGNATURE);
+ pack.write(hdr, 0, 8);
+ }
+
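+ // Writes a pack object header: the first byte carries the type in bits
+ // 4-6 and the low four size bits; remaining size bits follow as 7-bit
+ // groups with the high bit marking continuation.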
+ private void objectHeader(TemporaryBuffer.Heap pack, int type, int sz)
+ throws IOException {
+ byte[] buf = new byte[8];
+ int nextLength = sz >>> 4;
+ buf[0] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4) | (sz & 0x0F));
+ sz = nextLength;
+ int n = 1;
+ while (sz > 0) {
+ nextLength >>>= 7;
+ buf[n++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (sz & 0x7F));
+ sz = nextLength;
+ }
+ pack.write(buf, 0, n);
+ }
+
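+ // Writes an OFS_DELTA base offset: big-endian 7-bit groups, most
+ // significant first, high bit set on all but the last byte, with git's
+ // minus-one bias applied to each continuation group.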
+ private void writeOffset(TemporaryBuffer.Heap pack, long offsetDiff)
+ throws IOException {
+ byte[] headerBuffer = new byte[32];
+ int n = headerBuffer.length - 1;
+ headerBuffer[n] = (byte) (offsetDiff & 0x7F);
+ while ((offsetDiff >>= 7) > 0)
+ headerBuffer[--n] = (byte) (0x80 | (--offsetDiff & 0x7F));
+ pack.write(headerBuffer, n, headerBuffer.length - n);
+ }
+
+ private void deflate(TemporaryBuffer.Heap pack, byte[] content)
+ throws IOException {
+ final Deflater deflater = new Deflater();
+ final byte[] buf = new byte[128];
+ deflater.setInput(content, 0, content.length);
+ deflater.finish();
+ do {
+ final int n = deflater.deflate(buf, 0, buf.length);
+ if (n > 0)
+ pack.write(buf, 0, n);
+ } while (!deflater.finished());
+ deflater.end();
+ }
+
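+ // Like deflate(), but with compression disabled so the packed object
+ // stays as large as its raw content and exceeds the chunk size.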
+ private void store(TemporaryBuffer.Heap pack, byte[] content)
+ throws IOException {
+ final Deflater deflater = new Deflater(Deflater.NO_COMPRESSION);
+ final byte[] buf = new byte[128];
+ deflater.setInput(content, 0, content.length);
+ deflater.finish();
+ do {
+ final int n = deflater.deflate(buf, 0, buf.length);
+ if (n > 0)
+ pack.write(buf, 0, n);
+ } while (!deflater.finished());
+ deflater.end();
+ }
+
+ private void digest(TemporaryBuffer.Heap buf) throws IOException {
+ MessageDigest md = newMessageDigest();
+ md.update(buf.toByteArray());
+ buf.write(md.digest());
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtRepositoryBuilderTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtRepositoryBuilderTest.java
new file mode 100644
index 0000000000..0300004a23
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/DhtRepositoryBuilderTest.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.Ref;
+import org.eclipse.jgit.storage.dht.spi.memory.MemoryDatabase;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DhtRepositoryBuilderTest {
+ private MemoryDatabase db;
+
+ @Before
+ public void setUpDatabase() {
+ db = new MemoryDatabase();
+ }
+
+ @Test
+ public void testCreateAndOpen() throws IOException {
+ String name = "test.git";
+
+ DhtRepository repo1 = db.open(name);
+ assertSame(db, repo1.getDatabase());
+ assertSame(repo1, repo1.getRefDatabase().getRepository());
+ assertSame(repo1, repo1.getObjectDatabase().getRepository());
+
+ assertEquals(name, repo1.getRepositoryName().asString());
+ assertNull(repo1.getRepositoryKey());
+ assertFalse(repo1.getObjectDatabase().exists());
+
+ repo1.create(true);
+ assertNotNull(repo1.getRepositoryKey());
+ assertTrue(repo1.getObjectDatabase().exists());
+
+ DhtRepository repo2 = db.open(name);
+ assertNotNull(repo2.getRepositoryKey());
+ assertTrue(repo2.getObjectDatabase().exists());
+ assertEquals(0, repo2.getAllRefs().size());
+
+ Ref HEAD = repo2.getRef(Constants.HEAD);
+ assertTrue(HEAD.isSymbolic());
+ assertEquals(Constants.R_HEADS + Constants.MASTER, //
+ HEAD.getLeaf().getName());
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/LargeNonDeltaObjectTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/LargeNonDeltaObjectTest.java
new file mode 100644
index 0000000000..9f1bbf1983
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/LargeNonDeltaObjectTest.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.zip.Deflater;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.lib.ObjectStream;
+import org.eclipse.jgit.lib.Repository;
+import org.eclipse.jgit.storage.dht.spi.memory.MemoryDatabase;
+import org.eclipse.jgit.util.IO;
+import org.junit.Before;
+import org.junit.Test;
+
+public class LargeNonDeltaObjectTest {
+ private MemoryDatabase db;
+
+ @Before
+ public void setUpDatabase() {
+ db = new MemoryDatabase();
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testInsertRead() throws IOException {
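+ // Small chunks and no compression make this blob large relative to a
+ // chunk, so it is stored and read back as a large, streamed object.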
+ DhtInserterOptions insopt = new DhtInserterOptions();
+ insopt.setChunkSize(128);
+ insopt.setCompression(Deflater.NO_COMPRESSION);
+
+ Repository repo = new DhtRepositoryBuilder() //
+ .setDatabase(db) //
+ .setInserterOptions(insopt) //
+ .setRepositoryName("test.git") //
+ .setMustExist(false) //
+ .build();
+ repo.create(true);
+
+ byte[] data = new byte[insopt.getChunkSize() * 3];
+ Arrays.fill(data, (byte) 0x42);
+
+ ObjectInserter ins = repo.newObjectInserter();
+ ObjectId id = ins.insert(Constants.OBJ_BLOB, data);
+ ins.flush();
+ ins.release();
+
+ ObjectReader reader = repo.newObjectReader();
+ ObjectLoader ldr = reader.open(id);
+ assertEquals(Constants.OBJ_BLOB, ldr.getType());
+ assertEquals(data.length, ldr.getSize());
+ assertTrue(ldr.isLarge());
+
+ byte[] dst = new byte[data.length];
+ ObjectStream in = ldr.openStream();
+ IO.readFully(in, dst, 0, dst.length);
+ assertTrue(Arrays.equals(data, dst));
+ in.close();
+
+ // Reading should still work, even though initial chunk is gone.
+ dst = new byte[data.length];
+ in = ldr.openStream();
+ IO.readFully(in, dst, 0, dst.length);
+ assertTrue(Arrays.equals(data, dst));
+ in.close();
+
+ reader.release();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ObjectIndexKeyTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ObjectIndexKeyTest.java
new file mode 100644
index 0000000000..ab3b423ede
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/ObjectIndexKeyTest.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+
+import org.eclipse.jgit.lib.ObjectId;
+import org.junit.Test;
+
+public class ObjectIndexKeyTest {
+ @Test
+ public void testKey() {
+ RepositoryKey repo = RepositoryKey.fromInt(0x41234567);
+ ObjectId id = ObjectId
+ .fromString("3e64b928d51b3a28e89cfe2a3f0eeae35ef07839");
+
+ ObjectIndexKey key1 = ObjectIndexKey.create(repo, id);
+ assertEquals(repo.asInt(), key1.getRepositoryId());
+ assertEquals(key1, id);
+ assertEquals("3e.41234567.3e64b928d51b3a28e89cfe2a3f0eeae35ef07839",
+ key1.asString());
+
+ ObjectIndexKey key2 = ObjectIndexKey.fromBytes(key1.asBytes());
+ assertEquals(repo.asInt(), key2.getRepositoryId());
+ assertEquals(key2, id);
+ assertEquals("3e.41234567.3e64b928d51b3a28e89cfe2a3f0eeae35ef07839",
+ key2.asString());
+
+ ObjectIndexKey key3 = ObjectIndexKey.fromString(key1.asString());
+ assertEquals(repo.asInt(), key3.getRepositoryId());
+ assertEquals(key3, id);
+ assertEquals("3e.41234567.3e64b928d51b3a28e89cfe2a3f0eeae35ef07839",
+ key3.asString());
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/RepositoryKeyTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/RepositoryKeyTest.java
new file mode 100644
index 0000000000..6dc7e0e84b
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/RepositoryKeyTest.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class RepositoryKeyTest {
+ @Test
+ public void fromString() {
+ assertEquals(RepositoryKey.create(2), RepositoryKey
+ .fromString("40000000"));
+
+ assertEquals(RepositoryKey.create(1), RepositoryKey
+ .fromString("80000000"));
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/TimeoutTest.java b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/TimeoutTest.java
new file mode 100644
index 0000000000..188158b8bd
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht.test/tst/org/eclipse/jgit/storage/dht/TimeoutTest.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.concurrent.TimeUnit;
+
+import org.eclipse.jgit.lib.Config;
+import org.junit.Test;
+
+public class TimeoutTest {
+ @Test
+ public void testGetTimeout() {
+ Timeout def = Timeout.seconds(2);
+ Config cfg = new Config();
+ Timeout t;
+
+ cfg.setString("core", "dht", "timeout", "500 ms");
+ t = Timeout.getTimeout(cfg, "core", "dht", "timeout", def);
+ assertEquals(500, t.getTime());
+ assertEquals(TimeUnit.MILLISECONDS, t.getUnit());
+
+ cfg.setString("core", "dht", "timeout", "5.2 sec");
+ t = Timeout.getTimeout(cfg, "core", "dht", "timeout", def);
+ assertEquals(5200, t.getTime());
+ assertEquals(TimeUnit.MILLISECONDS, t.getUnit());
+
+ cfg.setString("core", "dht", "timeout", "1 min");
+ t = Timeout.getTimeout(cfg, "core", "dht", "timeout", def);
+ assertEquals(60, t.getTime());
+ assertEquals(TimeUnit.SECONDS, t.getUnit());
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/.classpath b/org.eclipse.jgit.storage.dht/.classpath
new file mode 100644
index 0000000000..d7edf529a2
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.classpath
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="src" path="src"/>
+ <classpathentry kind="src" path="resources"/>
+ <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/J2SE-1.5"/>
+ <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/>
+ <classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/org.eclipse.jgit.storage.dht/.fbprefs b/org.eclipse.jgit.storage.dht/.fbprefs
new file mode 100644
index 0000000000..81a0767ff6
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.fbprefs
@@ -0,0 +1,125 @@
+#FindBugs User Preferences
+#Mon May 04 16:24:13 PDT 2009
+detectorAppendingToAnObjectOutputStream=AppendingToAnObjectOutputStream|true
+detectorBadAppletConstructor=BadAppletConstructor|false
+detectorBadResultSetAccess=BadResultSetAccess|true
+detectorBadSyntaxForRegularExpression=BadSyntaxForRegularExpression|true
+detectorBadUseOfReturnValue=BadUseOfReturnValue|true
+detectorBadlyOverriddenAdapter=BadlyOverriddenAdapter|true
+detectorBooleanReturnNull=BooleanReturnNull|true
+detectorCallToUnsupportedMethod=CallToUnsupportedMethod|true
+detectorCheckImmutableAnnotation=CheckImmutableAnnotation|true
+detectorCheckTypeQualifiers=CheckTypeQualifiers|true
+detectorCloneIdiom=CloneIdiom|false
+detectorComparatorIdiom=ComparatorIdiom|true
+detectorConfusedInheritance=ConfusedInheritance|true
+detectorConfusionBetweenInheritedAndOuterMethod=ConfusionBetweenInheritedAndOuterMethod|true
+detectorCrossSiteScripting=CrossSiteScripting|true
+detectorDoInsideDoPrivileged=DoInsideDoPrivileged|true
+detectorDontCatchIllegalMonitorStateException=DontCatchIllegalMonitorStateException|true
+detectorDontUseEnum=DontUseEnum|true
+detectorDroppedException=DroppedException|true
+detectorDumbMethodInvocations=DumbMethodInvocations|true
+detectorDumbMethods=DumbMethods|true
+detectorDuplicateBranches=DuplicateBranches|true
+detectorEmptyZipFileEntry=EmptyZipFileEntry|true
+detectorEqualsOperandShouldHaveClassCompatibleWithThis=EqualsOperandShouldHaveClassCompatibleWithThis|true
+detectorFinalizerNullsFields=FinalizerNullsFields|true
+detectorFindBadCast2=FindBadCast2|true
+detectorFindBadForLoop=FindBadForLoop|true
+detectorFindCircularDependencies=FindCircularDependencies|false
+detectorFindDeadLocalStores=FindDeadLocalStores|true
+detectorFindDoubleCheck=FindDoubleCheck|true
+detectorFindEmptySynchronizedBlock=FindEmptySynchronizedBlock|true
+detectorFindFieldSelfAssignment=FindFieldSelfAssignment|true
+detectorFindFinalizeInvocations=FindFinalizeInvocations|true
+detectorFindFloatEquality=FindFloatEquality|true
+detectorFindHEmismatch=FindHEmismatch|true
+detectorFindInconsistentSync2=FindInconsistentSync2|true
+detectorFindJSR166LockMonitorenter=FindJSR166LockMonitorenter|true
+detectorFindLocalSelfAssignment2=FindLocalSelfAssignment2|true
+detectorFindMaskedFields=FindMaskedFields|true
+detectorFindMismatchedWaitOrNotify=FindMismatchedWaitOrNotify|true
+detectorFindNakedNotify=FindNakedNotify|true
+detectorFindNonSerializableStoreIntoSession=FindNonSerializableStoreIntoSession|true
+detectorFindNonSerializableValuePassedToWriteObject=FindNonSerializableValuePassedToWriteObject|true
+detectorFindNonShortCircuit=FindNonShortCircuit|true
+detectorFindNullDeref=FindNullDeref|true
+detectorFindNullDerefsInvolvingNonShortCircuitEvaluation=FindNullDerefsInvolvingNonShortCircuitEvaluation|true
+detectorFindOpenStream=FindOpenStream|true
+detectorFindPuzzlers=FindPuzzlers|true
+detectorFindRefComparison=FindRefComparison|true
+detectorFindReturnRef=FindReturnRef|true
+detectorFindRunInvocations=FindRunInvocations|true
+detectorFindSelfComparison=FindSelfComparison|true
+detectorFindSelfComparison2=FindSelfComparison2|true
+detectorFindSleepWithLockHeld=FindSleepWithLockHeld|true
+detectorFindSpinLoop=FindSpinLoop|true
+detectorFindSqlInjection=FindSqlInjection|true
+detectorFindTwoLockWait=FindTwoLockWait|true
+detectorFindUncalledPrivateMethods=FindUncalledPrivateMethods|true
+detectorFindUnconditionalWait=FindUnconditionalWait|true
+detectorFindUninitializedGet=FindUninitializedGet|true
+detectorFindUnrelatedTypesInGenericContainer=FindUnrelatedTypesInGenericContainer|true
+detectorFindUnreleasedLock=FindUnreleasedLock|true
+detectorFindUnsatisfiedObligation=FindUnsatisfiedObligation|true
+detectorFindUnsyncGet=FindUnsyncGet|true
+detectorFindUselessControlFlow=FindUselessControlFlow|true
+detectorFormatStringChecker=FormatStringChecker|true
+detectorHugeSharedStringConstants=HugeSharedStringConstants|true
+detectorIDivResultCastToDouble=IDivResultCastToDouble|true
+detectorIncompatMask=IncompatMask|true
+detectorInconsistentAnnotations=InconsistentAnnotations|true
+detectorInefficientMemberAccess=InefficientMemberAccess|false
+detectorInefficientToArray=InefficientToArray|true
+detectorInfiniteLoop=InfiniteLoop|true
+detectorInfiniteRecursiveLoop=InfiniteRecursiveLoop|true
+detectorInfiniteRecursiveLoop2=InfiniteRecursiveLoop2|false
+detectorInheritanceUnsafeGetResource=InheritanceUnsafeGetResource|true
+detectorInitializationChain=InitializationChain|true
+detectorInstantiateStaticClass=InstantiateStaticClass|true
+detectorInvalidJUnitTest=InvalidJUnitTest|true
+detectorIteratorIdioms=IteratorIdioms|true
+detectorLazyInit=LazyInit|true
+detectorLoadOfKnownNullValue=LoadOfKnownNullValue|true
+detectorMethodReturnCheck=MethodReturnCheck|true
+detectorMultithreadedInstanceAccess=MultithreadedInstanceAccess|true
+detectorMutableLock=MutableLock|true
+detectorMutableStaticFields=MutableStaticFields|true
+detectorNaming=Naming|true
+detectorNumberConstructor=NumberConstructor|true
+detectorOverridingEqualsNotSymmetrical=OverridingEqualsNotSymmetrical|true
+detectorPreferZeroLengthArrays=PreferZeroLengthArrays|true
+detectorPublicSemaphores=PublicSemaphores|false
+detectorQuestionableBooleanAssignment=QuestionableBooleanAssignment|true
+detectorReadReturnShouldBeChecked=ReadReturnShouldBeChecked|true
+detectorRedundantInterfaces=RedundantInterfaces|true
+detectorRepeatedConditionals=RepeatedConditionals|true
+detectorRuntimeExceptionCapture=RuntimeExceptionCapture|true
+detectorSerializableIdiom=SerializableIdiom|true
+detectorStartInConstructor=StartInConstructor|true
+detectorStaticCalendarDetector=StaticCalendarDetector|true
+detectorStringConcatenation=StringConcatenation|true
+detectorSuperfluousInstanceOf=SuperfluousInstanceOf|true
+detectorSuspiciousThreadInterrupted=SuspiciousThreadInterrupted|true
+detectorSwitchFallthrough=SwitchFallthrough|true
+detectorSynchronizeAndNullCheckField=SynchronizeAndNullCheckField|true
+detectorSynchronizeOnClassLiteralNotGetClass=SynchronizeOnClassLiteralNotGetClass|true
+detectorSynchronizingOnContentsOfFieldToProtectField=SynchronizingOnContentsOfFieldToProtectField|true
+detectorURLProblems=URLProblems|true
+detectorUncallableMethodOfAnonymousClass=UncallableMethodOfAnonymousClass|true
+detectorUnnecessaryMath=UnnecessaryMath|true
+detectorUnreadFields=UnreadFields|true
+detectorUseObjectEquals=UseObjectEquals|false
+detectorUselessSubclassMethod=UselessSubclassMethod|false
+detectorVarArgsProblems=VarArgsProblems|true
+detectorVolatileUsage=VolatileUsage|true
+detectorWaitInLoop=WaitInLoop|true
+detectorWrongMapIterator=WrongMapIterator|true
+detectorXMLFactoryBypass=XMLFactoryBypass|true
+detector_threshold=2
+effort=default
+excludefilter0=findBugs/FindBugsExcludeFilter.xml
+filter_settings=Medium|BAD_PRACTICE,CORRECTNESS,MT_CORRECTNESS,PERFORMANCE,STYLE|false
+filter_settings_neg=MALICIOUS_CODE,NOISE,I18N,SECURITY,EXPERIMENTAL|
+run_at_full_build=true
diff --git a/org.eclipse.jgit.storage.dht/.gitignore b/org.eclipse.jgit.storage.dht/.gitignore
new file mode 100644
index 0000000000..934e0e06ff
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.gitignore
@@ -0,0 +1,2 @@
+/bin
+/target
diff --git a/org.eclipse.jgit.storage.dht/.project b/org.eclipse.jgit.storage.dht/.project
new file mode 100644
index 0000000000..11af6dea7d
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.project
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>org.eclipse.jgit.storage.dht</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.ManifestBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.pde.SchemaBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ <nature>org.eclipse.pde.PluginNature</nature>
+ </natures>
+</projectDescription>
diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs
new file mode 100644
index 0000000000..66ac15c47c
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs
@@ -0,0 +1,3 @@
+#Mon Aug 11 16:46:12 PDT 2008
+eclipse.preferences.version=1
+encoding/<project>=UTF-8
diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs
new file mode 100644
index 0000000000..006e07ede5
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs
@@ -0,0 +1,3 @@
+#Mon Mar 24 18:55:50 EDT 2008
+eclipse.preferences.version=1
+line.separator=\n
diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs
new file mode 100644
index 0000000000..76557139ec
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs
@@ -0,0 +1,334 @@
+#Fri Oct 02 18:43:47 PDT 2009
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.5
+org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
+org.eclipse.jdt.core.compiler.compliance=1.5
+org.eclipse.jdt.core.compiler.debug.lineNumber=generate
+org.eclipse.jdt.core.compiler.debug.localVariable=generate
+org.eclipse.jdt.core.compiler.debug.sourceFile=generate
+org.eclipse.jdt.core.compiler.doc.comment.support=enabled
+org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=warning
+org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.autoboxing=warning
+org.eclipse.jdt.core.compiler.problem.deprecation=warning
+org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled
+org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled
+org.eclipse.jdt.core.compiler.problem.discouragedReference=warning
+org.eclipse.jdt.core.compiler.problem.emptyStatement=warning
+org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.problem.fallthroughCase=warning
+org.eclipse.jdt.core.compiler.problem.fieldHiding=warning
+org.eclipse.jdt.core.compiler.problem.finalParameterBound=warning
+org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=error
+org.eclipse.jdt.core.compiler.problem.forbiddenReference=error
+org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=error
+org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=warning
+org.eclipse.jdt.core.compiler.problem.incompleteEnumSwitch=warning
+org.eclipse.jdt.core.compiler.problem.indirectStaticAccess=error
+org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTags=enabled
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsDeprecatedRef=enabled
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=enabled
+org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private
+org.eclipse.jdt.core.compiler.problem.localVariableHiding=warning
+org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error
+org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore
+org.eclipse.jdt.core.compiler.problem.missingJavadocComments=error
+org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsOverriding=disabled
+org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsVisibility=protected
+org.eclipse.jdt.core.compiler.problem.missingJavadocTagDescription=return_tag
+org.eclipse.jdt.core.compiler.problem.missingJavadocTags=error
+org.eclipse.jdt.core.compiler.problem.missingJavadocTagsOverriding=disabled
+org.eclipse.jdt.core.compiler.problem.missingJavadocTagsVisibility=private
+org.eclipse.jdt.core.compiler.problem.missingOverrideAnnotation=ignore
+org.eclipse.jdt.core.compiler.problem.missingSerialVersion=warning
+org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
+org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error
+org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore
+org.eclipse.jdt.core.compiler.problem.nullReference=warning
+org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=warning
+org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore
+org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=error
+org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
+org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
+org.eclipse.jdt.core.compiler.problem.redundantNullCheck=warning
+org.eclipse.jdt.core.compiler.problem.specialParameterHidingField=disabled
+org.eclipse.jdt.core.compiler.problem.staticAccessReceiver=error
+org.eclipse.jdt.core.compiler.problem.suppressWarnings=enabled
+org.eclipse.jdt.core.compiler.problem.typeParameterHiding=warning
+org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=warning
+org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=warning
+org.eclipse.jdt.core.compiler.problem.unhandledWarningToken=warning
+org.eclipse.jdt.core.compiler.problem.unnecessaryTypeCheck=error
+org.eclipse.jdt.core.compiler.problem.unqualifiedFieldAccess=ignore
+org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=warning
+org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled
+org.eclipse.jdt.core.compiler.problem.unusedImport=error
+org.eclipse.jdt.core.compiler.problem.unusedLabel=error
+org.eclipse.jdt.core.compiler.problem.unusedLocal=error
+org.eclipse.jdt.core.compiler.problem.unusedParameter=warning
+org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled
+org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
+org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error
+org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
+org.eclipse.jdt.core.compiler.source=1.5
+org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_assignment=0
+org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
+org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
+org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
+org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
+org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
+org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
+org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
+org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
+org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_after_package=1
+org.eclipse.jdt.core.formatter.blank_lines_before_field=1
+org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
+org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
+org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
+org.eclipse.jdt.core.formatter.blank_lines_before_method=1
+org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
+org.eclipse.jdt.core.formatter.blank_lines_before_package=0
+org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1
+org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
+org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
+org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines=false
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false
+org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false
+org.eclipse.jdt.core.formatter.comment.format_block_comments=true
+org.eclipse.jdt.core.formatter.comment.format_comments=true
+org.eclipse.jdt.core.formatter.comment.format_header=false
+org.eclipse.jdt.core.formatter.comment.format_html=true
+org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true
+org.eclipse.jdt.core.formatter.comment.format_line_comments=true
+org.eclipse.jdt.core.formatter.comment.format_source_code=true
+org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true
+org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
+org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
+org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert
+org.eclipse.jdt.core.formatter.comment.line_length=80
+org.eclipse.jdt.core.formatter.compact_else_if=true
+org.eclipse.jdt.core.formatter.continuation_indentation=2
+org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
+org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
+org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
+org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_empty_lines=false
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
+org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
+org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false
+org.eclipse.jdt.core.formatter.indentation.size=4
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member=insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
+org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
+org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
+org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
+org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
+org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
+org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert
+org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
+org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
+org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
+org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
+org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
+org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
+org.eclipse.jdt.core.formatter.lineSplit=80
+org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false
+org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
+org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
+org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
+org.eclipse.jdt.core.formatter.tabulation.char=tab
+org.eclipse.jdt.core.formatter.tabulation.size=4
+org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
+org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000..7b2cdca106
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,62 @@
+#Thu Aug 26 12:30:58 CDT 2010
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_JGit Format
+formatter_settings_version=11
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=java;javax;org;com;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates/>
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=false
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=false
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=false
+sp_cleanup.always_use_this_for_non_static_method_access=false
+sp_cleanup.convert_to_enhanced_for_loop=false
+sp_cleanup.correct_indentation=false
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=true
+sp_cleanup.make_local_variable_final=false
+sp_cleanup.make_parameters_final=false
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=false
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=false
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=false
+sp_cleanup.remove_unnecessary_nls_tags=false
+sp_cleanup.remove_unused_imports=false
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_blocks=false
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_parentheses_in_expressions=false
+sp_cleanup.use_this_for_non_static_field_access=false
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=true
+sp_cleanup.use_this_for_non_static_method_access=false
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=true
diff --git a/org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF b/org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF
new file mode 100644
index 0000000000..10bc4867c6
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF
@@ -0,0 +1,24 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: %plugin_name
+Bundle-SymbolicName: org.eclipse.jgit.storage.dht
+Bundle-Version: 1.0.0.qualifier
+Bundle-Localization: plugin
+Bundle-Vendor: %provider_name
+Export-Package: org.eclipse.jgit.storage.dht;version="1.0.0",
+ org.eclipse.jgit.storage.dht.spi;version="1.0.0",
+ org.eclipse.jgit.storage.dht.spi.cache;version="1.0.0",
+ org.eclipse.jgit.storage.dht.spi.util;version="1.0.0",
+ org.eclipse.jgit.storage.dht.spi.memory;version="1.0.0"
+Bundle-ActivationPolicy: lazy
+Bundle-RequiredExecutionEnvironment: J2SE-1.5
+Import-Package: org.eclipse.jgit.errors;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.lib;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.nls;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.revwalk;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.transport;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.treewalk;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.storage.file;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.storage.pack;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.util;version="[1.0.0,2.0.0)",
+ org.eclipse.jgit.util.io;version="[1.0.0,2.0.0)"
diff --git a/org.eclipse.jgit.storage.dht/README b/org.eclipse.jgit.storage.dht/README
new file mode 100644
index 0000000000..1e07d377e7
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/README
@@ -0,0 +1,89 @@
+JGit Storage on DHT
+-------------------
+
+This implementation still has some pending issues:
+
+* DhtInserter must skip existing objects
+
+ DirCache writes all trees to the ObjectInserter, letting the
+ inserter figure out which trees we already have, and which are new.
+ DhtInserter should buffer trees into a chunk, then before writing
+ the chunk to the DHT do a batch lookup to find the existing
+ ObjectInfo (if any). If any exist, the chunk should be compacted to
+ eliminate these objects, and if there is room in the chunk for more
+ objects, it should go back to the DhtInserter to be filled further
+ before flushing.
+
+ This implies the DhtInserter needs to work on multiple chunks at
+ once, and may need to combine chunks together when there is more
+ than one partial chunk.
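+
+  A rough sketch of the intended check-before-flush step is below.
+  Names other than ObjectId are placeholders, not part of any
+  existing API; this only illustrates the idea described above.
+
+    void flushIfFull(ChunkBuffer chunkBuf, Database db) throws IOException {
+      Set<ObjectId> existing = db.batchLookup(chunkBuf.getObjectIds());
+      if (!existing.isEmpty())
+        chunkBuf.compact(existing);  // drop objects the DHT already has
+      if (chunkBuf.isFull())
+        db.putChunk(chunkBuf);
+      // otherwise the partial chunk goes back to DhtInserter for reuse
+    }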
+
+* DhtPackParser must check for collisions
+
+  Because ChunkCache blindly assumes any copy of an object is a valid
+  copy, DhtPackParser needs to validate all new objects
+ at the end of its importing phase, before it links the objects into
+ the ObjectIndexTable. Most objects won't already exist, but some
+ may, and those that do must either be removed from their chunk, or
+ have their content byte-for-byte validated.
+
+ Removal from a chunk just means deleting it from the chunk's local
+ index, and not writing it to the global ObjectIndexTable. This
+ creates a hole in the chunk which is wasted space, and that isn't
+ very useful. Fortunately objects that fit fully within one chunk
+ may be easy to inflate and double check, as they are small. Objects
+ that are big span multiple chunks, and the new chunks can simply be
+ deleted from the ChunkTable, leaving the original chunks.
+
+ Deltas can be checked quickly by inflating the delta and checking
+ only the insertion point text, comparing that to the existing data
+ in the repository. Unfortunately the repository is likely to use a
+ different delta representation, which means at least one of them
+ will need to be fully inflated to check the delta against.
+
+* DhtPackParser should handle small-huge-small-huge
+
+ Multiple chunks need to be open at once, in case we get a bad
+ pattern of small-object, huge-object, small-object, huge-object. In
+ this case the small-objects should be put together into the same
+ chunk, to prevent having too many tiny chunks. This is tricky to do
+ with OFS_DELTA. A long OFS_DELTA requires all prior chunks to be
+ closed out so we know their lengths.
+
+* RepresentationSelector performance bad on Cassandra
+
+  The 1.8 million batch lookups done for linux-2.6 kill Cassandra; it
+  cannot handle this read load.
+
+* READ_REPAIR isn't fully accurate
+
+ There are a lot of places where the generic DHT code should be
+  helping to validate that the local replica is consistent and, where
+  it is not, helping the underlying storage system to heal the local
+  replica by reading from a remote replica and putting it back to the
+  local one.
+ Most of this should be handled in the DHT SPI layer, but the generic
+ DHT code should be giving better hints during get() method calls.
+
+* LOCAL / WORLD writes
+
+ Many writes should be done locally first, before they replicate to
+ the other replicas, as they might be backed out on an abort.
+
+ Likewise some writes must take place across sufficient replicas to
+ ensure the write is not lost... and this may include ensuring that
+ earlier local-only writes have actually been committed to all
+ replicas. This committing to replicas might be happening in the
+ background automatically after the local write (e.g. Cassandra will
+ start to send writes made by one node to other nodes, but doesn't
+ promise they finish). But parts of the code may need to force this
+ replication to complete before the higher level git operation ends.
+
+* Forks/alternates
+
+ Forking is common, but we should avoid duplicating content into the
+ fork if the base repository has it. This requires some sort of
+ change to the key structure so that chunks are owned by an object
+ pool, and the object pool owns the repositories that use it. GC
+ proceeds at the object pool level, rather than the repository level,
+ but might want to take some of the reference namespace into account
+ to avoid placing forked less-common content near primary content.
diff --git a/org.eclipse.jgit.storage.dht/build.properties b/org.eclipse.jgit.storage.dht/build.properties
new file mode 100644
index 0000000000..aa1a008269
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/build.properties
@@ -0,0 +1,5 @@
+source.. = src/
+output.. = bin/
+bin.includes = META-INF/,\
+ .,\
+ plugin.properties
diff --git a/org.eclipse.jgit.storage.dht/plugin.properties b/org.eclipse.jgit.storage.dht/plugin.properties
new file mode 100644
index 0000000000..a6a7d72dba
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/plugin.properties
@@ -0,0 +1,2 @@
+plugin_name=JGit DHT Storage (Incubation)
+provider_name=Eclipse.org
diff --git a/org.eclipse.jgit.storage.dht/pom.xml b/org.eclipse.jgit.storage.dht/pom.xml
new file mode 100644
index 0000000000..c9fe599b50
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/pom.xml
@@ -0,0 +1,155 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (C) 2011, Google Inc.
+ and other copyright owners as documented in the project's IP log.
+
+ This program and the accompanying materials are made available
+ under the terms of the Eclipse Distribution License v1.0 which
+ accompanies this distribution, is reproduced below, and is
+ available at http://www.eclipse.org/org/documents/edl-v10.php
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or
+ without modification, are permitted provided that the following
+ conditions are met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ - Neither the name of the Eclipse Foundation, Inc. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit-parent</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ </parent>
+
+ <artifactId>org.eclipse.jgit.storage.dht</artifactId>
+ <name>JGit - DHT Storage</name>
+
+ <description>
+ Git repository storage on a distributed hashtable
+ </description>
+
+ <properties>
+ <translate-qualifier/>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.eclipse.jgit</groupId>
+ <artifactId>org.eclipse.jgit</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <sourceDirectory>src/</sourceDirectory>
+
+ <resources>
+ <resource>
+ <directory>.</directory>
+ <includes>
+ <include>plugin.properties</include>
+ </includes>
+ </resource>
+ <resource>
+ <directory>resources/</directory>
+ </resource>
+ </resources>
+
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-source-plugin</artifactId>
+ <inherited>true</inherited>
+ <executions>
+ <execution>
+ <id>attach-sources</id>
+ <phase>process-classes</phase>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ <configuration>
+ <archive>
+ <manifestFile>${source-bundle-manifest}</manifestFile>
+ </archive>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <configuration>
+ <archive>
+ <manifestFile>${bundle-manifest}</manifestFile>
+ </archive>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <configuration>
+ <findbugsXmlOutput>true</findbugsXmlOutput>
+ <failOnError>false</failOnError>
+ </configuration>
+ <executions>
+ <execution>
+ <goals>
+ <goal>check</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-pmd-plugin</artifactId>
+ <configuration>
+ <sourceEncoding>utf-8</sourceEncoding>
+ <minimumTokens>100</minimumTokens>
+ <targetJdk>1.5</targetJdk>
+ <format>xml</format>
+ <failOnViolation>false</failOnViolation>
+ </configuration>
+ <executions>
+ <execution>
+ <goals>
+ <goal>cpd-check</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties
new file mode 100644
index 0000000000..c004fda7ac
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties
@@ -0,0 +1,35 @@
+cannotInsertObject=Cannot insert any objects into a ChunkWriter
+corruptChunk=Chunk {0} is corrupt and does not match its name
+corruptCompressedObject=Corrupt deflate stream in {0} at {1}
+cycleInDeltaChain=Cycle in delta chain {0} offset {1}
+databaseRequired=Database is required
+expectedObjectSizeDuringCopyAsIs=Object {0} has size of 0
+invalidChunkKey=Invalid ChunkKey {0}
+invalidObjectIndexKey=Invalid ObjectIndexKey {0}
+invalidObjectInfo=Invalid ObjectInfo on {0}
+missingChunk=Missing {0}
+missingLongOffsetBase=Missing base for offset -{1} in meta of {0}
+nameRequired=Name or key is required
+noSavedTypeForBase=No type information for base object at {0}
+notTimeUnit=Invalid time unit value: {0}={1}
+objectListSelectingName=Selecting list name
+objectListCountingFrom=Counting objects in {0}
+objectTypeUnknown=unknown
+packParserInvalidPointer=Invalid pointer inside pack parser: {0}, chunk {1}, offset {2}.
+packParserRollbackFailed=DhtPackParser rollback failed
+protobufNegativeValuesNotSupported=Negative values are not supported
+protobufNoArray=bytes field requires ByteBuffer.hasArray to be true
+protobufNotBooleanValue=bool field {0} has invalid value {1}
+protobufUnsupportedFieldType=Unsupported protobuf field type {0}
+protobufWrongFieldLength=Field {0} should have length of {1}, found {2}
+protobufWrongFieldType=Field {0} is of type {1}, expected {2}
+recordingObjects=Recording objects
+repositoryAlreadyExists=Repository {0} already exists
+repositoryMustBeBare=Only bare repositories are supported
+shortCompressedObject=Short deflate stream in {0} at {1}
+timeoutChunkMeta=Timeout waiting for ChunkMeta
+timeoutLocatingRepository=Timeout locating {0}
+tooManyObjectsInPack={0} is too many objects in a pack file
+unsupportedChunkIndex=Unsupported index version {0} in {1}
+unsupportedObjectTypeInChunk=Unknown object type {0} in {1} at {2}
+wrongChunkPositionInCachedPack=Cached pack {0} put chunk {1} at {2} but delta in {3} expects it at {4}
diff --git a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html
new file mode 100644
index 0000000000..c2c8b4c245
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html
@@ -0,0 +1,1151 @@
+<html>
+<head>
+<title>Git on DHT Schema</title>
+
+<style type='text/css'>
+body { font-size: 10pt; }
+h1 { font-size: 16pt; }
+h2 { font-size: 12pt; }
+h3 { font-size: 10pt; }
+
+body {
+ margin-left: 8em;
+ margin-right: 8em;
+}
+h1 { margin-left: -3em; }
+h2 { margin-left: -2em; }
+h3 { margin-left: -1em; }
+hr { margin-left: -4em; margin-right: -4em; }
+
+.coltoc {
+ font-size: 8pt;
+ font-family: monospace;
+}
+
+.rowkey {
+ margin-left: 1em;
+ padding-top: 0.2em;
+ padding-left: 1em;
+ padding-right: 1em;
+ width: 54em;
+ border: 1px dotted red;
+ background-color: #efefef;
+ white-space: nowrap;
+}
+.rowkey .header {
+ font-weight: bold;
+ padding-right: 1em;
+}
+.rowkey .var {
+ font-style: italic;
+ font-family: monospace;
+}
+.rowkey .lit {
+ font-weight: bold;
+ font-family: monospace;
+}
+.rowkey .example {
+ font-family: monospace;
+}
+.rowkey p {
+ white-space: normal;
+}
+
+.colfamily {
+ margin-top: 0.5em;
+ padding-top: 0.2em;
+ padding-left: 1em;
+ padding-right: 1em;
+ width: 55em;
+ border: 1px dotted blue;
+ background-color: #efefef;
+ white-space: nowrap;
+}
+.colfamily .header {
+ font-weight: bold;
+ padding-right: 1em;
+}
+.colfamily .var {
+ font-style: italic;
+ font-family: monospace;
+}
+.colfamily .lit {
+ font-family: monospace;
+}
+.colfamily .example {
+ font-family: monospace;
+}
+.colfamily p {
+ white-space: normal;
+}
+
+.summary_table {
+ border-collapse: collapse;
+ border-spacing: 0;
+}
+.summary_table .desc {
+ font-size: 8pt;
+ white-space: nowrap;
+ text-align: right;
+ width: 20em;
+}
+.summary_table td {
+ border: 1px dotted lightgray;
+ padding-top: 2px;
+ padding-bottom: 2px;
+ padding-left: 5px;
+ padding-right: 5px;
+ vertical-align: top;
+}
+.summary_table tr.no_border td {
+ border: none;
+}
+</style>
+</head>
+<body>
+
+<h1>Git on DHT Schema</h1>
+
+<p>Storing Git repositories on a Distributed Hash Table (DHT) may
+improve scaling for large traffic, but also simplifies management when
+there are many small repositories.</p>
+
+<h2>Table of Contents</h2>
+<ul>
+ <li><a href="#concepts">Concepts</a></li>
+ <li><a href="#summary">Summary</a></li>
+ <li><a href="#security">Data Security</a></li>
+
+ <li>Tables:
+ <ul>
+ <li><a href="#REPOSITORY_INDEX">Table REPOSITORY_INDEX</a>
+ (
+ <a href="#REPOSITORY_INDEX.id" class="toccol">id</a>
+ )</li>
+
+ <li><a href="#REPOSITORY">Table REPOSITORY</a>
+ (
+ <a href="#REPOSITORY.chunk-info" class="toccol">chunk-info</a>,
+ <a href="#REPOSITORY.cached-pack" class="toccol">cached-pack</a>
+ )</li>
+
+ <li><a href="#REF">Table REF</a>
+ (
+ <a href="#REF.target" class="toccol">target</a>
+ )</li>
+
+ <li><a href="#OBJECT_INDEX">Table OBJECT_INDEX</a>
+ (
+ <a href="#OBJECT_INDEX.info" class="toccol">info</a>
+ )</li>
+
+ <li><a href="#CHUNK">Table CHUNK</a>
+ (
+ <a href="#CHUNK.chunk" class="toccol">chunk</a>,
+ <a href="#CHUNK.index" class="toccol">index</a>,
+ <a href="#CHUNK.meta" class="toccol">meta</a>
+ )</li>
+ </ul>
+ </li>
+
+ <li>Protocol Messages:
+ <ul>
+ <li><a href="#message_RefData">RefData</a></li>
+ <li><a href="#message_ObjectInfo">ObjectInfo</a></li>
+ <li><a href="#message_ChunkInfo">ChunkInfo</a></li>
+ <li><a href="#message_ChunkMeta">ChunkMeta</a></li>
+ <li><a href="#message_CachedPackInfo">CachedPackInfo</a></li>
+ </ul>
+ </li>
+</ul>
+
+<a name="concepts"><h2>Concepts</h2></a>
+
+<p><i>Git Repository</i>: Stores the version control history for a
+single project. Each repository is a directed acyclic graph (DAG)
+composed of objects. Revision history for a project is described by a
+commit object pointing to the complete set of files that make up that
+version of the project, and a pointer to the commit that came
+immediately before it. Repositories also contain references,
+associating a human readable branch or tag name to a specific commit
+object. Tommi Virtanen has a
+<a href="http://eagain.net/articles/git-for-computer-scientists/">more
+detailed description of the Git DAG</a>.</p>
+
+<p><i>Object</i>: Git objects are named by applying the SHA-1 hash
+algorithm to their contents. There are 4 object types: commit, tree,
+blob, tag. Objects are typically stored deflated using libz deflate,
+but may also be delta compressed against another similar object,
+further reducing the storage required. The big factor for Git
+repository size is usually object count, e.g. the linux-2.6 repository
+contains 1.8 million objects.</p>
+
+<p><i>Reference</i>: Associates a human readable symbolic name, such
+as <code>refs/heads/master</code> to a specific Git object, usually a
+commit or tag. References are updated to point to the most recent
+object whenever changes are committed to the repository.</p>
+
+<p><i>Git Pack File</i>: A container stream holding many objects in a
+highly compressed format. On the local filesystem, Git uses pack files
+to reduce both inode and space usage by combining millions of objects
+into a single data stream. On the network, Git uses pack files as the
+basic network protocol to transport objects from one system's
+repository to another.</p>
+
+<p><i>Garbage Collection</i>: Scanning the Git object graph to locate
+objects that are reachable, and others that are unreachable. Git also
+generally performs data recompression during this task to produce more
+optimal deltas between objects, reducing overall disk usage and data
+transfer sizes. This is independent of any GC that may be performed by
+the DHT to clean up old cells.</p>
+
+<p>The basic storage strategy employed by this schema is to break a
+standard Git pack file into chunks, approximately 1 MiB in size. Each
+chunk is stored as one row in the <a href="#CHUNK">CHUNK</a> table.
+During reading, chunks are paged into the application on demand, but
+may also be prefetched using prefetch hints. Rules are used to break
+the standard pack into chunks, these rules help to improve reference
+locality and reduce the number of chunk loads required to service
+common operations. In a nutshell, the DHT is used as a virtual memory
+system for pages about 1 MiB in size.</p>
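+
+<p>As a rough illustration of this strategy (the <code>ChunkPager</code>
+class below is hypothetical; only the ~1 MiB target and the
+one-row-per-chunk idea come from the description above):</p>
+
+<pre>
+// Sketch only: accumulate compressed object data into ~1 MiB pages,
+// each completed page becoming one CHUNK.chunk cell.
+class ChunkPager {
+  private static final int TARGET_CHUNK_SIZE = 1024 * 1024;
+
+  private java.io.ByteArrayOutputStream page =
+      new java.io.ByteArrayOutputStream(TARGET_CHUNK_SIZE);
+
+  void append(byte[] compressedObject) {
+    if (page.size() + compressedObject.length > TARGET_CHUNK_SIZE) {
+      if (page.size() > 0)
+        flushPage();            // hypothetical: write page as a CHUNK row
+    }
+    page.write(compressedObject, 0, compressedObject.length);
+  }
+
+  void flushPage() { // hypothetical DHT put of page.toByteArray()
+    page = new java.io.ByteArrayOutputStream(TARGET_CHUNK_SIZE);
+  }
+}
+</pre>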
+
+<a name="summary"><h2>Summary</h2></a>
+
+<p>The schema consists of a handful of tables. Size estimates are
+given for one copy of the linux-2.6 Git repository, a relative torture
+test case that contains 1.8 million objects and is 425 MiB when stored
+on the local filesystem. All sizes are before any replication made by
+the DHT, or its underlying storage system.</p>
+
+<table style='margin-left: 2em' class='summary_table'>
+ <tr>
+ <th>Table</th>
+ <th>Rows</th>
+ <th>Cells/Row</th>
+ <th>Bytes</th>
+ <th>Bytes/Row</th>
+ </tr>
+
+ <tr>
+ <td><a href="#REPOSITORY_INDEX">REPOSITORY_INDEX</a>
+ <div class='desc'>Map host+path to surrogate key.</div></td>
+ <td align='right'>1</td>
+ <td align='right'>1</td>
+ <td align='right'>&lt; 100 bytes</td>
+ <td align='right'>&lt; 100 bytes</td>
+ </tr>
+
+ <tr>
+ <td><a href="#REPOSITORY">REPOSITORY</a>
+ <div class='desc'>Accounting and replica management.</div></td>
+ <td align='right'>1</td>
+ <td align='right'>403</td>
+ <td align='right'>65 KiB</td>
+ <td align='right'>65 KiB</td>
+ </tr>
+
+ <tr>
+ <td><a href="#REF">REF</a>
+ <div class='desc'>Bind branch/tag name to Git object.</div></td>
+ <td align='right'>211</td>
+ <td align='right'>1</td>
+ <td align='right'>14 KiB</td>
+ <td align='right'>67 bytes</td>
+ </tr>
+
+ <tr>
+ <td><a href="#OBJECT_INDEX">OBJECT_INDEX</a>
+ <div class='desc'>Locate Git object by SHA-1 name.</div></td>
+ <td align='right'>1,861,833</td>
+ <td align='right'>1</td>
+ <td align='right'>154 MiB</td>
+ <td align='right'>87 bytes</td>
+ </tr>
+
+ <tr>
+ <td><a href="#CHUNK">CHUNK</a>
+ <div class='desc'>Complete Git object storage.</div></td>
+ <td align='right'>402</td>
+ <td align='right'>3</td>
+ <td align='right'>417 MiB</td>
+ <td align='right'>~ 1 MiB</td>
+ </tr>
+
+ <tr class='no_border'>
+ <td align='right'><i>Total</i></td>
+ <td align='right'>1,862,448</td>
+ <td align='right'></td>
+ <td align='right'>571 MiB</td>
+ <td align='right'></td>
+ </tr>
+</table>
+
+<a name="security"><h2>Data Security</h2></a>
+
+<p>If data encryption is necessary to protect file contents, the <a
+href="#CHUNK.chunk">CHUNK.chunk</a> column can be encrypted with a
+block cipher such as AES. This column contains the revision commit
+messages, file paths, and file contents. By encrypting one column, the
+majority of the repository data is secured. As each cell value is
+about 1 MiB and contains a trailing 4 bytes of random data, an ECB
+mode of operation may be sufficient. Because the cells are already
+very highly compressed using the Git data compression algorithms,
+there is no increase in disk usage due to encryption.</p>
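+
+<p>For illustration only, encrypting one chunk cell with the standard
+Java cryptography API might look like the following. The PKCS5 padding
+and the key handling are placeholders; the AES and ECB choices come
+from the discussion above.</p>
+
+<pre>
+import javax.crypto.Cipher;
+import javax.crypto.spec.SecretKeySpec;
+
+static byte[] encryptChunk(byte[] chunkData, byte[] rawAesKey)
+    throws Exception {
+  Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");
+  cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(rawAesKey, "AES"));
+  return cipher.doFinal(chunkData); // store this as the CHUNK.chunk cell
+}
+</pre>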
+
+<p>Branch and tag names (<a href="#REF">REF</a> row keys) are not
+encrypted. If these need to be secured the portion after the ':' would
+need to be encrypted with a block cipher. However these strings are
+very short and very common (HEAD, refs/heads/master, refs/tags/v1.0),
+making encryption difficult. A variation on the schema might move all
+rows for a repository into a single protocol messsage, then encrypt
+the protobuf into a single cell. Unfortunately this strategy has a
+high update cost, and references change frequently.</p>
+
+<p>Object SHA-1 names (<a href="#OBJECT_INDEX">OBJECT_INDEX</a> row
+keys and <a href="#CHUNK.index">CHUNK.index</a> values) are not
+encrypted. This allows a reader to determine if a repository contains
+a specific revision, but does not allow them to inspect the contents
+of the revision. The CHUNK.index column could also be encrypted with a
+block cipher when CHUNK.chunk is encrypted (see above), however the
+OBJECT_INDEX table row keys cannot be encrypted if abbreviation
+expansion is to be supported for end-users of the repository. The row
+keys must be unencrypted as abbreviation resolution is performed by a
+prefix range scan over the keys.</p>
+
+<p>The remaining tables and columns contain only statistics (e.g.
+object counts or cell sizes), or internal surrogate keys
+(repository_id, chunk_key) and do not require encryption.</p>
+
+<hr />
+<a name="REPOSITORY_INDEX"><h2>Table REPOSITORY_INDEX</h2></a>
+
+<p>Maps a repository name, as presented in the URL by an end-user or
+client application, into its internal repository_id value. This
+mapping allows the repository name to be quickly modified (e.g.
+renamed) without needing to update the larger data rows of the
+repository.</p>
+
+<p>The exact semantics of the repository_name format are left as a
+deployment decision, but DNS hostname + '/' + repository name would be
+one common usage.</p>
+
+<h3>Row Key</h3>
+<div class='rowkey'>
+ <div>
+ <span class='header'>Row Key:</span>
+ <span class='var'>repository_name</span>
+ </div>
+
+ <p>Human readable name of the repository, typically derived from the
+ HTTP <code>Host</code> header and path in the URL.</p>
+
+ <p>Examples:</p>
+ <ul>
+ <li><span class='example'>com.example.git/pub/git/foo.git</span></li>
+ <li><span class='example'>com.example.git/~user/mystuff.git</span></li>
+ </ul>
+</div>
+
+<h3>Columns</h3>
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="REPOSITORY_INDEX.id"><span class='lit'>id:</span></a>
+ </div>
+
+ <p>The repository_id, as an 8-digit hex ASCII string.</p>
+</div>
+
+<h3>Size Estimate</h3>
+
+<p>Less than 100,000 rows. More likely estimate is 1,000 rows.
+Total disk usage under 512 KiB, assuming 1,000 names and 256
+characters per name.</p>
+
+<h3>Updates</h3>
+
+<p>Only on repository creation or rename, which is infrequent (&lt;10
+rows/month). Updates are performed in a row-level transaction, to
+ensure a name is either assigned uniquely or the assignment fails.</p>
+
+<h3>Reads</h3>
+
+<p>Reads are tried first against memcache, then against the DHT if the
+entry did not exist in memcache. Successful reads against the DHT are
+put back into memcache in the background.</p>
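+
+<p>A minimal sketch of this read path, assuming placeholder
+<code>memcache</code> and <code>dht</code> client handles (neither is
+defined by this schema):</p>
+
+<pre>
+String lookupRepositoryId(String repositoryName) {
+  String id = memcache.get(repositoryName);
+  if (id != null)
+    return id;
+  id = dht.get("REPOSITORY_INDEX", repositoryName, "id:");
+  if (id != null)
+    memcache.putAsync(repositoryName, id); // repopulate in the background
+  return id;
+}
+</pre>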
+
+<a name="REPOSITORY"><h2>Table REPOSITORY</h2></a>
+
+<p>Tracks top-level information about each repository.</p>
+
+<h3>Row Key</h3>
+<div class='rowkey'>
+ <div>
+ <span class='header'>Row Key:</span>
+ <span class='var'>repository_id</span>
+ </div>
+
+ <p>The repository_id, as an 8-digit hex ASCII string.</p>
+</div>
+
+<p>Typically this is assigned sequentially, then has the bits reversed
+to evenly spread repositories throughout the DHT. For example the
+first repository is <code>80000000</code>, and the second is
+<code>40000000</code>.</p>
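+
+<p>A small illustration of this assignment, using only standard Java;
+the source of the sequential counter is assumed:</p>
+
+<pre>
+// The 1st repository (n=1) maps to "80000000", the 2nd to "40000000".
+static String repositoryId(int sequentialNumber) {
+  return String.format("%08x", Integer.reverse(sequentialNumber));
+}
+</pre>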
+
+<h3>Columns</h3>
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="REPOSITORY.chunk-info"><span class='lit'>chunk-info:</span></a>
+ <span class='var'>chunk_key[short]</span>
+ </div>
+
+ <p>Cell value is the protocol message <a
+ href="#message_ChunkInfo">ChunkInfo</a> describing the chunk's
+ contents. Most of the message's fields are only useful for quota
+ accounting and reporting.</p>
+</div>
+
+<p>This column exists to track all of the chunks that make up a
+repository's object set. Garbage collection and quota accounting tasks
+can primarily drive off this column, rather than scanning the much
+larger <a href="#CHUNK">CHUNK</a> table with a regular expression on
+the chunk row key.</p>
+
+<p>As each chunk averages 1 MiB in size, the linux-2.6 repository
+(at 373 MiB) has about 400 chunks and thus about 400 chunk-info
+cells. The chromium repository (at 1 GiB) has about 1000 chunk-info
+cells. It would not be uncommon to have 2000 chunk-info cells.</p>
+
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="REPOSITORY.cached-pack"><span class='lit'>cached-pack:</span></a>
+ <span class='var'>NNNNx38</span>
+ <span class='lit'>.</span>
+ <span class='var'>VVVVx38</span>
+ </div>
+
+ <p>Variables:</p>
+ <ul>
+ <li><span class='var'>NNNNx38</span> = 40 hex digit name of the cached pack</li>
+ <li><span class='var'>VVVVx38</span> = 40 hex digit version of the cached pack</li>
+ </ul>
+
+ <p>Examples:</p>
+ <ul>
+ <li><span class='example'>4e32fb97103981e7dd53dcc786640fa4fdb444b8.8975104a03d22e54f7060502e687599d1a2c2516</span></li>
+ </ul>
+
+ <p>Cell value is the protocol message <a
+ href="#message_CachedPackInfo">CachedPackInfo</a> describing the
+ chunks that make up a cached pack.</p>
+</div>
+
+<p>The <code>cached-pack</code> column family describes large lists of
+chunks that, when combined in a specific order, create a valid
+Git pack file directly streamable to a client. This avoids needing to
+enumerate and pack the entire repository on each request.</p>
+
+<p>The cached-pack name (NNNNx38 above) is the SHA-1 of the objects
+contained within the pack, in binary, sorted. This is the standard
+naming convention for pack files on the local filesystem. The version
+(VVVVx38 above) is the SHA-1 of the chunk keys, sorted. The version
+makes the cached-pack cell unique, if any single bit in the compressed
+data is modified a different version will be generated, and a
+different cell will be used to describe the alternate version of the
+same data. The version is necessary to prevent repacks of the same
+object set (but with different compression settings or results) from
+stepping on active readers.</p>
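+
+<p>As a sketch of the version computation only (the real code may hash
+the binary form of the chunk keys rather than their string form; only
+the sort-then-SHA-1 shape comes from the description above):</p>
+
+<pre>
+import java.security.MessageDigest;
+import java.util.Arrays;
+
+static String cachedPackVersion(String[] chunkKeys) throws Exception {
+  String[] sorted = chunkKeys.clone();
+  Arrays.sort(sorted);
+  MessageDigest md = MessageDigest.getInstance("SHA-1");
+  for (String key : sorted)
+    md.update(key.getBytes("UTF-8"));
+  StringBuilder hex = new StringBuilder();
+  for (byte b : md.digest())
+    hex.append(String.format("%02x", b));
+  return hex.toString(); // the 40 hex digit VVVVx38 portion of the name
+}
+</pre>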
+
+<h2>Size Estimate</h2>
+
+<p>1 row per repository (~1,000 repositories), however the majority of
+the storage cost is in the <code>chunk-info</code> column family,
+which can have more than 2000 cells per repository.</p>
+
+<p>Each <code>chunk-info</code> cell is on average 147 bytes. For a
+large repository like chromium.git (over 1000 chunks) this is only 147
+KiB for the entire row.</p>
+
+<p>Each <code>cached-pack</code> cell is on average 5350 bytes. Most
+repositories have 1 of these cells, or 2 while the repository is being
+repacked on the server side to update the cached-pack data.</p>
+
+<h2>Updates</h2>
+
+<p>Information about each ~1 MiB chunk of pack data received over the
+network is stored as a unique column in the <code>chunk-info</code>
+column family.</p>
+
+<p>Most pushes are at least 2 chunks (commit, tree), with 50 pushes
+per repository per day being possible (50,000 new cells/day).</p>
+
+<p><b>TODO:</b> Average push rates?</p>
+
+<h2>Reads</h2>
+
+<p><i>Serving clients:</i> Read all cells in the
+<code>cached-pack</code> column family, typically only 1-5 cells. The
+cells are cached in memcache and read from there first.</p>
+
+<p><i>Garbage collection:</i> Read all cells in the
+<code>chunk-info</code> column family to determine which chunks are
+owned by this repository, without scanning the <a href="#CHUNK">CHUNK</a> table.
+Delete <code>chunk-info</code> after the corresponding <a href="#CHUNK">CHUNK</a>
+row has been deleted. Unchanged chunks have their info left alone.</p>
+
+<a name="REF"><h2>Table REF</h2></a>
+
+<p>Associates a human readable branch (e.g.
+<code>refs/heads/master</code>) or tag (e.g.
+<code>refs/tags/v1.0</code>) name to the Git
+object that represents that current state of
+the repository.</p>
+
+<h3>Row Key</h3>
+<div class='rowkey'>
+ <div>
+ <span class='header'>Row Key:</span>
+ <span class='var'>repository_id</span>
+ <span class='lit'>:</span>
+ <span class='var'>ref_name</span>
+ </div>
+
+ <p>Variables:</p>
+ <ul>
+ <li><span class='var'>repository_id</span> = Repository owning the reference (see above)</li>
+ <li><span class='var'>ref_name</span> = Name of the reference, UTF-8 string</li>
+ </ul>
+
+ <p>Examples:</p>
+ <ul>
+ <li><span class='example'>80000000:HEAD</span></li>
+ <li><span class='example'>80000000:refs/heads/master</span></li>
+ <br />
+ <li><span class='example'>40000000:HEAD</span></li>
+ <li><span class='example'>40000000:refs/heads/master</span></li>
+ </ul>
+</div>
+
+<p>The separator <code>:</code> used in the row key was chosen because
+this character is not permitted in a Git reference name.</p>
+
+<h3>Columns</h3>
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="REF.target"><span class='lit'>target:</span></a>
+ </div>
+
+ <p>Cell value is the protocol message
+ <a href="#message_RefData">RefData</a> describing the
+ current SHA-1 the reference points to, and the chunk
+ it was last observed in. The chunk hint allows skipping
+ a read of <a href="#OBJECT_INDEX">OBJECT_INDEX</a>.</p>
+
+ <p>Several versions (5) are stored for emergency rollbacks.
+ Additional versions beyond 5 are cleaned up during table garbage
+ collection as managed by the DHT's cell GC.</p>
+</div>
+
+<h3>Size Estimate</h3>
+
+<p><i>Normal Git usage:</i> ~10 branches per repository, ~200 tags.
+For 1,000 repositories, about 200,000 rows total. Average row size is
+about 240 bytes/row before compression (67 after), or 48 MiB total.</p>
+
+<p><i>Gerrit Code Review usage:</i> More than 300 new rows per day.
+Each snapshot of each change under review is one reference.</p>
+
+<h3>Updates</h3>
+
+<p>Writes are performed by doing an atomic compare-and-swap (through a
+transaction), changing the RefData protocol buffer.</p>
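+
+<p>Schematically (the <code>dht</code> handle and its
+<code>compareAndSwap</code> call below are placeholders, not an API
+defined here; RefData is the protocol message referenced in the
+<code>target:</code> column description above):</p>
+
+<pre>
+boolean updateRef(String repositoryId, String refName,
+    byte[] oldRefData, byte[] newRefData) {
+  String rowKey = repositoryId + ":" + refName;
+  // Succeeds only if the target: cell still holds oldRefData.
+  return dht.compareAndSwap("REF", rowKey, "target:",
+      oldRefData, newRefData);
+}
+</pre>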
+
+<h3>Reads</h3>
+
+<p>Reads perform a prefix scan for all rows starting with
+<code>repository_id:</code>. Plans exist to cache these reads within a
+custom service, avoiding most DHT queries.</p>
+
+<a name="OBJECT_INDEX"><h2>Table OBJECT_INDEX</h2></a>
+
+<p>The Git network protocol has clients sending object SHA-1s to the
+server, with no additional context or information. End-users may also
+type a SHA-1 into a web search box. This table provides a mapping of
+the object SHA-1 to which chunk(s) store the object's data. The table
+is sometimes also called the 'global index', since it names where
+every single object is stored.</p>
+
+<h3>Row Key</h3>
+<div class='rowkey'>
+ <div>
+ <span class='header'>Row Key:</span>
+ <span class='var'>NN</span>
+ <span class='lit'>.</span>
+ <span class='var'>repository_id</span>
+ <span class='lit'>.</span>
+ <span class='var'>NNx40</span>
+ </div>
+
+ <p>Variables:</p>
+ <ul>
+ <li><span class='var'>NN</span> = First 2 hex digits of object SHA-1</li>
+ <li><span class='var'>repository_id</span> = Repository owning the object (see above)</li>
+ <li><span class='var'>NNx40</span> = Complete object SHA-1 name, in hex</li>
+ </ul>
+
+ <p>Examples:</p>
+ <ul>
+ <li><span class='example'>2b.80000000.2b5c9037c81c38b3b9abc29a3a87a4abcd665ed4</span></li>
+ <li><span class='example'>8f.40000000.8f270a441569b127cc4af8a6ef601d94d9490efb</span></li>
+ </ul>
+</div>
+
+<p>The first 2 hex digits (<code>NN</code>) distribute object keys
+within the same repository around the DHT keyspace, preventing a busy
+repository from creating too much of a hot-spot within the DHT. To
+simplify key generation, these 2 digits are repeated after the
+repository_id, as part of the 40 hex digit object name.</p>
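+
+<p>Constructing a row key is then simple string assembly (illustration
+only; the helper name is not part of any API here):</p>
+
+<pre>
+// e.g. objectIndexKey("80000000",
+//          "2b5c9037c81c38b3b9abc29a3a87a4abcd665ed4")
+// yields "2b.80000000.2b5c9037c81c38b3b9abc29a3a87a4abcd665ed4"
+static String objectIndexKey(String repositoryId, String objectHex) {
+  return objectHex.substring(0, 2) + "." + repositoryId + "." + objectHex;
+}
+</pre>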
+
+<p>Keys must be clustered by repository_id to support extending
+abbreviations. End-users may supply an abbreviated SHA-1 of 4 or more
+digits (up to 39) and ask the server to complete them to a full 40
+digit SHA-1 if the server has the relevant object within the
+repository's object set.</p>
+
+<p>A schema variant that did not include the repository_id as part of
+the row key was considered, but discarded because completing a short
+4-6 digit abbreviated SHA-1 would be impractical once there were
+billions of objects stored in the DHT. Git end-users expect to be able
+to use 4 or 6 digit abbreviations on very small repositories, as the
+number of objects is low and thus the number of bits required to
+uniquely name an object within that object set is small.</p>
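+
+<p>Completing an abbreviation is then a prefix range scan over these
+keys, followed by reading the full 40 digit name back out of each
+matching row key. The scan itself is assumed to come from the
+underlying DHT; the sketch below only shows the extraction step.</p>
+
+<pre>
+// matchingRowKeys come from a prefix scan over keys such as
+// "8f.40000000.8f270a44"; the full name is the trailing 40 characters.
+static String[] completeAbbreviation(String[] matchingRowKeys) {
+  String[] names = new String[matchingRowKeys.length];
+  for (int i = 0; i != matchingRowKeys.length; i++) {
+    String key = matchingRowKeys[i];
+    names[i] = key.substring(key.length() - 40);
+  }
+  return names;
+}
+</pre>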
+
+<h3>Columns</h3>
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="OBJECT_INDEX.info"><span class='lit'>info:</span></a>
+ <span class='var'>chunk_key[short]</span>
+ </div>
+
+ <p>Cell value is the protocol message
+ <a href="#message_ObjectInfo">ObjectInfo</a> describing how the object
+ named by the row key is stored in the chunk named by the column name.</p>
+
+ <p>Cell timestamp matters. The <b>oldest cell</b> within the
+ entire column family is favored during reads. As chunk_key is
+ unique, versions within a single column aren't relevant.</p>
+</div>
+
+<h3>Size Estimate</h3>
+
+<p>Average row size per object/chunk pair is 144 bytes uncompressed
+(87 compressed), based on the linux-2.6 repository. The linux-2.6
+repository has 1.8 million objects, and is growing at a rate of about
+300,000 objects/year. Total usage for linux-2.6 is above 154M.</p>
+
+<p>Most rows contain only 1 cell, as the object appears in only 1
+chunk within that repository.</p>
+
+<p><i>Worst case:</i> 1.8 million rows/repository * 1,000 repositories
+is around 1.8 billion rows and 182G.</p>
+
+<h3>Updates</h3>
+
+<p>One write per object received over the network, typically performed
+as part of an asynchronous batch. Each batch is sized around 512 KiB
+(about 3000 rows). Because SHA-1s are uniformly distributed, row keys
+are first sorted and then batched into buckets of about 3000 rows. To
+prevent too much activity from going to one table segment at a time,
+the complete object list is segmented into up to 32 groups, which are
+written in round-robin order.</p>
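+
+<p>The sketch below shows one way to produce that write order: sort the
+keys, cut the sorted key space into up to 32 contiguous groups, and emit
+one ~3000-row batch from each group in turn so consecutive writes land in
+distant parts of the key space. The code is illustrative, not the actual
+inserter implementation.</p>
+
+<pre>
+final class ObjectIndexBatching {
+  /** Sorts the keys, then orders ~3000-row batches round-robin across regions. */
+  static String[] planWriteOrder(String[] rowKeys) {
+    String[] sorted = rowKeys.clone();
+    java.util.Arrays.sort(sorted);                     // SHA-1 keys spread uniformly
+
+    final int batchSize = 3000;
+    int batches = (sorted.length + batchSize - 1) / batchSize;
+    int groups = Math.min(32, Math.max(1, batches));   // contiguous key-space regions
+    int perGroup = (batches + groups - 1) / groups;    // batches owned by each group
+
+    String[] ordered = new String[sorted.length];
+    int out = 0;
+    // Round-robin: the n-th batch of every group before the (n+1)-th of any group.
+    for (int round = 0; round < perGroup; round++) {
+      for (int g = 0; g < groups; g++) {
+        int start = (g * perGroup + round) * batchSize;
+        int end = Math.min(start + batchSize, sorted.length);
+        for (int i = start; i < end; i++)
+          ordered[out++] = sorted[i];
+      }
+    }
+    return ordered;
+  }
+}
+</pre>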
+
+<p>A full push of the linux-2.6 repository writes 1.8 million
+rows as there are 1.8 million objects in the pack stream.</p>
+
+<p>During normal insert or receive operations, each received object is
+a blind write to add one new <code>info:chunk_key[short]</code> cell
+to the row. During repack, all cells in the <code>info</code> column
+family are replaced with a single cell.</p>
+
+<h3>Reads</h3>
+
+<p>During common ancestor negotiation, reads occur in batches of 64-128
+full row keys, uniformly distributed throughout the key space. Most of
+these reads are misses: the OBJECT_INDEX table does not contain the
+key offered by the client. A successful negotiation for most developers
+requires at least two rounds of 64 objects back-to-back over HTTP. Due
+to the high miss rate on this table, an in-memory Bloom filter may be
+important for performance.</p>
+
+<p>To support the high read rate (and high miss rate) during common
+ancestor negotiation, an alternative to an in-memory Bloom filter
+within the DHT is to download the entire set of keys into an alternate
+service job for recently accessed repositories. This service can only
+be used if <i>all</i> of the keys for the same repository_id are
+hosted within the service. Given this is under 36 GiB for the worst
+case 1.8 billion rows mentioned above, this may be feasible. Loading
+the table can be performed by fetching <a
+href="#REPOSITORY.chunk-info">REPOSITORY.chunk-info</a> and then
+performing parallel gets for the <a
+href="#CHUNK.index">CHUNK.index</a> column, and scanning the local
+indexes to construct the list of known objects.</p>
+
+<p>During repacking with no delta reuse, the worst case requires
+reading all records with the same repository_id (for linux-2.6 this
+is 1.8 million rows). Reads are made in a configurable batch size,
+currently 2048 keys/batch, with 4 concurrent batches in flight at a
+time.</p>
+
+<p>Reads are tried first against memcache, then against the DHT if the
+entry did not exist in memcache. Successful reads against the DHT are
+put back into memcache in the background.</p>
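+
+<p>A minimal sketch of this read-through pattern is shown below; the
+<code>Cache</code> and <code>Dht</code> interfaces and the background
+executor are placeholders, not the actual JGit or memcache APIs.</p>
+
+<pre>
+import java.util.concurrent.Executor;
+
+final class ReadThroughExample {
+  interface Cache { byte[] get(String key); void put(String key, byte[] value); }
+  interface Dht   { byte[] read(String rowKey); }
+
+  static byte[] read(Cache memcache, Dht dht, Executor background,
+      final String rowKey) {
+    byte[] cached = memcache.get(rowKey);
+    if (cached != null)
+      return cached;                          // cache hit, no DHT round trip
+
+    final byte[] loaded = dht.read(rowKey);   // authoritative store
+    if (loaded != null) {
+      background.execute(new Runnable() {
+        public void run() {
+          memcache.put(rowKey, loaded);       // backfill off the request path
+        }
+      });
+    }
+    return loaded;
+  }
+}
+</pre>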
+
+<a name="CHUNK"><h2>Table CHUNK</h2></a>
+
+<p>Stores the object data for a repository, containing commit history,
+directory structure, and file revisions. Each chunk is typically 1 MiB
+in size, excluding the index and meta columns.</p>
+
+<h3>Row Key</h3>
+<div class='rowkey'>
+ <div>
+ <span class='header'>Row Key:</span>
+ <span class='var'>HH</span>
+ <span class='lit'>.</span>
+ <span class='var'>repository_id</span>
+ <span class='lit'>.</span>
+ <span class='var'>HHx40</span>
+ </div>
+
+ <p>Variables:</p>
+ <ul>
+ <li><span class='var'>HH</span> = First 2 hex digits of chunk SHA-1</li>
+ <li><span class='var'>repository_id</span> = Repository owning the chunk (see above)</li>
+ <li><span class='var'>HHx40</span> = Complete chunk SHA-1, in hex</li>
+ </ul>
+
+ <p>Examples:</p>
+ <ul>
+ <li><span class='example'>09.80000000.09e0eb57543be633b004b672cbebdf335aa4d53f</span> <i>(full key)</i></li>
+ </ul>
+</div>
+
+<p>Chunk keys are derived from the SHA-1 of the
+<code>chunk:</code> column, which is the compressed object contents
+stored within the chunk. As the chunk data includes a 32 bit salt in
+the trailing 4 bytes, this value is random even for the exact same
+object input.</p>
+
+<p>The leading 2 hex digit <code>HH</code> component distributes
+chunks for the same repository (and over the same time period) evenly
+around the DHT keyspace, preventing any portion from becoming too
+hot.</p>
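+
+<p>A minimal sketch of this derivation is shown below: append a 4-byte
+random salt to the compressed data, hash the result with SHA-1, and build
+the row key from the resulting hex digits. The class and method names are
+illustrative, not the actual ChunkKey code, and the 8-hex-digit
+repository_id formatting follows the example above.</p>
+
+<pre>
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+
+final class ChunkKeyExample {
+  static String chunkRowKey(int repositoryId, byte[] packedObjects)
+      throws NoSuchAlgorithmException {
+    byte[] salt = new byte[4];
+    new SecureRandom().nextBytes(salt);                // randomizes the key
+
+    // The salted data is what the chunk: column would hold.
+    byte[] salted = new byte[packedObjects.length + 4];
+    System.arraycopy(packedObjects, 0, salted, 0, packedObjects.length);
+    System.arraycopy(salt, 0, salted, packedObjects.length, 4);
+
+    byte[] sha1 = MessageDigest.getInstance("SHA-1").digest(salted);
+    StringBuilder hex = new StringBuilder(40);
+    for (byte b : sha1)
+      hex.append(String.format("%02x", b));
+
+    return hex.substring(0, 2)                         // HH key component
+        + "." + String.format("%08x", repositoryId)    // owning repository
+        + "." + hex;                                   // full 40-digit chunk name
+  }
+}
+</pre>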
+
+<h3>Columns</h3>
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="CHUNK.chunk"><span class='lit'>chunk:</span></a>
+ </div>
+
+ <p>Multiple objects in Git pack-file format, about 1 MiB in size.
+ The data is already very highly compressed by Git and is not further
+ compressible by the DHT.</p>
+</div>
+
+<p>This column is essentially the standard Git pack-file format,
+without the standard header or trailer. Objects can be stored in
+either whole format (object content is simply deflated inline)
+or in delta format (a reference to a delta base is followed by a
+deflated sequence of copy and/or insert instructions to recreate
+the object content). The OBJ_OFS_DELTA format is preferred
+for deltas, since it tends to use a shorter encoding than the
+OBJ_REF_DELTA format. Offsets that reach back beyond the start of
+the chunk are actually offsets into other chunks, and must be
+resolved using the <code>meta.base_chunk.relative_start</code> field.</p>
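+
+<p>As a sketch of that resolution (using illustrative types, not the
+actual PackChunk code): a delta base at a negative position relative to
+the current chunk is located by scanning the BaseChunk list, which is
+sorted by ascending relative_start, for the first entry that reaches far
+enough back; the base's offset inside that chunk is then
+relative_start + position.</p>
+
+<pre>
+final class BaseChunkLookup {
+  static final class BaseChunk {
+    final long relativeStart;  // bytes from the base chunk's start to this chunk's start
+    final String chunkKey;
+    BaseChunk(long relativeStart, String chunkKey) {
+      this.relativeStart = relativeStart;
+      this.chunkKey = chunkKey;
+    }
+  }
+
+  /**
+   * @param position delta base position relative to this chunk's start; negative.
+   * @return index of the BaseChunk holding that position, or -1 if none matches.
+   */
+  static int findBaseChunk(BaseChunk[] sortedByRelativeStart, long position) {
+    for (int i = 0; i < sortedByRelativeStart.length; i++) {
+      BaseChunk b = sortedByRelativeStart[i];
+      if (b.relativeStart >= -position)
+        return i;            // offset inside that chunk is relativeStart + position
+    }
+    return -1;               // meta is incomplete; the base cannot be located
+  }
+}
+</pre>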
+
+<p>Because the row key is derived from the SHA-1 of this column, the
+trailing 4 bytes are randomly generated at insertion time, making it
+impractical for remote clients to predict the name of the chunk row.
+This allows the stream parser to blindly insert rows without first
+checking for row existence, or worrying about replacing an existing
+row and causing data corruption.</p>
+
+<p>This column value is essentially opaque to the DHT.</p>
+
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="CHUNK.index"><span class='lit'>index:</span></a>
+ </div>
+
+ <p>Binary-searchable table listing the object SHA-1 and starting offset
+ of that object within the <code>chunk:</code> data stream. The data
+ in this index is essentially random (due to the SHA-1s stored in
+ binary) and thus is not compressible.</p>
+</div>
+
+<p>Sorted list of the SHA-1 of each object stored in this chunk,
+along with its offset. This column allows efficient random access to
+any object within the chunk, without needing to perform a remote read
+against the <a href="#OBJECT_INDEX">OBJECT_INDEX</a> table. The column
+is especially useful at read time, since pointers within Git objects
+frequently reference other objects stored in the same chunk.</p>
+
+<p>This column is sometimes called the local index, since it is local
+only to the chunk and thus differs from the global index stored in the
+<a href="#OBJECT_INDEX">OBJECT_INDEX</a> table.</p>
+
+<p>The column size is 24 bytes per object stored in the chunk. Commit
+chunks store on average 2200 commits/chunk, so a commit chunk index is
+about 52,800 bytes.</p>
+
+<p>This column value is essentially opaque to the DHT.</p>
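+
+<p>As an illustration of how the local index supports random access, the
+sketch below binary-searches a byte array of fixed 24-byte records (a
+20-byte binary SHA-1 followed by a 4-byte big-endian offset). That record
+layout is an assumption chosen to match the 24 bytes/object figure above,
+not the actual ChunkIndex encoding.</p>
+
+<pre>
+final class LocalIndexSearch {
+  /** @return offset of the object within the chunk data, or -1 if absent. */
+  static int findOffset(byte[] index, byte[] objectSha1) {
+    final int recordLen = 24;                  // assumed: 20-byte SHA-1 + 4-byte offset
+    int lo = 0, hi = index.length / recordLen;
+    while (lo < hi) {
+      int mid = (lo + hi) >>> 1;
+      int cmp = compare(index, mid * recordLen, objectSha1);
+      if (cmp == 0) {
+        int p = mid * recordLen + 20;          // offset follows the SHA-1
+        return ((index[p] & 0xff) << 24) | ((index[p + 1] & 0xff) << 16)
+            | ((index[p + 2] & 0xff) << 8) | (index[p + 3] & 0xff);
+      } else if (cmp < 0)
+        lo = mid + 1;                          // record sorts before the target
+      else
+        hi = mid;
+    }
+    return -1;
+  }
+
+  private static int compare(byte[] index, int recordStart, byte[] sha1) {
+    for (int i = 0; i < 20; i++) {
+      int a = index[recordStart + i] & 0xff;
+      int b = sha1[i] & 0xff;
+      if (a != b)
+        return a - b;
+    }
+    return 0;
+  }
+}
+</pre>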
+
+<div class='colfamily'>
+ <div>
+ <span class='header'>Column:</span>
+ <a name="CHUNK.meta"><span class='lit'>meta:</span></a>
+ </div>
+
+ <p>Cell value is the protocol message
+ <a href="#message_ChunkMeta">ChunkMeta</a> describing prefetch
+ hints, object fragmentation, and delta base locations. Unlike
+ <code>chunk:</code> and <code>index:</code>, this column is
+ somewhat compressible.</p>
+</div>
+
+<p>The meta column provides information critical for reading the
+chunk's data. (Unlike <a href="#message_ChunkInfo">ChunkInfo</a> in
+the <a href="#REPOSITORY">REPOSITORY</a> table, which is used only for
+accounting.)</p>
+
+<p>The most important element is the BaseChunk nested message,
+describing a chunk that contains a base object required to inflate
+an object that is stored in this chunk as a delta.</p>
+
+<h3>Chunk Contents</h3>
+
+<p>Chunks try to store only a single object type; however, mixed object
+type chunks are supported. Storing only one object type per
+chunk improves data locality, reducing the number of chunks that need
+to be accessed from the DHT in order to perform a particular Git
+operation. Clustering commits together into a 'commit chunk' improves
+data locality during log/history walking operations, while clustering
+trees together into a 'tree chunk' improves data locality during the
+early stages of packing or difference generation.</p>
+
+<p>Chunks reuse the standard Git pack data format to support direct
+streaming of a chunk's <code>chunk:</code> column to clients, without
+needing to perform any data manipulation on the server. This enables
+high speed data transfer from the DHT to the client.</p>
+
+<h3>Large Object Fragmentation</h3>
+
+<p>If a chunk contains more than one object, all objects within the
+chunk must store their complete compressed form within the chunk. This
+limits an object to less than 1 MiB of compressed data.</p>
+
+<p>Larger objects whose compressed size is bigger than 1 MiB are
+fragmented into multiple chunks. The first chunk contains the object's
+pack header, and the first 1 MiB of compressed data. Subsequent data
+is stored in additional chunks. The additional chunk keys are stored
+in the <code>meta.fragment</code> field. Each chunk that is part of
+the same large object redundantly stores the exact same meta
+value.</p>
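+
+<p>For illustration, reading a fragmented object back amounts to walking
+the <code>meta.fragment</code> list in order and concatenating each
+chunk's compressed data, skipping the pack header carried only by the
+first chunk. The <code>ChunkReader</code> interface and the
+<code>headerLength</code> parameter below are illustrative assumptions.</p>
+
+<pre>
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+final class FragmentReadExample {
+  interface ChunkReader {
+    byte[] chunkData(String chunkKey) throws IOException;   // chunk: column bytes
+  }
+
+  /** Concatenates the compressed stream of a fragmented object. */
+  static byte[] concatFragments(ChunkReader reader, String[] fragmentKeys,
+      int headerLength) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    boolean first = true;
+    for (String key : fragmentKeys) {
+      byte[] data = reader.chunkData(key);
+      int skip = first ? headerLength : 0;   // pack header lives only in the first chunk
+      out.write(data, skip, data.length - skip);
+      first = false;
+    }
+    return out.toByteArray();                // ready to be inflated
+  }
+}
+</pre>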
+
+<h3>Size Estimate</h3>
+
+<p>Approximately the same size as if the repository were stored on the
+local filesystem. For the linux-2.6 repository (373M / 1.8 million
+objects), about 417M (373.75M in <code>chunk:</code>, 42.64M in
+<code>index:</code>, 656K in <code>meta:</code>).</p>
+
+<p>Row count is close to size / 1M (373M / 1M = 373 rows), but may be
+slightly higher (e.g. 402) due to fractional chunks on the end of
+large fragmented objects, or where the single object type rule caused a
+chunk to close before it was full.</p>
+
+<p>For the complete Android repository set, disk usage is ~13G.</p>
+
+<h3>Updates</h3>
+
+<p>This table is (mostly) append-only. Write operations arrive in ~1
+MiB chunks, as the key format assures writers that the new row does not
+already exist. Chunks are randomly scattered by the hashing function,
+and are not buffered very deeply by writers.</p>
+
+<p><i>Interactive writes:</i> Small operations impacting only 1-5
+chunks will write all columns in a single operation. Most chunks of
+this variety will be very small, 1-10 objects per chunk and about 1-10
+KiB of compressed data inside the <code>chunk:</code> column.
+This class of write represents a single change made by one developer
+that must be shared back out immediately.</p>
+
+<p><i>Large pushes:</i> Large operations impacting tens to hundreds of
+chunks will first write the <code>chunk:</code> column, then come back
+later and populate the <code>index:</code> and <code>meta:</code>
+columns once all chunks have been written. The delayed writing of
+index and meta during large operations is required because the
+information for these columns is not available until the entire data
+stream from the Git client has been received and scanned. As the Git
+server may not have sufficient memory to store all chunk data (373M or
+1G!), it is written out first to free up memory.</p>
+
+<p><i>Garbage collection:</i> Chunks that are not optimally sized
+(less than the target ~1 MiB), optimally localized (too many graph
+pointers outside of the chunk), or compressed (Git found a smaller way
+to store the same content) will be replaced by first writing new
+chunks, and then deleting the old chunks.</p>
+
+<p>Worst case, this could churn as many as 402 rows and 373M worth of
+data for the linux-2.6 repository. Special consideration will be made
+to try to avoid replacing chunks whose <code>WWWW</code> key
+component is 'sufficiently old' and whose content is already
+sufficiently sized and compressed. This will help to limit churn to
+only more recently dated chunks, which are smaller in size.</p>
+
+<h3>Reads</h3>
+
+<p>All columns are read together as a unit. Memcache is checked first,
+with reads falling back to the DHT if the cache does not have the
+chunk.</p>
+
+<p>Reasonably accurate prefetching is supported through background
+threads and prefetching metadata embedded in the <a
+href="#message_CachedPackInfo">CachedPackInfo</a> and <a
+href="#message_ChunkMeta">ChunkMeta</a> protocol messages used by
+readers.</p>
+
+<hr />
+<h2>Protocol Messages</h2>
+
+<pre>
+package git_store;
+option java_package = "org.eclipse.jgit.storage.dht.proto";
+
+
+ // Entry in RefTable describing the target of the reference.
+ // Either symref *OR* target must be populated, but never both.
+ //
+<a name="message_RefData">message RefData</a> {
+ // An ObjectId with an optional hint about where it can be found.
+ //
+ message Id {
+ required string object_name = 1;
+ optional string chunk_key = 2;
+ }
+
+ // Name of another reference this reference inherits its target
+ // from. The target is inherited on-the-fly at runtime by reading
+ // the other reference. Typically only "HEAD" uses symref.
+ //
+ optional string symref = 1;
+
+ // ObjectId this reference currently points at.
+ //
+ optional Id target = 2;
+
+ // True if the correct value for peeled is stored.
+ //
+ optional bool is_peeled = 3;
+
+  // If is_peeled is true, this field is accurate. It is present
+  // only if target points to an annotated tag object, in which case
+  // it stores the "object" field of that tag.
+ //
+ optional Id peeled = 4;
+}
+
+
+ // Entry in ObjectIndexTable, describes how an object appears in a chunk.
+ //
+<a name="message_ObjectInfo">message ObjectInfo</a> {
+ // Type of Git object.
+ //
+ enum ObjectType {
+ COMMIT = 1;
+ TREE = 2;
+ BLOB = 3;
+ TAG = 4;
+ }
+ optional ObjectType object_type = 1;
+
+ // Position of the object's header within its chunk.
+ //
+ required int32 offset = 2;
+
+ // Total number of compressed data bytes, not including the pack
+ // header. For fragmented objects this is the sum of all chunks.
+ //
+ required int64 packed_size = 3;
+
+ // Total number of bytes of the uncompressed object. For a
+ // delta this is the size after applying the delta onto its base.
+ //
+ required int64 inflated_size = 4;
+
+ // ObjectId of the delta base, if this object is stored as a delta.
+ // The base is stored in raw binary.
+ //
+ optional bytes delta_base = 5;
+
+  // True if the object requires more than one chunk to be stored.
+  //
+  optional bool is_fragmented = 6;
+}
+
+
+ // Describes at a high-level the information about a chunk.
+ // A repository can use this summary to determine how much
+ // data is stored, or when garbage collection should occur.
+ //
+<a name="message_ChunkInfo">message ChunkInfo</a> {
+ // Source of the chunk (what code path created it).
+ //
+ enum Source {
+ RECEIVE = 1; // Came in over the network from external source.
+ INSERT = 2; // Created in this repository (e.g. a merge).
+ REPACK = 3; // Generated during a repack of this repository.
+ }
+ optional Source source = 1;
+
+ // Type of Git object stored in this chunk.
+ //
+ enum ObjectType {
+ MIXED = 0;
+ COMMIT = 1;
+ TREE = 2;
+ BLOB = 3;
+ TAG = 4;
+ }
+ optional ObjectType object_type = 2;
+
+ // True if this chunk is a member of a fragmented object.
+ //
+ optional bool is_fragment = 3;
+
+ // If present, key of the CachedPackInfo object
+ // that this chunk is a member of.
+ //
+ optional string cached_pack_key = 4;
+
+ // Summary description of the objects stored here.
+ //
+ message ObjectCounts {
+ // Number of objects stored in this chunk.
+ //
+ optional int32 total = 1;
+
+ // Number of objects stored in whole (non-delta) form.
+ //
+ optional int32 whole = 2;
+
+ // Number of objects stored in OFS_DELTA format.
+ // The delta base appears in the same chunk, or
+ // may appear in an earlier chunk through the
+ // ChunkMeta.base_chunk link.
+ //
+ optional int32 ofs_delta = 3;
+
+ // Number of objects stored in REF_DELTA format.
+ // The delta base is at an unknown location.
+ //
+ optional int32 ref_delta = 4;
+ }
+ optional ObjectCounts object_counts = 5;
+
+ // Size in bytes of the chunk's compressed data column.
+ //
+ optional int32 chunk_size = 6;
+
+ // Size in bytes of the chunk's index.
+ //
+ optional int32 index_size = 7;
+
+ // Size in bytes of the meta information.
+ //
+ optional int32 meta_size = 8;
+}
+
+
+ // Describes meta information about a chunk, stored inline with it.
+ //
+<a name="message_ChunkMeta">message ChunkMeta</a> {
+ // Enumerates the other chunks this chunk depends upon by OFS_DELTA.
+ // Entries are sorted by relative_start ascending, enabling search. Thus
+ // the earliest chunk is at the end of the list.
+ //
+ message BaseChunk {
+ // Bytes between start of the base chunk and start of this chunk.
+    // Although the value is positive, it is a negative offset.
+ //
+ required int64 relative_start = 1;
+ required string chunk_key = 2;
+ }
+ repeated BaseChunk base_chunk = 1;
+
+ // If this chunk is part of a fragment, key of every chunk that
+ // makes up the fragment, including this chunk.
+ //
+ repeated string fragment = 2;
+
+ // Chunks that should be prefetched if reading the current chunk.
+ //
+ message PrefetchHint {
+ repeated string edge = 1;
+ repeated string sequential = 2;
+ }
+ optional PrefetchHint commit_prefetch = 51;
+ optional PrefetchHint tree_prefetch = 52;
+}
+
+
+ // Describes a CachedPack, for efficient bulk clones.
+ //
+<a name="message_CachedPackInfo">message CachedPackInfo</a> {
+ // Unique name of the cached pack. This is the SHA-1 hash of
+ // all of the objects that make up the cached pack, sorted and
+ // in binary form. (Same rules as Git on the filesystem.)
+ //
+ required string name = 1;
+
+ // SHA-1 of all chunk keys, which are themselves SHA-1s of the
+ // raw chunk data. If any bit differs in compression (due to
+ // repacking) the version will differ.
+ //
+ required string version = 2;
+
+ // Total number of objects in the cached pack. This must be known
+ // in order to set the final resulting pack header correctly before it
+ // is sent to clients.
+ //
+ required int64 objects_total = 3;
+
+ // Number of objects stored as deltas, rather than deflated whole.
+ //
+ optional int64 objects_delta = 4;
+
+ // Total size of the chunks, in bytes, not including the chunk footer.
+ //
+ optional int64 bytes_total = 5;
+
+ // Objects this pack starts from.
+ //
+ message TipObjectList {
+ repeated string object_name = 1;
+ }
+ required TipObjectList tip_list = 6;
+
+ // Chunks, in order of occurrence in the stream.
+ //
+ message ChunkList {
+ repeated string chunk_key = 1;
+ }
+ required ChunkList chunk_list = 7;
+}
+</pre>
+
+</body>
+</html>
diff --git a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/git_store.proto b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/git_store.proto
new file mode 100644
index 0000000000..d6674055a1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/git_store.proto
@@ -0,0 +1,264 @@
+// Copyright (C) 2011, Google Inc.
+// and other copyright owners as documented in the project's IP log.
+//
+// This program and the accompanying materials are made available
+// under the terms of the Eclipse Distribution License v1.0 which
+// accompanies this distribution, is reproduced below, and is
+// available at http://www.eclipse.org/org/documents/edl-v10.php
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or
+// without modification, are permitted provided that the following
+// conditions are met:
+//
+// - Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// - Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// - Neither the name of the Eclipse Foundation, Inc. nor the
+// names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package git_store;
+option java_package = "org.eclipse.jgit.storage.dht.proto";
+
+
+ // Entry in RefTable describing the target of the reference.
+ // Either symref *OR* target must be populated, but never both.
+ //
+message RefData {
+ // An ObjectId with an optional hint about where it can be found.
+ //
+ message Id {
+ required string object_name = 1;
+ optional string chunk_key = 2;
+ }
+
+ // Name of another reference this reference inherits its target
+ // from. The target is inherited on-the-fly at runtime by reading
+ // the other reference. Typically only "HEAD" uses symref.
+ //
+ optional string symref = 1;
+
+ // ObjectId this reference currently points at.
+ //
+ optional Id target = 2;
+
+ // True if the correct value for peeled is stored.
+ //
+ optional bool is_peeled = 3;
+
+  // If is_peeled is true, this field is accurate. It is present
+  // only if target points to an annotated tag object, in which case
+  // it stores the "object" field of that tag.
+ //
+ optional Id peeled = 4;
+}
+
+
+ // Entry in ObjectIndexTable, describes how an object appears in a chunk.
+ //
+message ObjectInfo {
+ // Type of Git object.
+ //
+ enum ObjectType {
+ COMMIT = 1;
+ TREE = 2;
+ BLOB = 3;
+ TAG = 4;
+ }
+ optional ObjectType object_type = 1;
+
+ // Position of the object's header within its chunk.
+ //
+ required int32 offset = 2;
+
+ // Total number of compressed data bytes, not including the pack
+ // header. For fragmented objects this is the sum of all chunks.
+ //
+ required int64 packed_size = 3;
+
+ // Total number of bytes of the uncompressed object. For a
+ // delta this is the size after applying the delta onto its base.
+ //
+ required int64 inflated_size = 4;
+
+ // ObjectId of the delta base, if this object is stored as a delta.
+ // The base is stored in raw binary.
+ //
+ optional bytes delta_base = 5;
+
+ // True if the object requires more than one chunk to be stored.
+ //
+ optional bool is_fragmented = 6;
+}
+
+
+ // Describes at a high-level the information about a chunk.
+ // A repository can use this summary to determine how much
+ // data is stored, or when garbage collection should occur.
+ //
+message ChunkInfo {
+ // Source of the chunk (what code path created it).
+ //
+ enum Source {
+ RECEIVE = 1; // Came in over the network from external source.
+ INSERT = 2; // Created in this repository (e.g. a merge).
+ REPACK = 3; // Generated during a repack of this repository.
+ }
+ optional Source source = 1;
+
+ // Type of Git object stored in this chunk.
+ //
+ enum ObjectType {
+ MIXED = 0;
+ COMMIT = 1;
+ TREE = 2;
+ BLOB = 3;
+ TAG = 4;
+ }
+ optional ObjectType object_type = 2;
+
+ // True if this chunk is a member of a fragmented object.
+ //
+ optional bool is_fragment = 3;
+
+ // If present, key of the CachedPackInfo object
+ // that this chunk is a member of.
+ //
+ optional string cached_pack_key = 4;
+
+ // Summary description of the objects stored here.
+ //
+ message ObjectCounts {
+ // Number of objects stored in this chunk.
+ //
+ optional int32 total = 1;
+
+ // Number of objects stored in whole (non-delta) form.
+ //
+ optional int32 whole = 2;
+
+ // Number of objects stored in OFS_DELTA format.
+ // The delta base appears in the same chunk, or
+ // may appear in an earlier chunk through the
+ // ChunkMeta.base_chunk link.
+ //
+ optional int32 ofs_delta = 3;
+
+ // Number of objects stored in REF_DELTA format.
+ // The delta base is at an unknown location.
+ //
+ optional int32 ref_delta = 4;
+ }
+ optional ObjectCounts object_counts = 5;
+
+ // Size in bytes of the chunk's compressed data column.
+ //
+ optional int32 chunk_size = 6;
+
+ // Size in bytes of the chunk's index.
+ //
+ optional int32 index_size = 7;
+
+ // Size in bytes of the meta information.
+ //
+ optional int32 meta_size = 8;
+}
+
+
+ // Describes meta information about a chunk, stored inline with it.
+ //
+message ChunkMeta {
+ // Enumerates the other chunks this chunk depends upon by OFS_DELTA.
+ // Entries are sorted by relative_start ascending, enabling search. Thus
+ // the earliest chunk is at the end of the list.
+ //
+ message BaseChunk {
+ // Bytes between start of the base chunk and start of this chunk.
+    // Although the value is positive, it is a negative offset.
+ //
+ required int64 relative_start = 1;
+ required string chunk_key = 2;
+ }
+ repeated BaseChunk base_chunk = 1;
+
+ // If this chunk is part of a fragment, key of every chunk that
+ // makes up the fragment, including this chunk.
+ //
+ repeated string fragment = 2;
+
+ // Chunks that should be prefetched if reading the current chunk.
+ //
+ message PrefetchHint {
+ repeated string edge = 1;
+ repeated string sequential = 2;
+ }
+ optional PrefetchHint commit_prefetch = 51;
+ optional PrefetchHint tree_prefetch = 52;
+}
+
+
+ // Describes a CachedPack, for efficient bulk clones.
+ //
+message CachedPackInfo {
+ // Unique name of the cached pack. This is the SHA-1 hash of
+ // all of the objects that make up the cached pack, sorted and
+ // in binary form. (Same rules as Git on the filesystem.)
+ //
+ required string name = 1;
+
+ // SHA-1 of all chunk keys, which are themselves SHA-1s of the
+ // raw chunk data. If any bit differs in compression (due to
+ // repacking) the version will differ.
+ //
+ required string version = 2;
+
+ // Total number of objects in the cached pack. This must be known
+ // in order to set the final resulting pack header correctly before it
+ // is sent to clients.
+ //
+ required int64 objects_total = 3;
+
+ // Number of objects stored as deltas, rather than deflated whole.
+ //
+ optional int64 objects_delta = 4;
+
+ // Total size of the chunks, in bytes, not including the chunk footer.
+ //
+ optional int64 bytes_total = 5;
+
+ // Objects this pack starts from.
+ //
+ message TipObjectList {
+ repeated string object_name = 1;
+ }
+ required TipObjectList tip_list = 6;
+
+ // Chunks, in order of occurrence in the stream.
+ //
+ message ChunkList {
+ repeated string chunk_key = 1;
+ }
+ required ChunkList chunk_list = 7;
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java
new file mode 100644
index 0000000000..a59e47bb86
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+/**
+ * Receives notification when an asynchronous operation has finished.
+ * <p>
+ * Many storage provider interface operations use this type to signal completion
+ * or failure status of an operation that runs asynchronously to the caller.
+ * <p>
+ * Only one of {@link #onSuccess(Object)} or {@link #onFailure(DhtException)}
+ * should be invoked.
+ *
+ * @param <T>
+ * type of object returned from the operation on success.
+ */
+public interface AsyncCallback<T> {
+ /**
+ * Notification the operation completed.
+ *
+ * @param result
+ * the result value from the operation.
+ */
+ public void onSuccess(T result);
+
+ /**
+ * Notification the operation failed.
+ *
+ * @param error
+ * a description of the error.
+ */
+ public void onFailure(DhtException error);
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java
new file mode 100644
index 0000000000..218bffc123
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.eclipse.jgit.lib.NullProgressMonitor;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ProgressMonitor;
+import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+abstract class BatchObjectLookup<T extends ObjectId> {
+ private final RepositoryKey repo;
+
+ private final Database db;
+
+ private final DhtReader reader;
+
+ private final ThreadSafeProgressMonitor progress;
+
+ private final Semaphore batches;
+
+ private final ReentrantLock resultLock;
+
+ private final AtomicReference<DhtException> error;
+
+ private final int concurrentBatches;
+
+ private final List<T> retry;
+
+ private final ArrayList<ObjectInfo> tmp;
+
+ private boolean retryMissingObjects;
+
+ private boolean cacheLoadedInfo;
+
+ BatchObjectLookup(DhtReader reader) {
+ this(reader, null);
+ }
+
+ BatchObjectLookup(DhtReader reader, ProgressMonitor monitor) {
+ this.repo = reader.getRepositoryKey();
+ this.db = reader.getDatabase();
+ this.reader = reader;
+
+ if (monitor != null && monitor != NullProgressMonitor.INSTANCE)
+ this.progress = new ThreadSafeProgressMonitor(monitor);
+ else
+ this.progress = null;
+
+ this.concurrentBatches = reader.getOptions()
+ .getObjectIndexConcurrentBatches();
+
+ this.batches = new Semaphore(concurrentBatches);
+ this.resultLock = new ReentrantLock();
+ this.error = new AtomicReference<DhtException>();
+ this.retry = new ArrayList<T>();
+ this.tmp = new ArrayList<ObjectInfo>(4);
+ }
+
+ void setRetryMissingObjects(boolean on) {
+ retryMissingObjects = on;
+ }
+
+ void setCacheLoadedInfo(boolean on) {
+ cacheLoadedInfo = on;
+ }
+
+ void select(Iterable<T> objects) throws IOException {
+ selectInBatches(Context.FAST_MISSING_OK, lookInCache(objects));
+
+ // Not all of the selection ran with fast options.
+ if (retryMissingObjects && !retry.isEmpty()) {
+ batches.release(concurrentBatches);
+ selectInBatches(Context.READ_REPAIR, retry);
+ }
+
+ if (progress != null)
+ progress.pollForUpdates();
+ }
+
+ private Iterable<T> lookInCache(Iterable<T> objects) {
+ RecentInfoCache infoCache = reader.getRecentInfoCache();
+ List<T> missing = null;
+ for (T obj : objects) {
+ List<ObjectInfo> info = infoCache.get(obj);
+ if (info != null) {
+ onResult(obj, info);
+ if (progress != null)
+ progress.update(1);
+ } else {
+ if (missing == null) {
+ if (objects instanceof List<?>)
+ missing = new ArrayList<T>(((List<?>) objects).size());
+ else
+ missing = new ArrayList<T>();
+ }
+ missing.add(obj);
+ }
+ }
+ if (missing != null)
+ return missing;
+ return Collections.emptyList();
+ }
+
+ private void selectInBatches(Context options, Iterable<T> objects)
+ throws DhtException {
+ final int batchSize = reader.getOptions()
+ .getObjectIndexBatchSize();
+
+ Map<ObjectIndexKey, T> batch = new HashMap<ObjectIndexKey, T>();
+ Iterator<T> otpItr = objects.iterator();
+ while (otpItr.hasNext()) {
+ T otp = otpItr.next();
+
+ batch.put(ObjectIndexKey.create(repo, otp), otp);
+
+ if (batch.size() < batchSize && otpItr.hasNext())
+ continue;
+
+ if (error.get() != null)
+ break;
+
+ try {
+ if (progress != null) {
+ while (!batches.tryAcquire(500, MILLISECONDS))
+ progress.pollForUpdates();
+ progress.pollForUpdates();
+ } else {
+ batches.acquire();
+ }
+ } catch (InterruptedException err) {
+ error.compareAndSet(null, new DhtTimeoutException(err));
+ break;
+ }
+
+ startQuery(options, batch);
+ batch = new HashMap<ObjectIndexKey, T>();
+ }
+
+ try {
+ if (progress != null) {
+ while (!batches.tryAcquire(concurrentBatches, 500, MILLISECONDS))
+ progress.pollForUpdates();
+ progress.pollForUpdates();
+ } else {
+ batches.acquire(concurrentBatches);
+ }
+ } catch (InterruptedException err) {
+ error.compareAndSet(null, new DhtTimeoutException(err));
+ }
+
+ if (error.get() != null)
+ throw error.get();
+
+ // Make sure retry changes are visible to us.
+ resultLock.lock();
+ resultLock.unlock();
+ }
+
+ private void startQuery(final Context context,
+ final Map<ObjectIndexKey, T> batch) {
+ final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> cb;
+
+ cb = new AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>>() {
+ public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> r) {
+ resultLock.lock();
+ try {
+ processResults(context, batch, r);
+ } finally {
+ resultLock.unlock();
+ batches.release();
+ }
+ }
+
+ public void onFailure(DhtException e) {
+ error.compareAndSet(null, e);
+ batches.release();
+ }
+ };
+ db.objectIndex().get(context, batch.keySet(), cb);
+ }
+
+ private void processResults(Context context, Map<ObjectIndexKey, T> batch,
+ Map<ObjectIndexKey, Collection<ObjectInfo>> objects) {
+ for (T obj : batch.values()) {
+ Collection<ObjectInfo> matches = objects.get(obj);
+
+ if (matches == null || matches.isEmpty()) {
+ if (retryMissingObjects && context == Context.FAST_MISSING_OK)
+ retry.add(obj);
+ continue;
+ }
+
+ tmp.clear();
+ tmp.addAll(matches);
+ ObjectInfo.sort(tmp);
+ if (cacheLoadedInfo)
+ reader.getRecentInfoCache().put(obj, tmp);
+
+ onResult(obj, tmp);
+ }
+
+ if (progress != null)
+ progress.update(objects.size());
+ }
+
+ protected abstract void onResult(T obj, List<ObjectInfo> info);
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackInfo.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackInfo.java
new file mode 100644
index 0000000000..95a5857f1a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackInfo.java
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_STRING_LENGTH;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.eclipse.jgit.lib.ObjectId;
+
+/**
+ * Summary information about a cached pack owned by a repository.
+ */
+public class CachedPackInfo {
+ /**
+ * Parse info from the storage system.
+ *
+ * @param raw
+ * the raw encoding of the info.
+ * @return the info object.
+ */
+ public static CachedPackInfo fromBytes(byte[] raw) {
+ return fromBytes(TinyProtobuf.decode(raw));
+ }
+
+ /**
+ * Parse info from the storage system.
+ *
+ * @param d
+ * decoder for the message buffer.
+ * @return the info object.
+ */
+ public static CachedPackInfo fromBytes(TinyProtobuf.Decoder d) {
+ CachedPackInfo info = new CachedPackInfo();
+ PARSE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break PARSE;
+ case 1:
+ info.name = d.stringObjectId();
+ continue;
+ case 2:
+ info.version = d.stringObjectId();
+ continue;
+ case 3:
+ info.objectsTotal = d.int64();
+ continue;
+ case 4:
+ info.objectsDelta = d.int64();
+ continue;
+ case 5:
+ info.bytesTotal = d.int64();
+ continue;
+ case 6: {
+ TinyProtobuf.Decoder m = d.message();
+ for (;;) {
+ switch (m.next()) {
+ case 0:
+ continue PARSE;
+ case 1:
+ info.tips.add(m.stringObjectId());
+ continue;
+ default:
+ m.skip();
+ continue;
+ }
+ }
+ }
+ case 7: {
+ TinyProtobuf.Decoder m = d.message();
+ for (;;) {
+ switch (m.next()) {
+ case 0:
+ continue PARSE;
+ case 1:
+ info.chunks.add(ChunkKey.fromBytes(m));
+ continue;
+ default:
+ m.skip();
+ continue;
+ }
+ }
+ }
+ default:
+ d.skip();
+ continue;
+ }
+ }
+ return info;
+ }
+
+ private static byte[] asBytes(CachedPackInfo info) {
+ int tipSize = (2 + OBJECT_ID_STRING_LENGTH) * info.tips.size();
+ TinyProtobuf.Encoder tipList = TinyProtobuf.encode(tipSize);
+ for (ObjectId tip : info.tips)
+ tipList.string(1, tip);
+
+ int chunkSize = (2 + ChunkKey.KEYLEN) * info.chunks.size();
+ TinyProtobuf.Encoder chunkList = TinyProtobuf.encode(chunkSize);
+ for (ChunkKey key : info.chunks)
+ chunkList.bytes(1, key.asBytes());
+
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(1024);
+ e.string(1, info.name);
+ e.string(2, info.version);
+ e.int64(3, info.objectsTotal);
+ e.int64IfNotZero(4, info.objectsDelta);
+ e.int64IfNotZero(5, info.bytesTotal);
+ e.message(6, tipList);
+ e.message(7, chunkList);
+ return e.asByteArray();
+ }
+
+ ObjectId name;
+
+ ObjectId version;
+
+ SortedSet<ObjectId> tips = new TreeSet<ObjectId>();
+
+ long objectsTotal;
+
+ long objectsDelta;
+
+ long bytesTotal;
+
+ List<ChunkKey> chunks = new ArrayList<ChunkKey>();
+
+ /** @return name of the information object. */
+ public CachedPackKey getRowKey() {
+ return new CachedPackKey(name, version);
+ }
+
+ /** @return number of objects stored in the cached pack. */
+ public long getObjectsTotal() {
+ return objectsTotal;
+ }
+
+ /** @return number of objects stored in delta format. */
+ public long getObjectsDelta() {
+ return objectsDelta;
+ }
+
+ /** @return number of bytes in the cached pack. */
+ public long getTotalBytes() {
+ return bytesTotal;
+ }
+
+ /** @return list of all chunks that make up this pack, in order. */
+ public List<ChunkKey> getChunkKeys() {
+ return Collections.unmodifiableList(chunks);
+ }
+
+ /**
+ * Convert this information into a byte array for storage.
+ *
+ * @return the data, encoded as a byte array. This does not include the key,
+ * callers must store that separately.
+ */
+ public byte[] asBytes() {
+ return asBytes(this);
+ }
+
+ @Override
+ public String toString() {
+ return getRowKey().toString();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java
new file mode 100644
index 0000000000..0fc14f9e23
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.util.RawParseUtils.decode;
+
+import java.text.MessageFormat;
+
+import org.eclipse.jgit.lib.ObjectId;
+
+/** Unique identifier of a {@link CachedPackInfo} in the DHT. */
+public final class CachedPackKey implements RowKey {
+ static final int KEYLEN = 81;
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static CachedPackKey fromBytes(byte[] key) {
+ return fromBytes(key, 0, key.length);
+ }
+
+ /**
+ * @param d
+ * decoder to read key from current field from.
+ * @return the key
+ */
+ public static CachedPackKey fromBytes(TinyProtobuf.Decoder d) {
+ int len = d.bytesLength();
+ int ptr = d.bytesOffset();
+ byte[] buf = d.bytesArray();
+ return fromBytes(buf, ptr, len);
+ }
+
+ /**
+ * @param key
+ * @param ptr
+ * @param len
+ * @return the key
+ */
+ public static CachedPackKey fromBytes(byte[] key, int ptr, int len) {
+ if (len != KEYLEN)
+ throw new IllegalArgumentException(MessageFormat.format(
+ DhtText.get().invalidChunkKey, decode(key, ptr, ptr + len)));
+
+ ObjectId name = ObjectId.fromString(key, ptr);
+ ObjectId vers = ObjectId.fromString(key, ptr + 41);
+ return new CachedPackKey(name, vers);
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static CachedPackKey fromString(String key) {
+ int d = key.indexOf('.');
+ ObjectId name = ObjectId.fromString(key.substring(0, d));
+ ObjectId vers = ObjectId.fromString(key.substring(d + 1));
+ return new CachedPackKey(name, vers);
+ }
+
+ private final ObjectId name;
+
+ private final ObjectId version;
+
+ CachedPackKey(ObjectId name, ObjectId version) {
+ this.name = name;
+ this.version = version;
+ }
+
+ /** @return unique SHA-1 name of the pack. */
+ public ObjectId getName() {
+ return name;
+ }
+
+ /** @return unique version of the pack. */
+ public ObjectId getVersion() {
+ return version;
+ }
+
+ public byte[] asBytes() {
+ byte[] r = new byte[KEYLEN];
+ name.copyTo(r, 0);
+ r[40] = '.';
+ version.copyTo(r, 41);
+ return r;
+ }
+
+ public String asString() {
+ return name.name() + "." + version.name();
+ }
+
+ @Override
+ public int hashCode() {
+ return name.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof CachedPackKey) {
+ CachedPackKey key = (CachedPackKey) other;
+ return name.equals(key.name) && version.equals(key.version);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "cached-pack:" + asString();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCache.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCache.java
new file mode 100644
index 0000000000..64b169fa15
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCache.java
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2008-2011, Google Inc.
+ * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.SoftReference;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.storage.dht.DhtReader.ChunkAndOffset;
+
+/**
+ * Caches recently used {@link PackChunk} in memory for faster read access.
+ * <p>
+ * During a miss, older entries are evicted from the cache so long as
+ * {@link #isFull()} returns true.
+ * <p>
+ * It's too expensive during object access to be 100% accurate with a least
+ * recently used (LRU) algorithm. Strictly ordering every read is a lot of
+ * overhead that typically doesn't yield a corresponding benefit to the
+ * application.
+ * <p>
+ * This cache implements a loose LRU policy by randomly picking a window
+ * comprised of roughly 10% of the cache, and evicting the oldest accessed entry
+ * within that window.
+ * <p>
+ * Entities created by the cache are held under SoftReferences, permitting the
+ * Java runtime's garbage collector to evict entries when heap memory gets low.
+ * Most JREs implement a loose least recently used algorithm for this eviction.
+ * <p>
+ * The internal hash table does not expand at runtime, instead it is fixed in
+ * size at cache creation time. The internal lock table used to gate load
+ * invocations is also fixed in size.
+ * <p>
+ * To maintain higher concurrency workloads, during eviction only one thread
+ * performs the eviction work, while other threads can continue to insert new
+ * objects in parallel. This means that the cache can be temporarily over limit,
+ * especially if the nominated eviction thread is being starved relative to the
+ * other threads.
+ */
+public class ChunkCache {
+ private static final Random rng = new Random();
+
+ private static volatile ChunkCache cache;
+
+ static {
+ cache = new ChunkCache(new ChunkCacheConfig());
+ }
+
+ /**
+ * Modify the configuration of the chunk cache.
+ * <p>
+ * The new configuration is applied immediately. If the new limits are
+ * smaller than what is currently cached, older entries will be purged
+ * as soon as possible to allow the cache to meet the new limit.
+ *
+ * @param cfg
+ * the new chunk cache configuration.
+ * @throws IllegalArgumentException
+ * the cache configuration contains one or more invalid
+ * settings, usually too low of a limit.
+ */
+ public static void reconfigure(ChunkCacheConfig cfg) {
+ ChunkCache nc = new ChunkCache(cfg);
+ cache = nc;
+ }
+
+ static ChunkCache get() {
+ return cache;
+ }
+
+ /** ReferenceQueue to cleanup released and garbage collected windows. */
+ private final ReferenceQueue<PackChunk> queue;
+
+ /** Number of entries in {@link #table}. */
+ private final int tableSize;
+
+ /** Access clock for loose LRU. */
+ private final AtomicLong clock;
+
+ /** Hash bucket directory; entries are chained below. */
+ private final AtomicReferenceArray<Entry> table;
+
+ /** Locks to prevent concurrent loads for same (ChunkKey,position). */
+ private final Lock[] locks;
+
+ /** Lock to elect the eviction thread after a load occurs. */
+ private final ReentrantLock evictLock;
+
+ /** Number of {@link #table} buckets to scan for an eviction window. */
+ private final int evictBatch;
+
+ private final long maxBytes;
+
+ private final AtomicLong openBytes;
+
+ private ChunkCache(ChunkCacheConfig cfg) {
+ tableSize = tableSize(cfg);
+ final int lockCount = lockCount(cfg);
+ if (tableSize < 0)
+ throw new IllegalArgumentException();
+ if (lockCount < 0)
+ throw new IllegalArgumentException();
+
+ queue = new ReferenceQueue<PackChunk>();
+ clock = new AtomicLong(1);
+ table = new AtomicReferenceArray<Entry>(tableSize);
+ locks = new Lock[lockCount];
+ for (int i = 0; i < locks.length; i++)
+ locks[i] = new Lock();
+ evictLock = new ReentrantLock();
+
+ int eb = (int) (tableSize * .1);
+ if (64 < eb)
+ eb = 64;
+ else if (eb < 4)
+ eb = 4;
+ if (tableSize < eb)
+ eb = tableSize;
+ evictBatch = eb;
+
+ maxBytes = cfg.getChunkCacheLimit();
+ openBytes = new AtomicLong();
+ }
+
+ long getOpenBytes() {
+ return openBytes.get();
+ }
+
+ private Ref createRef(ChunkKey key, PackChunk v) {
+ final Ref ref = new Ref(key, v, queue);
+ openBytes.addAndGet(ref.size);
+ return ref;
+ }
+
+ private void clear(Ref ref) {
+ openBytes.addAndGet(-ref.size);
+ }
+
+ private boolean isFull() {
+ return maxBytes < openBytes.get();
+ }
+
+ private static int tableSize(ChunkCacheConfig cfg) {
+ final int csz = 1 * ChunkCacheConfig.MiB;
+ final long limit = cfg.getChunkCacheLimit();
+ if (limit == 0)
+ return 0;
+ if (csz <= 0)
+ throw new IllegalArgumentException();
+ if (limit < csz)
+ throw new IllegalArgumentException();
+ return (int) Math.min(5 * (limit / csz) / 2, 2000000000);
+ }
+
+ private static int lockCount(ChunkCacheConfig cfg) {
+ if (cfg.getChunkCacheLimit() == 0)
+ return 0;
+ return 32;
+ }
+
+ PackChunk get(ChunkKey chunkKey) {
+ if (tableSize == 0)
+ return null;
+ return scan(table.get(slot(chunkKey)), chunkKey);
+ }
+
+ ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) {
+ // TODO(spearce) This method violates our no-collision rules.
+ // It's possible for a duplicate object to be uploaded into a new
+ // chunk, and for that chunk to be used if it was pulled into the
+ // process cache for a different object.
+
+ for (int slot = 0; slot < tableSize; slot++) {
+ for (Entry e = table.get(slot); e != null; e = e.next) {
+ PackChunk chunk = e.ref.get();
+ if (chunk != null) {
+ int pos = chunk.findOffset(repo, objId);
+ if (0 <= pos) {
+ hit(e.ref);
+ return new ChunkAndOffset(chunk, pos);
+ }
+ }
+ }
+ }
+ return null;
+ }
+
+ PackChunk put(PackChunk chunk) {
+ if (tableSize == 0)
+ return chunk;
+
+ final ChunkKey chunkKey = chunk.getChunkKey();
+ final int slot = slot(chunkKey);
+ final Entry e1 = table.get(slot);
+ PackChunk v = scan(e1, chunkKey);
+ if (v != null)
+ return v;
+
+ synchronized (lock(chunkKey)) {
+ Entry e2 = table.get(slot);
+ if (e2 != e1) {
+ v = scan(e2, chunkKey);
+ if (v != null)
+ return v;
+ }
+
+ v = chunk;
+ final Ref ref = createRef(chunkKey, v);
+ hit(ref);
+ for (;;) {
+ final Entry n = new Entry(clean(e2), ref);
+ if (table.compareAndSet(slot, e2, n))
+ break;
+ e2 = table.get(slot);
+ }
+ }
+
+ if (evictLock.tryLock()) {
+ try {
+ gc();
+ evict();
+ } finally {
+ evictLock.unlock();
+ }
+ }
+
+ return v;
+ }
+
+ private PackChunk scan(Entry n, ChunkKey chunk) {
+ for (; n != null; n = n.next) {
+ Ref r = n.ref;
+ if (r.chunk.equals(chunk)) {
+ PackChunk v = r.get();
+ if (v != null) {
+ hit(r);
+ return v;
+ }
+ n.kill();
+ break;
+ }
+ }
+ return null;
+ }
+
+ private void hit(final Ref r) {
+ // We don't need to be 100% accurate here. It's sufficient that at least
+ // one thread performs the increment. Any other concurrent access at
+ // exactly the same time can simply use the same clock value.
+ //
+ // Consequently we attempt the set, but we don't try to recover should
+ // it fail. This is why we don't use getAndIncrement() here.
+ //
+ final long c = clock.get();
+ clock.compareAndSet(c, c + 1);
+ r.lastAccess = c;
+ }
+
+ private void evict() {
+ while (isFull()) {
+ int ptr = rng.nextInt(tableSize);
+ Entry old = null;
+ int slot = 0;
+ for (int b = evictBatch - 1; b >= 0; b--, ptr++) {
+ if (tableSize <= ptr)
+ ptr = 0;
+ for (Entry e = table.get(ptr); e != null; e = e.next) {
+ if (e.dead)
+ continue;
+ if (old == null || e.ref.lastAccess < old.ref.lastAccess) {
+ old = e;
+ slot = ptr;
+ }
+ }
+ }
+ if (old != null) {
+ old.kill();
+ gc();
+ final Entry e1 = table.get(slot);
+ table.compareAndSet(slot, e1, clean(e1));
+ }
+ }
+ }
+
+ private void gc() {
+ Ref r;
+ while ((r = (Ref) queue.poll()) != null) {
+ // Sun's Java 5 and 6 implementations have a bug where a Reference
+ // can be enqueued and dequeued twice on the same reference queue
+ // due to a race condition within ReferenceQueue.enqueue(Reference).
+ //
+ // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6837858
+ //
+ // We CANNOT permit a Reference to come through us twice, as it will
+ // skew the resource counters we maintain. Our canClear() check here
+ // provides a way to skip the redundant dequeues, if any.
+ //
+ if (r.canClear()) {
+ clear(r);
+
+ boolean found = false;
+ final int s = slot(r.chunk);
+ final Entry e1 = table.get(s);
+ for (Entry n = e1; n != null; n = n.next) {
+ if (n.ref == r) {
+ n.dead = true;
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ table.compareAndSet(s, e1, clean(e1));
+ }
+ }
+ }
+
+ private int slot(ChunkKey chunk) {
+ return (chunk.hashCode() >>> 1) % tableSize;
+ }
+
+ private Lock lock(ChunkKey chunk) {
+ return locks[(chunk.hashCode() >>> 1) % locks.length];
+ }
+
+ private static Entry clean(Entry top) {
+ while (top != null && top.dead) {
+ top.ref.enqueue();
+ top = top.next;
+ }
+ if (top == null)
+ return null;
+ final Entry n = clean(top.next);
+ return n == top.next ? top : new Entry(n, top.ref);
+ }
+
+ private static class Entry {
+ /** Next entry in the hash table's chain list. */
+ final Entry next;
+
+ /** The referenced object. */
+ final Ref ref;
+
+ /**
+ * Marked true when ref.get() returns null and the ref is dead.
+ * <p>
+ * A true here indicates that the ref is no longer accessible, and that
+ * we therefore need to eventually purge this Entry object out of the
+ * bucket's chain.
+ */
+ volatile boolean dead;
+
+ Entry(final Entry n, final Ref r) {
+ next = n;
+ ref = r;
+ }
+
+ final void kill() {
+ dead = true;
+ ref.enqueue();
+ }
+ }
+
+ /** A soft reference wrapped around a cached object. */
+ private static class Ref extends SoftReference<PackChunk> {
+ final ChunkKey chunk;
+
+ final int size;
+
+ long lastAccess;
+
+ private boolean cleared;
+
+ Ref(ChunkKey chunk, PackChunk v, ReferenceQueue<PackChunk> queue) {
+ super(v, queue);
+ this.chunk = chunk;
+ this.size = v.getTotalSize();
+ }
+
+ final synchronized boolean canClear() {
+ if (cleared)
+ return false;
+ cleared = true;
+ return true;
+ }
+ }
+
+ private static final class Lock {
+ // Used only for its implicit monitor.
+ }
+}
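A minimal caller sketch for the cache above, assuming the defaults of the ChunkCacheConfig type introduced below: raise the process-wide limit at startup, then let readers repopulate the freshly sized instance on demand.

    ChunkCacheConfig cfg = new ChunkCacheConfig();     // starts from the 10 MiB default
    cfg.setChunkCacheLimit(64 * ChunkCacheConfig.MiB); // new heap budget for cached chunks
    ChunkCache.reconfigure(cfg);                       // installs a freshly sized cache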
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCacheConfig.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCacheConfig.java
new file mode 100644
index 0000000000..3880506cf5
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkCacheConfig.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.lib.Config;
+
+/** Configuration parameters for {@link ChunkCache}. */
+public class ChunkCacheConfig {
+ /** 1024 (number of bytes in one kibibyte/kilobyte) */
+ public static final int KiB = 1024;
+
+ /** 1024 {@link #KiB} (number of bytes in one mebibyte/megabyte) */
+ public static final int MiB = 1024 * KiB;
+
+ private long chunkCacheLimit;
+
+ /** Create a default configuration. */
+ public ChunkCacheConfig() {
+ setChunkCacheLimit(10 * MiB);
+ }
+
+ /**
+ * @return maximum number of bytes of heap memory to dedicate to caching
+ * pack file data. If the limit is configured to 0, the chunk cache is
+ * disabled. <b>Default is 10 MB.</b>
+ */
+ public long getChunkCacheLimit() {
+ return chunkCacheLimit;
+ }
+
+ /**
+ * @param newLimit
+ * maximum number of bytes of heap memory to dedicate to caching
+ * pack file data.
+ * @return {@code this}
+ */
+ public ChunkCacheConfig setChunkCacheLimit(final long newLimit) {
+ chunkCacheLimit = Math.max(0, newLimit);
+ return this;
+ }
+
+ /**
+ * Update properties by setting fields from the configuration.
+ * <p>
+ * If a property is not defined in the configuration, then it is left
+ * unmodified.
+ *
+ * @param rc
+ * configuration to read properties from.
+ * @return {@code this}
+ */
+ public ChunkCacheConfig fromConfig(final Config rc) {
+ setChunkCacheLimit(rc.getLong("core", "dht", "chunkCacheLimit", getChunkCacheLimit()));
+ return this;
+ }
+}
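A short sketch of fromConfig() in use; the Config instance rc is assumed to have been loaded elsewhere, for example from a repository's configuration. A core "dht" chunkCacheLimit value overrides the 10 MiB default, while an absent value leaves it untouched.

    ChunkCacheConfig cfg = new ChunkCacheConfig().fromConfig(rc);
    ChunkCache.reconfigure(cfg);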
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java
new file mode 100644
index 0000000000..27c520bc9c
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.zip.Deflater;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.storage.dht.ChunkMeta.BaseChunk;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.transport.PackedObjectInfo;
+import org.eclipse.jgit.util.NB;
+
+/**
+ * Formats one {@link PackChunk} for storage in the DHT.
+ * <p>
+ * Each formatter instance can be used only once.
+ */
+class ChunkFormatter {
+ static final int TRAILER_SIZE = 4;
+
+ private final RepositoryKey repo;
+
+ private final DhtInserterOptions options;
+
+ private final byte[] varIntBuf;
+
+ private final ChunkInfo info;
+
+ private final int maxObjects;
+
+ private Map<ChunkKey, BaseChunkInfo> baseChunks;
+
+ private List<StoredObject> objectList;
+
+ private byte[] chunkData;
+
+ private int ptr;
+
+ private int mark;
+
+ private int currentObjectType;
+
+ private BaseChunkInfo currentObjectBase;
+
+ private PackChunk.Members builder;
+
+ ChunkFormatter(RepositoryKey repo, DhtInserterOptions options) {
+ this.repo = repo;
+ this.options = options;
+ this.varIntBuf = new byte[32];
+ this.info = new ChunkInfo();
+ this.chunkData = new byte[options.getChunkSize()];
+ this.maxObjects = options.getMaxObjectCount();
+ }
+
+ void setSource(ChunkInfo.Source src) {
+ info.source = src;
+ }
+
+ void setObjectType(int type) {
+ info.objectType = type;
+ }
+
+ void setFragment() {
+ info.fragment = true;
+ }
+
+ ChunkKey getChunkKey() {
+ return getChunkInfo().getChunkKey();
+ }
+
+ ChunkInfo getChunkInfo() {
+ return info;
+ }
+
+ ChunkMeta getChunkMeta() {
+ return builder.getMeta();
+ }
+
+ PackChunk getPackChunk() throws DhtException {
+ return builder.build();
+ }
+
+ void setChunkIndex(List<PackedObjectInfo> objs) {
+ builder.setChunkIndex(ChunkIndex.create(objs));
+ }
+
+ ChunkKey end(MessageDigest md) {
+ if (md == null)
+ md = Constants.newMessageDigest();
+
+ // Embed a small amount of randomness into the chunk content,
+ // and thus impact its name. This prevents malicious clients from
+ // being able to predict what a chunk is called, which keeps them
+ // from replacing an existing chunk.
+ //
+ chunkData = cloneArray(chunkData, ptr + TRAILER_SIZE);
+ NB.encodeInt32(chunkData, ptr, options.nextChunkSalt());
+ ptr += 4;
+
+ md.update(chunkData, 0, ptr);
+ info.chunkKey = ChunkKey.create(repo, ObjectId.fromRaw(md.digest()));
+ info.chunkSize = chunkData.length;
+
+ builder = new PackChunk.Members();
+ builder.setChunkKey(info.chunkKey);
+ builder.setChunkData(chunkData);
+
+ ChunkMeta meta = new ChunkMeta(info.chunkKey);
+ if (baseChunks != null) {
+ meta.baseChunks = new ArrayList<BaseChunk>(baseChunks.size());
+ for (BaseChunkInfo b : baseChunks.values()) {
+ if (0 < b.useCount)
+ meta.baseChunks.add(new BaseChunk(b.relativeStart, b.key));
+ }
+ Collections.sort(meta.baseChunks, new Comparator<BaseChunk>() {
+ public int compare(BaseChunk a, BaseChunk b) {
+ return Long.signum(a.relativeStart - b.relativeStart);
+ }
+ });
+ }
+ if (!meta.isEmpty()) {
+ builder.setMeta(meta);
+ info.metaSize = meta.asBytes().length;
+ }
+
+ if (objectList != null && !objectList.isEmpty()) {
+ byte[] index = ChunkIndex.create(objectList);
+ builder.setChunkIndex(index);
+ info.indexSize = index.length;
+ }
+
+ return getChunkKey();
+ }
+
+ /**
+ * Safely put the chunk to the database.
+ * <p>
+ * This method is slow. It first puts the chunk info, waits for success,
+ * then puts the chunk itself, waits for success, and finally queues up the
+ * object index with its chunk links in the supplied buffer.
+ *
+ * @param db
+ * the database to store the chunk in.
+ * @param dbWriteBuffer
+ * buffer used to enqueue the object index updates.
+ * @throws DhtException
+ */
+ void safePut(Database db, WriteBuffer dbWriteBuffer) throws DhtException {
+ WriteBuffer chunkBuf = db.newWriteBuffer();
+
+ db.repository().put(repo, info, chunkBuf);
+ chunkBuf.flush();
+
+ db.chunk().put(builder, chunkBuf);
+ chunkBuf.flush();
+
+ linkObjects(db, dbWriteBuffer);
+ }
+
+ void unsafePut(Database db, WriteBuffer dbWriteBuffer) throws DhtException {
+ db.repository().put(repo, info, dbWriteBuffer);
+ db.chunk().put(builder, dbWriteBuffer);
+ linkObjects(db, dbWriteBuffer);
+ }
+
+ private void linkObjects(Database db, WriteBuffer dbWriteBuffer)
+ throws DhtException {
+ if (objectList != null && !objectList.isEmpty()) {
+ for (StoredObject obj : objectList) {
+ db.objectIndex().add(ObjectIndexKey.create(repo, obj),
+ obj.link(getChunkKey()), dbWriteBuffer);
+ }
+ }
+ }
+
+ boolean whole(Deflater def, int type, byte[] data, int off, final int size,
+ ObjectId objId) {
+ if (free() < 10 || maxObjects <= info.objectsTotal)
+ return false;
+
+ header(type, size);
+ info.objectsWhole++;
+ currentObjectType = type;
+
+ int endOfHeader = ptr;
+ def.setInput(data, off, size);
+ def.finish();
+ do {
+ int left = free();
+ if (left == 0) {
+ rollback();
+ return false;
+ }
+
+ int n = def.deflate(chunkData, ptr, left);
+ if (n == 0) {
+ rollback();
+ return false;
+ }
+
+ ptr += n;
+ } while (!def.finished());
+
+ if (objectList == null)
+ objectList = new ArrayList<StoredObject>();
+
+ final int packedSize = ptr - endOfHeader;
+ objectList.add(new StoredObject(objId, type, mark, packedSize, size));
+
+ if (info.objectType < 0)
+ info.objectType = type;
+ else if (info.objectType != type)
+ info.objectType = ChunkInfo.OBJ_MIXED;
+
+ return true;
+ }
+
+ boolean whole(int type, long inflatedSize) {
+ if (free() < 10 || maxObjects <= info.objectsTotal)
+ return false;
+
+ header(type, inflatedSize);
+ info.objectsWhole++;
+ currentObjectType = type;
+ return true;
+ }
+
+ boolean ofsDelta(long inflatedSize, long negativeOffset) {
+ final int ofsPtr = encodeVarInt(negativeOffset);
+ final int ofsLen = varIntBuf.length - ofsPtr;
+ if (free() < 10 + ofsLen || maxObjects <= info.objectsTotal)
+ return false;
+
+ header(Constants.OBJ_OFS_DELTA, inflatedSize);
+ info.objectsOfsDelta++;
+ currentObjectType = Constants.OBJ_OFS_DELTA;
+ currentObjectBase = null;
+
+ if (append(varIntBuf, ofsPtr, ofsLen))
+ return true;
+
+ rollback();
+ return false;
+ }
+
+ boolean refDelta(long inflatedSize, AnyObjectId baseId) {
+ if (free() < 30 || maxObjects <= info.objectsTotal)
+ return false;
+
+ header(Constants.OBJ_REF_DELTA, inflatedSize);
+ info.objectsRefDelta++;
+ currentObjectType = Constants.OBJ_REF_DELTA;
+
+ baseId.copyRawTo(chunkData, ptr);
+ ptr += 20;
+ return true;
+ }
+
+ void useBaseChunk(long relativeStart, ChunkKey baseChunkKey) {
+ if (baseChunks == null)
+ baseChunks = new HashMap<ChunkKey, BaseChunkInfo>();
+
+ BaseChunkInfo base = baseChunks.get(baseChunkKey);
+ if (base == null) {
+ base = new BaseChunkInfo(relativeStart, baseChunkKey);
+ baseChunks.put(baseChunkKey, base);
+ }
+ base.useCount++;
+ currentObjectBase = base;
+ }
+
+ void appendDeflateOutput(Deflater def) {
+ while (!def.finished()) {
+ int left = free();
+ if (left == 0)
+ return;
+ int n = def.deflate(chunkData, ptr, left);
+ if (n == 0)
+ return;
+ ptr += n;
+ }
+ }
+
+ boolean append(byte[] data, int off, int len) {
+ if (free() < len)
+ return false;
+
+ System.arraycopy(data, off, chunkData, ptr, len);
+ ptr += len;
+ return true;
+ }
+
+ boolean isEmpty() {
+ return ptr == 0;
+ }
+
+ int getObjectCount() {
+ return info.objectsTotal;
+ }
+
+ int position() {
+ return ptr;
+ }
+
+ int size() {
+ return ptr;
+ }
+
+ int free() {
+ return (chunkData.length - TRAILER_SIZE) - ptr;
+ }
+
+ byte[] getRawChunkDataArray() {
+ return chunkData;
+ }
+
+ int getCurrentObjectType() {
+ return currentObjectType;
+ }
+
+ void rollback() {
+ ptr = mark;
+ adjustObjectCount(-1, currentObjectType);
+ }
+
+ void adjustObjectCount(int delta, int type) {
+ info.objectsTotal += delta;
+
+ switch (type) {
+ case Constants.OBJ_COMMIT:
+ case Constants.OBJ_TREE:
+ case Constants.OBJ_BLOB:
+ case Constants.OBJ_TAG:
+ info.objectsWhole += delta;
+ break;
+
+ case Constants.OBJ_OFS_DELTA:
+ info.objectsOfsDelta += delta;
+ if (currentObjectBase != null && --currentObjectBase.useCount == 0)
+ baseChunks.remove(currentObjectBase.key);
+ currentObjectBase = null;
+ break;
+
+ case Constants.OBJ_REF_DELTA:
+ info.objectsRefDelta += delta;
+ break;
+ }
+ }
+
+ private void header(int type, long inflatedSize) {
+ mark = ptr;
+ info.objectsTotal++;
+
+ long nextLength = inflatedSize >>> 4;
+ chunkData[ptr++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4) | (inflatedSize & 0x0F));
+ inflatedSize = nextLength;
+ while (inflatedSize > 0) {
+ nextLength >>>= 7;
+ chunkData[ptr++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (inflatedSize & 0x7F));
+ inflatedSize = nextLength;
+ }
+ }
+
+ private int encodeVarInt(long value) {
+ int n = varIntBuf.length - 1;
+ varIntBuf[n] = (byte) (value & 0x7F);
+ while ((value >>= 7) > 0)
+ varIntBuf[--n] = (byte) (0x80 | (--value & 0x7F));
+ return n;
+ }
+
+ private static byte[] cloneArray(byte[] src, int len) {
+ byte[] dst = new byte[len];
+ System.arraycopy(src, 0, dst, 0, len);
+ return dst;
+ }
+
+ private static class BaseChunkInfo {
+ final long relativeStart;
+
+ final ChunkKey key;
+
+ int useCount;
+
+ BaseChunkInfo(long relativeStart, ChunkKey key) {
+ this.relativeStart = relativeStart;
+ this.key = key;
+ }
+ }
+
+ private static class StoredObject extends PackedObjectInfo {
+ private final int type;
+
+ private final int packed;
+
+ private final int inflated;
+
+ StoredObject(AnyObjectId id, int type, int offset, int packed, int size) {
+ super(id);
+ setOffset(offset);
+ this.type = type;
+ this.packed = packed;
+ this.inflated = size;
+ }
+
+ ObjectInfo link(ChunkKey key) {
+ final int ptr = (int) getOffset();
+ return new ObjectInfo(key, -1, type, ptr, packed, inflated, null, false);
+ }
+ }
+}
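header() emits the standard pack-stream type-and-size header: the low four bits of the inflated size share the first byte with the type, and the remaining size bits follow in 7-bit groups flagged by a continuation bit. A minimal decoding sketch for reference, where buf and ptr are hypothetical locals:

    int c = buf[ptr++] & 0xff;
    int type = (c >>> 4) & 7;       // object type from bits 4..6
    long size = c & 0x0f;           // low four bits of the inflated size
    int shift = 4;
    while ((c & 0x80) != 0) {       // continuation flag on the previous byte
        c = buf[ptr++] & 0xff;
        size |= ((long) (c & 0x7f)) << shift;
        shift += 7;
    }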
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java
new file mode 100644
index 0000000000..91a2d0efcf
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
+
+import java.text.MessageFormat;
+import java.util.Collections;
+import java.util.List;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.transport.PackedObjectInfo;
+import org.eclipse.jgit.util.NB;
+
+/** Index into a {@link PackChunk}. */
+public abstract class ChunkIndex {
+ private static final int V1 = 0x01;
+
+ static ChunkIndex fromBytes(ChunkKey key, byte[] index, int ptr, int len)
+ throws DhtException {
+ int v = index[ptr] & 0xff;
+ switch (v) {
+ case V1: {
+ final int offsetFormat = index[ptr + 1] & 7;
+ switch (offsetFormat) {
+ case 1:
+ return new Offset1(index, ptr, len, key);
+ case 2:
+ return new Offset2(index, ptr, len, key);
+ case 3:
+ return new Offset3(index, ptr, len, key);
+ case 4:
+ return new Offset4(index, ptr, len, key);
+ default:
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().unsupportedChunkIndex,
+ Integer.toHexString(NB.decodeUInt16(index, ptr)), key));
+ }
+ }
+ default:
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().unsupportedChunkIndex,
+ Integer.toHexString(v), key));
+ }
+ }
+
+ /**
+ * Format the chunk index and return its binary representation.
+ *
+ * @param list
+ * the list of objects that appear in the chunk. This list will
+ * be sorted in-place if it has more than 1 element.
+ * @return binary representation of the chunk's objects and their starting
+ * offsets. The format is private to this class.
+ */
+ @SuppressWarnings("null")
+ static byte[] create(List<? extends PackedObjectInfo> list) {
+ int cnt = list.size();
+ sortObjectList(list);
+
+ int fanoutFormat = 0;
+ int[] buckets = null;
+ if (64 < cnt) {
+ buckets = new int[256];
+ for (PackedObjectInfo oe : list)
+ buckets[oe.getFirstByte()]++;
+ fanoutFormat = selectFanoutFormat(buckets);
+ }
+
+ int offsetFormat = selectOffsetFormat(list);
+ byte[] index = new byte[2 // header
+ + 256 * fanoutFormat // (optional) fanout
+ + cnt * OBJECT_ID_LENGTH // ids
+ + cnt * offsetFormat // offsets
+ ];
+ index[0] = V1;
+ index[1] = (byte) ((fanoutFormat << 3) | offsetFormat);
+
+ int ptr = 2;
+
+ switch (fanoutFormat) {
+ case 0:
+ break;
+ case 1:
+ for (int i = 0; i < 256; i++, ptr++)
+ index[ptr] = (byte) buckets[i];
+ break;
+ case 2:
+ for (int i = 0; i < 256; i++, ptr += 2)
+ NB.encodeInt16(index, ptr, buckets[i]);
+ break;
+ case 3:
+ for (int i = 0; i < 256; i++, ptr += 3)
+ encodeUInt24(index, ptr, buckets[i]);
+ break;
+ case 4:
+ for (int i = 0; i < 256; i++, ptr += 4)
+ NB.encodeInt32(index, ptr, buckets[i]);
+ break;
+ }
+
+ for (PackedObjectInfo oe : list) {
+ oe.copyRawTo(index, ptr);
+ ptr += OBJECT_ID_LENGTH;
+ }
+
+ switch (offsetFormat) {
+ case 1:
+ for (PackedObjectInfo oe : list)
+ index[ptr++] = (byte) oe.getOffset();
+ break;
+
+ case 2:
+ for (PackedObjectInfo oe : list) {
+ NB.encodeInt16(index, ptr, (int) oe.getOffset());
+ ptr += 2;
+ }
+ break;
+
+ case 3:
+ for (PackedObjectInfo oe : list) {
+ encodeUInt24(index, ptr, (int) oe.getOffset());
+ ptr += 3;
+ }
+ break;
+
+ case 4:
+ for (PackedObjectInfo oe : list) {
+ NB.encodeInt32(index, ptr, (int) oe.getOffset());
+ ptr += 4;
+ }
+ break;
+ }
+
+ return index;
+ }
+
+ private static int selectFanoutFormat(int[] buckets) {
+ int fmt = 1;
+ int max = 1 << (8 * fmt);
+
+ for (int cnt : buckets) {
+ while (max <= cnt && fmt < 4) {
+ if (++fmt == 4)
+ return fmt;
+ max = 1 << (8 * fmt);
+ }
+ }
+ return fmt;
+ }
+
+ private static int selectOffsetFormat(List<? extends PackedObjectInfo> list) {
+ int fmt = 1;
+ int max = 1 << (8 * fmt);
+
+ for (PackedObjectInfo oe : list) {
+ while (max <= oe.getOffset() && fmt < 4) {
+ if (++fmt == 4)
+ return fmt;
+ max = 1 << (8 * fmt);
+ }
+ }
+ return fmt;
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void sortObjectList(List<? extends PackedObjectInfo> list) {
+ Collections.sort(list);
+ }
+
+ private final byte[] indexBuf;
+
+ private final int indexPtr;
+
+ private final int indexLen;
+
+ private final int[] fanout;
+
+ private final int idTable;
+
+ private final int offsetTable;
+
+ private final int count;
+
+ ChunkIndex(byte[] indexBuf, int ptr, int len, ChunkKey key)
+ throws DhtException {
+ final int ctl = indexBuf[ptr + 1];
+ final int fanoutFormat = (ctl >>> 3) & 7;
+ final int offsetFormat = ctl & 7;
+
+ switch (fanoutFormat) {
+ case 0:
+ fanout = null; // no fanout, too small
+ break;
+
+ case 1: {
+ int last = 0;
+ fanout = new int[256];
+ for (int i = 0; i < 256; i++) {
+ last += indexBuf[ptr + 2 + i] & 0xff;
+ fanout[i] = last;
+ }
+ break;
+ }
+ case 2: {
+ int last = 0;
+ fanout = new int[256];
+ for (int i = 0; i < 256; i++) {
+ last += NB.decodeUInt16(indexBuf, ptr + 2 + i * 2);
+ fanout[i] = last;
+ }
+ break;
+ }
+ case 3: {
+ int last = 0;
+ fanout = new int[256];
+ for (int i = 0; i < 256; i++) {
+ last += decodeUInt24(indexBuf, ptr + 2 + i * 3);
+ fanout[i] = last;
+ }
+ break;
+ }
+ case 4: {
+ int last = 0;
+ fanout = new int[256];
+ for (int i = 0; i < 256; i++) {
+ last += NB.decodeInt32(indexBuf, ptr + 2 + i * 4);
+ fanout[i] = last;
+ }
+ break;
+ }
+ default:
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().unsupportedChunkIndex,
+ Integer.toHexString(NB.decodeUInt16(indexBuf, ptr)), key));
+ }
+
+ this.indexBuf = indexBuf;
+ this.indexPtr = ptr;
+ this.indexLen = len;
+ this.idTable = indexPtr + 2 + 256 * fanoutFormat;
+
+ int recsz = OBJECT_ID_LENGTH + offsetFormat;
+ this.count = (indexLen - (idTable - indexPtr)) / recsz;
+ this.offsetTable = idTable + count * OBJECT_ID_LENGTH;
+ }
+
+ /**
+ * Get the total number of objects described by this index.
+ *
+ * @return number of objects in this index and its associated chunk.
+ */
+ public final int getObjectCount() {
+ return count;
+ }
+
+ /**
+ * Get an ObjectId from this index.
+ *
+ * @param nth
+ * the object to return. Must be in range [0, getObjectCount).
+ * @return the object id.
+ */
+ public final ObjectId getObjectId(int nth) {
+ return ObjectId.fromRaw(indexBuf, idPosition(nth));
+ }
+
+ /**
+ * Get the offset of an object in the chunk.
+ *
+ * @param nth
+ * the object whose offset should be returned. Must be in range
+ * [0, getObjectCount).
+ * @return the offset.
+ */
+ public final int getOffset(int nth) {
+ return getOffset(indexBuf, offsetTable, nth);
+ }
+
+ /** @return the size of this index, in bytes. */
+ int getIndexSize() {
+ int sz = indexBuf.length;
+ if (fanout != null)
+ sz += 12 + 256 * 4;
+ return sz;
+ }
+
+ /**
+ * Search for an object in the index.
+ *
+ * @param objId
+ * the object to locate.
+ * @return offset of the object in the corresponding chunk; -1 if not found.
+ */
+ final int findOffset(AnyObjectId objId) {
+ int hi, lo;
+
+ if (fanout != null) {
+ int fb = objId.getFirstByte();
+ lo = fb == 0 ? 0 : fanout[fb - 1];
+ hi = fanout[fb];
+ } else {
+ lo = 0;
+ hi = count;
+ }
+
+ while (lo < hi) {
+ final int mid = (lo + hi) >>> 1;
+ final int cmp = objId.compareTo(indexBuf, idPosition(mid));
+ if (cmp < 0)
+ hi = mid;
+ else if (cmp == 0)
+ return getOffset(mid);
+ else
+ lo = mid + 1;
+ }
+ return -1;
+ }
+
+ abstract int getOffset(byte[] indexArray, int offsetTableStart, int nth);
+
+ private int idPosition(int nth) {
+ return idTable + (nth * OBJECT_ID_LENGTH);
+ }
+
+ private static class Offset1 extends ChunkIndex {
+ Offset1(byte[] index, int ptr, int len, ChunkKey key)
+ throws DhtException {
+ super(index, ptr, len, key);
+ }
+
+ int getOffset(byte[] index, int offsetTable, int nth) {
+ return index[offsetTable + nth] & 0xff;
+ }
+ }
+
+ private static class Offset2 extends ChunkIndex {
+ Offset2(byte[] index, int ptr, int len, ChunkKey key)
+ throws DhtException {
+ super(index, ptr, len, key);
+ }
+
+ int getOffset(byte[] index, int offsetTable, int nth) {
+ return NB.decodeUInt16(index, offsetTable + (nth * 2));
+ }
+ }
+
+ private static class Offset3 extends ChunkIndex {
+ Offset3(byte[] index, int ptr, int len, ChunkKey key)
+ throws DhtException {
+ super(index, ptr, len, key);
+ }
+
+ int getOffset(byte[] index, int offsetTable, int nth) {
+ return decodeUInt24(index, offsetTable + (nth * 3));
+ }
+ }
+
+ private static class Offset4 extends ChunkIndex {
+ Offset4(byte[] index, int ptr, int len, ChunkKey key)
+ throws DhtException {
+ super(index, ptr, len, key);
+ }
+
+ int getOffset(byte[] index, int offsetTable, int nth) {
+ return NB.decodeInt32(index, offsetTable + (nth * 4));
+ }
+ }
+
+ private static void encodeUInt24(byte[] intbuf, int offset, int v) {
+ intbuf[offset + 2] = (byte) v;
+ v >>>= 8;
+
+ intbuf[offset + 1] = (byte) v;
+ v >>>= 8;
+
+ intbuf[offset] = (byte) v;
+ }
+
+ private static int decodeUInt24(byte[] intbuf, int offset) {
+ int r = (intbuf[offset] & 0xff) << 8;
+
+ r |= intbuf[offset + 1] & 0xff;
+ r <<= 8;
+
+ r |= intbuf[offset + 2] & 0xff;
+ return r;
+ }
+}
\ No newline at end of file
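A small sketch of the public read side of the index; chunkKey and raw are assumed to have been fetched from storage, and fromBytes() is reachable from code in the same package.

    ChunkIndex idx = ChunkIndex.fromBytes(chunkKey, raw, 0, raw.length);
    for (int i = 0; i < idx.getObjectCount(); i++)
        System.out.println(idx.getObjectId(i).name() + " at " + idx.getOffset(i));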
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java
new file mode 100644
index 0000000000..5282a1d4ee
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.lib.Constants;
+
+/**
+ * Summary information about a chunk owned by a repository.
+ */
+public class ChunkInfo {
+ /** Source of the chunk (what code path created it). */
+ public static enum Source implements TinyProtobuf.Enum {
+ /** Came in over the network from an external source. */
+ RECEIVE(1),
+ /** Created in this repository (e.g. a merge). */
+ INSERT(2),
+ /** Generated during a repack of this repository. */
+ REPACK(3);
+
+ private final int value;
+
+ Source(int val) {
+ this.value = val;
+ }
+
+ public int value() {
+ return value;
+ }
+ }
+
+ /** Mixed objects are stored in the chunk (instead of single type). */
+ public static final int OBJ_MIXED = 0;
+
+ /**
+ * Parse info from the storage system.
+ *
+ * @param chunkKey
+ * the chunk this info describes.
+ * @param raw
+ * the raw encoding of the info.
+ * @return the info object.
+ */
+ public static ChunkInfo fromBytes(ChunkKey chunkKey, byte[] raw) {
+ ChunkInfo info = new ChunkInfo();
+ info.chunkKey = chunkKey;
+
+ TinyProtobuf.Decoder d = TinyProtobuf.decode(raw);
+ PARSE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break PARSE;
+ case 1:
+ info.source = d.intEnum(Source.values());
+ continue;
+ case 2:
+ info.objectType = d.int32();
+ continue;
+ case 3:
+ info.fragment = d.bool();
+ continue;
+ case 4:
+ info.cachedPack = CachedPackKey.fromBytes(d);
+ continue;
+
+ case 5: {
+ TinyProtobuf.Decoder m = d.message();
+ for (;;) {
+ switch (m.next()) {
+ case 0:
+ continue PARSE;
+ case 1:
+ info.objectsTotal = m.int32();
+ continue;
+ case 2:
+ info.objectsWhole = m.int32();
+ continue;
+ case 3:
+ info.objectsOfsDelta = m.int32();
+ continue;
+ case 4:
+ info.objectsRefDelta = m.int32();
+ continue;
+ default:
+ m.skip();
+ continue;
+ }
+ }
+ }
+ case 6:
+ info.chunkSize = d.int32();
+ continue;
+ case 7:
+ info.indexSize = d.int32();
+ continue;
+ case 8:
+ info.metaSize = d.int32();
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+ return info;
+ }
+
+ private static byte[] asBytes(ChunkInfo info) {
+ TinyProtobuf.Encoder objects = TinyProtobuf.encode(48);
+ objects.int32IfNotZero(1, info.objectsTotal);
+ objects.int32IfNotZero(2, info.objectsWhole);
+ objects.int32IfNotZero(3, info.objectsOfsDelta);
+ objects.int32IfNotZero(4, info.objectsRefDelta);
+
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(128);
+ e.intEnum(1, info.source);
+ e.int32IfNotNegative(2, info.objectType);
+ e.boolIfTrue(3, info.fragment);
+ e.string(4, info.cachedPack);
+ e.message(5, objects);
+ e.int32IfNotZero(6, info.chunkSize);
+ e.int32IfNotZero(7, info.indexSize);
+ e.int32IfNotZero(8, info.metaSize);
+ return e.asByteArray();
+ }
+
+ ChunkKey chunkKey;
+
+ Source source;
+
+ int objectType = -1;
+
+ boolean fragment;
+
+ CachedPackKey cachedPack;
+
+ int objectsTotal;
+
+ int objectsWhole;
+
+ int objectsOfsDelta;
+
+ int objectsRefDelta;
+
+ int chunkSize;
+
+ int indexSize;
+
+ int metaSize;
+
+ /** @return the repository that contains the chunk. */
+ public RepositoryKey getRepositoryKey() {
+ return chunkKey.getRepositoryKey();
+ }
+
+ /** @return the chunk this information describes. */
+ public ChunkKey getChunkKey() {
+ return chunkKey;
+ }
+
+ /** @return source of this chunk. */
+ public Source getSource() {
+ return source;
+ }
+
+ /** @return type of object in the chunk, or {@link #OBJ_MIXED}. */
+ public int getObjectType() {
+ return objectType;
+ }
+
+ /** @return true if this chunk is part of a large fragmented object. */
+ public boolean isFragment() {
+ return fragment;
+ }
+
+ /** @return cached pack this is a member of, or null. */
+ public CachedPackKey getCachedPack() {
+ return cachedPack;
+ }
+
+ /** @return size of the chunk's compressed data, in bytes. */
+ public int getChunkSizeInBytes() {
+ return chunkSize;
+ }
+
+ /** @return size of the chunk's index data, in bytes. */
+ public int getIndexSizeInBytes() {
+ return indexSize;
+ }
+
+ /** @return size of the chunk's meta data, in bytes. */
+ public int getMetaSizeInBytes() {
+ return metaSize;
+ }
+
+ /** @return number of objects stored in the chunk. */
+ public int getObjectsTotal() {
+ return objectsTotal;
+ }
+
+ /** @return number of whole objects stored in the chunk. */
+ public int getObjectsWhole() {
+ return objectsWhole;
+ }
+
+ /** @return number of OFS_DELTA objects stored in the chunk. */
+ public int getObjectsOffsetDelta() {
+ return objectsOfsDelta;
+ }
+
+ /** @return number of REF_DELTA objects stored in the chunk. */
+ public int getObjectsReferenceDelta() {
+ return objectsRefDelta;
+ }
+
+ /**
+ * Convert this info into a byte array for storage.
+ *
+ * @return the info data, encoded as a byte array. This does not include
+ * the ChunkKey; callers must store that separately.
+ */
+ public byte[] asBytes() {
+ return asBytes(this);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ b.append("ChunkInfo:");
+ b.append(chunkKey);
+ b.append(" [");
+ if (getSource() != null)
+ b.append(" ").append(getSource());
+ if (isFragment())
+ b.append(" fragment");
+ if (getObjectType() != 0)
+ b.append(" ").append(Constants.typeString(getObjectType()));
+ if (0 < getObjectsTotal())
+ b.append(" objects=").append(getObjectsTotal());
+ if (0 < getChunkSizeInBytes())
+ b.append(" chunk=").append(getChunkSizeInBytes()).append("B");
+ if (0 < getIndexSizeInBytes())
+ b.append(" index=").append(getIndexSizeInBytes()).append("B");
+ b.append(" ]");
+ return b.toString();
+ }
+}
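The encoding round-trips through asBytes() and fromBytes(); a tiny sketch, assuming an existing ChunkInfo named info:

    byte[] row = info.asBytes();   // value stored alongside the ChunkKey
    ChunkInfo copy = ChunkInfo.fromBytes(info.getChunkKey(), row);
    assert copy.getObjectsTotal() == info.getObjectsTotal();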
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java
new file mode 100644
index 0000000000..e136df268a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.storage.dht.KeyUtils.format32;
+import static org.eclipse.jgit.storage.dht.KeyUtils.parse32;
+import static org.eclipse.jgit.util.RawParseUtils.decode;
+
+import java.text.MessageFormat;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+
+/** Unique identifier of a {@link PackChunk} in the DHT. */
+public final class ChunkKey implements RowKey {
+ static final int KEYLEN = 52;
+
+ /**
+ * @param repo
+ * repository that owns the chunk.
+ * @param chunk
+ * SHA-1 name of the chunk.
+ * @return the key
+ */
+ public static ChunkKey create(RepositoryKey repo, ObjectId chunk) {
+ return new ChunkKey(repo.asInt(), chunk);
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static ChunkKey fromBytes(byte[] key) {
+ return fromBytes(key, 0, key.length);
+ }
+
+ /**
+ * @param d
+ * decoder positioned at the field holding the key.
+ * @return the key
+ */
+ public static ChunkKey fromBytes(TinyProtobuf.Decoder d) {
+ int len = d.bytesLength();
+ int ptr = d.bytesOffset();
+ byte[] buf = d.bytesArray();
+ return fromBytes(buf, ptr, len);
+ }
+
+ /**
+ * @param key
+ * @param ptr
+ * @param len
+ * @return the key
+ */
+ public static ChunkKey fromBytes(byte[] key, int ptr, int len) {
+ if (len != KEYLEN)
+ throw new IllegalArgumentException(MessageFormat.format(
+ DhtText.get().invalidChunkKey, decode(key, ptr, ptr + len)));
+
+ int repo = parse32(key, ptr + 3);
+ ObjectId chunk = ObjectId.fromString(key, ptr + 12);
+ return new ChunkKey(repo, chunk);
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static ChunkKey fromString(String key) {
+ return fromBytes(Constants.encodeASCII(key));
+ }
+
+ private final int repo;
+
+ private final ObjectId chunk;
+
+ ChunkKey(int repo, ObjectId chunk) {
+ this.repo = repo;
+ this.chunk = chunk;
+ }
+
+ /** @return the repository that contains the chunk. */
+ public RepositoryKey getRepositoryKey() {
+ return RepositoryKey.fromInt(repo);
+ }
+
+ int getRepositoryId() {
+ return repo;
+ }
+
+ /** @return unique SHA-1 describing the chunk. */
+ public ObjectId getChunkHash() {
+ return chunk;
+ }
+
+ public byte[] asBytes() {
+ byte[] r = new byte[KEYLEN];
+ chunk.copyTo(r, 12);
+ format32(r, 3, repo);
+ // bucket is the leading 2 digits of the SHA-1.
+ r[11] = '.';
+ r[2] = '.';
+ r[1] = r[12 + 1];
+ r[0] = r[12 + 0];
+ return r;
+ }
+
+ public String asString() {
+ return decode(asBytes());
+ }
+
+ @Override
+ public int hashCode() {
+ return chunk.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof ChunkKey) {
+ ChunkKey thisChunk = this;
+ ChunkKey otherChunk = (ChunkKey) other;
+ return thisChunk.repo == otherChunk.repo
+ && thisChunk.chunk.equals(otherChunk.chunk);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "chunk:" + asString();
+ }
+}
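asBytes() lays the 52-byte key out as the two leading hex digits of the chunk SHA-1 (the bucket), a '.', an 8-character encoding of the repository id, another '.', and the 40-character chunk SHA-1. A round-trip sketch, assuming an existing RepositoryKey repoKey and chunk ObjectId chunkId:

    ChunkKey key = ChunkKey.create(repoKey, chunkId);
    ChunkKey copy = ChunkKey.fromString(key.asString());
    assert key.equals(copy) && key.hashCode() == copy.hashCode();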
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMeta.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMeta.java
new file mode 100644
index 0000000000..a02382b5c0
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMeta.java
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/** Metadata stored inline with each PackChunk. */
+public class ChunkMeta {
+ /**
+ * Convert from byte array.
+ *
+ * @param key
+ * the chunk key this meta object sits in.
+ * @param raw
+ * the raw byte array.
+ * @return the chunk meta.
+ */
+ public static ChunkMeta fromBytes(ChunkKey key, byte[] raw) {
+ return fromBytes(key, TinyProtobuf.decode(raw));
+ }
+
+ /**
+ * Convert from byte array.
+ *
+ * @param key
+ * the chunk key this meta object sits in.
+ * @param d
+ * the message decoder.
+ * @return the chunk meta.
+ */
+ public static ChunkMeta fromBytes(ChunkKey key, TinyProtobuf.Decoder d) {
+ List<BaseChunk> baseChunk = null;
+ List<ChunkKey> fragment = null;
+ PrefetchHint commit = null;
+ PrefetchHint tree = null;
+
+ PARSE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break PARSE;
+ case 1:
+ if (baseChunk == null)
+ baseChunk = new ArrayList<BaseChunk>(4);
+ baseChunk.add(BaseChunk.fromBytes(d.message()));
+ continue;
+ case 2:
+ if (fragment == null)
+ fragment = new ArrayList<ChunkKey>(4);
+ fragment.add(ChunkKey.fromBytes(d));
+ continue;
+ case 51:
+ commit = PrefetchHint.fromBytes(d.message());
+ continue;
+ case 52:
+ tree = PrefetchHint.fromBytes(d.message());
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+
+ return new ChunkMeta(key, baseChunk, fragment, commit, tree);
+ }
+
+ private final ChunkKey chunkKey;
+
+ List<BaseChunk> baseChunks;
+
+ List<ChunkKey> fragments;
+
+ PrefetchHint commitPrefetch;
+
+ PrefetchHint treePrefetch;
+
+ ChunkMeta(ChunkKey key) {
+ this(key, null, null, null, null);
+ }
+
+ ChunkMeta(ChunkKey chunkKey, List<BaseChunk> baseChunk,
+ List<ChunkKey> fragment, PrefetchHint commit, PrefetchHint tree) {
+ this.chunkKey = chunkKey;
+ this.baseChunks = baseChunk;
+ this.fragments = fragment;
+ this.commitPrefetch = commit;
+ this.treePrefetch = tree;
+ }
+
+ /** @return key of the chunk this meta information is for. */
+ public ChunkKey getChunkKey() {
+ return chunkKey;
+ }
+
+ BaseChunk getBaseChunk(long position) throws DhtException {
+ // Chunks are sorted by ascending relative_start order.
+ // Thus for a pack sequence of: A B C, we have:
+ //
+ // -- C relative_start = 10,000
+ // -- B relative_start = 20,000
+ // -- A relative_start = 30,000
+ //
+ // Indicating that chunk C starts 10,000 bytes before us,
+ // chunk B starts 20,000 bytes before us (and 10,000 before C),
+ // chunk A starts 30,000 bytes before us (and 10,000 before B).
+ //
+ // If position falls within:
+ //
+ // -- C (10k), then position is between 0..10,000
+ // -- B (20k), then position is between 10,000 .. 20,000
+ // -- A (30k), then position is between 20,000 .. 30,000
+
+ int high = baseChunks.size();
+ int low = 0;
+ while (low < high) {
+ final int mid = (low + high) >>> 1;
+ final BaseChunk base = baseChunks.get(mid);
+
+ if (position > base.relativeStart) {
+ low = mid + 1;
+
+ } else if (mid == 0 || position == base.relativeStart) {
+ return base;
+
+ } else if (baseChunks.get(mid - 1).relativeStart < position) {
+ return base;
+
+ } else {
+ high = mid;
+ }
+ }
+
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().missingLongOffsetBase, chunkKey,
+ Long.valueOf(position)));
+ }
+
+ /** @return number of fragment chunks that make up the object. */
+ public int getFragmentCount() {
+ return fragments != null ? fragments.size() : 0;
+ }
+
+ /**
+ * Get the nth fragment key.
+ *
+ * @param nth
+ * the fragment to return. Must be in range [0, getFragmentCount).
+ * @return the key.
+ */
+ public ChunkKey getFragmentKey(int nth) {
+ return fragments.get(nth);
+ }
+
+ /**
+ * Find the key of the fragment that occurs after this chunk.
+ *
+ * @param currentKey
+ * the current chunk key.
+ * @return next chunk after this; null if there isn't one.
+ */
+ public ChunkKey getNextFragment(ChunkKey currentKey) {
+ for (int i = 0; i < fragments.size() - 1; i++) {
+ if (fragments.get(i).equals(currentKey))
+ return fragments.get(i + 1);
+ }
+ return null;
+ }
+
+ /** @return chunks to visit. */
+ public PrefetchHint getCommitPrefetch() {
+ return commitPrefetch;
+ }
+
+ /** @return chunks to visit. */
+ public PrefetchHint getTreePrefetch() {
+ return treePrefetch;
+ }
+
+ /** @return true if there is no data in this object worth storing. */
+ boolean isEmpty() {
+ if (baseChunks != null && !baseChunks.isEmpty())
+ return false;
+ if (fragments != null && !fragments.isEmpty())
+ return false;
+ if (commitPrefetch != null && !commitPrefetch.isEmpty())
+ return false;
+ if (treePrefetch != null && !treePrefetch.isEmpty())
+ return false;
+ return true;
+ }
+
+ /** @return format as byte array for storage. */
+ public byte[] asBytes() {
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(256);
+
+ if (baseChunks != null) {
+ for (BaseChunk base : baseChunks)
+ e.message(1, base.asBytes());
+ }
+
+ if (fragments != null) {
+ for (ChunkKey key : fragments)
+ e.bytes(2, key.asBytes());
+ }
+
+ if (commitPrefetch != null)
+ e.message(51, commitPrefetch.asBytes());
+ if (treePrefetch != null)
+ e.message(52, treePrefetch.asBytes());
+
+ return e.asByteArray();
+ }
+
+ /** Describes other chunks that contain the bases for this chunk's deltas. */
+ public static class BaseChunk {
+ final long relativeStart;
+
+ private final ChunkKey chunk;
+
+ BaseChunk(long relativeStart, ChunkKey chunk) {
+ this.relativeStart = relativeStart;
+ this.chunk = chunk;
+ }
+
+ /** @return bytes backward from current chunk to start of base chunk. */
+ public long getRelativeStart() {
+ return relativeStart;
+ }
+
+ /** @return unique key of this chunk. */
+ public ChunkKey getChunkKey() {
+ return chunk;
+ }
+
+ TinyProtobuf.Encoder asBytes() {
+ int max = 11 + 2 + ChunkKey.KEYLEN;
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(max);
+ e.int64(1, relativeStart);
+ e.bytes(2, chunk.asBytes());
+ return e;
+ }
+
+ static BaseChunk fromBytes(TinyProtobuf.Decoder d) {
+ long relativeStart = -1;
+ ChunkKey chunk = null;
+
+ PARSE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break PARSE;
+ case 1:
+ relativeStart = d.int64();
+ continue;
+ case 2:
+ chunk = ChunkKey.fromBytes(d);
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+
+ return new BaseChunk(relativeStart, chunk);
+ }
+ }
+
+ /** Describes the prefetching for a particular object type. */
+ public static class PrefetchHint {
+ private final List<ChunkKey> edge;
+
+ private final List<ChunkKey> sequential;
+
+ PrefetchHint(List<ChunkKey> edge, List<ChunkKey> sequential) {
+ if (edge == null)
+ edge = Collections.emptyList();
+ else
+ edge = Collections.unmodifiableList(edge);
+
+ if (sequential == null)
+ sequential = Collections.emptyList();
+ else
+ sequential = Collections.unmodifiableList(sequential);
+
+ this.edge = edge;
+ this.sequential = sequential;
+ }
+
+ /** @return chunks on the edge of this chunk. */
+ public List<ChunkKey> getEdge() {
+ return edge;
+ }
+
+ /** @return chunks according to sequential ordering. */
+ public List<ChunkKey> getSequential() {
+ return sequential;
+ }
+
+ boolean isEmpty() {
+ return edge.isEmpty() && sequential.isEmpty();
+ }
+
+ TinyProtobuf.Encoder asBytes() {
+ int max = 0;
+
+ max += (2 + ChunkKey.KEYLEN) * edge.size();
+ max += (2 + ChunkKey.KEYLEN) * sequential.size();
+
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(max);
+ for (ChunkKey key : edge)
+ e.bytes(1, key.asBytes());
+ for (ChunkKey key : sequential)
+ e.bytes(2, key.asBytes());
+ return e;
+ }
+
+ static PrefetchHint fromBytes(TinyProtobuf.Decoder d) {
+ ArrayList<ChunkKey> edge = null;
+ ArrayList<ChunkKey> sequential = null;
+
+ PARSE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break PARSE;
+ case 1:
+ if (edge == null)
+ edge = new ArrayList<ChunkKey>(16);
+ edge.add(ChunkKey.fromBytes(d));
+ continue;
+ case 2:
+ if (sequential == null)
+ sequential = new ArrayList<ChunkKey>(16);
+ sequential.add(ChunkKey.fromBytes(d));
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+
+ if (edge != null)
+ edge.trimToSize();
+
+ if (sequential != null)
+ sequential.trimToSize();
+
+ return new PrefetchHint(edge, sequential);
+ }
+ }
+}
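A sketch of walking a fragmented object's chunks with the accessors above; meta is assumed to describe the first chunk of a fragmented object.

    ChunkKey next = meta.getFragmentKey(0);
    while (next != null) {
        // load and process the PackChunk named by 'next' ...
        next = meta.getNextFragment(next);
    }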
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java
new file mode 100644
index 0000000000..0bc1652f6f
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.lang.ref.SoftReference;
+
+/**
+ * Caches recently used objects for {@link DhtReader}.
+ * <p>
+ * This cache is not thread-safe. Each reader should have its own cache.
+ */
+final class DeltaBaseCache {
+ private final DhtReader.Statistics stats;
+
+ private int maxByteCount;
+
+ private final Slot[] table;
+
+ private Slot lruHead;
+
+ private Slot lruTail;
+
+ private int curByteCount;
+
+ DeltaBaseCache(DhtReader reader) {
+ stats = reader.getStatistics();
+
+ DhtReaderOptions options = reader.getOptions();
+ maxByteCount = options.getDeltaBaseCacheLimit();
+ table = new Slot[options.getDeltaBaseCacheSize()];
+ }
+
+ Entry get(ChunkKey key, int position) {
+ Slot e = table[hash(key, position)];
+ for (; e != null; e = e.tableNext) {
+ if (e.offset == position && key.equals(e.chunkKey)) {
+ Entry buf = e.data.get();
+ if (buf != null) {
+ moveToHead(e);
+ stats.deltaBaseCache_Hits++;
+ return buf;
+ }
+ }
+ }
+ stats.deltaBaseCache_Miss++;
+ return null;
+ }
+
+ void put(ChunkKey key, int offset, int objectType, byte[] data) {
+ if (data.length > maxByteCount)
+ return; // Too large to cache.
+
+ curByteCount += data.length;
+ releaseMemory();
+
+ int tableIdx = hash(key, offset);
+ Slot e = new Slot(key, offset, data.length);
+ e.data = new SoftReference<Entry>(new Entry(data, objectType));
+ e.tableNext = table[tableIdx];
+ table[tableIdx] = e;
+ moveToHead(e);
+ }
+
+ private void releaseMemory() {
+ while (curByteCount > maxByteCount && lruTail != null) {
+ Slot currOldest = lruTail;
+ Slot nextOldest = currOldest.lruPrev;
+
+ curByteCount -= currOldest.size;
+ unlink(currOldest);
+ removeFromTable(currOldest);
+
+ if (nextOldest == null)
+ lruHead = null;
+ else
+ nextOldest.lruNext = null;
+ lruTail = nextOldest;
+ }
+ }
+
+ private void removeFromTable(Slot e) {
+ int tableIdx = hash(e.chunkKey, e.offset);
+ Slot p = table[tableIdx];
+
+ if (p == e) {
+ table[tableIdx] = e.tableNext;
+ return;
+ }
+
+ for (; p != null; p = p.tableNext) {
+ if (p.tableNext == e) {
+ p.tableNext = e.tableNext;
+ return;
+ }
+ }
+ }
+
+ private void moveToHead(final Slot e) {
+ unlink(e);
+ e.lruPrev = null;
+ e.lruNext = lruHead;
+ if (lruHead != null)
+ lruHead.lruPrev = e;
+ else
+ lruTail = e;
+ lruHead = e;
+ }
+
+ private void unlink(final Slot e) {
+ Slot prev = e.lruPrev;
+ Slot next = e.lruNext;
+
+ if (prev != null)
+ prev.lruNext = next;
+ if (next != null)
+ next.lruPrev = prev;
+ }
+
+ private int hash(ChunkKey key, int position) {
+ return (((key.hashCode() & 0xfffff000) + position) >>> 1) % table.length;
+ }
+
+ static class Entry {
+ final byte[] data;
+
+ final int type;
+
+ Entry(final byte[] aData, final int aType) {
+ data = aData;
+ type = aType;
+ }
+ }
+
+ private static class Slot {
+ final ChunkKey chunkKey;
+
+ final int offset;
+
+ final int size;
+
+ Slot tableNext;
+
+ Slot lruPrev;
+
+ Slot lruNext;
+
+ SoftReference<Entry> data;
+
+ Slot(ChunkKey key, int offset, int size) {
+ this.chunkKey = key;
+ this.offset = offset;
+ this.size = size;
+ }
+ }
+}
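For context, a sketch of how a reader might consult this cache before inflating
a delta base; loadBase() is a placeholder for whatever reads and inflates the
base from its chunk, not an API introduced by this patch:

    DeltaBaseCache cache = new DeltaBaseCache(reader);
    DeltaBaseCache.Entry base = cache.get(chunkKey, basePos);
    if (base == null) {
        byte[] raw = loadBase(chunkKey, basePos);     // placeholder loader
        cache.put(chunkKey, basePos, baseType, raw);  // may trigger LRU eviction
    }

Entries are held through SoftReferences, so a hit is never guaranteed even for
recently inserted data; callers must always be prepared to reload.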
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java
new file mode 100644
index 0000000000..2ed22b7672
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.storage.pack.CachedPack;
+import org.eclipse.jgit.storage.pack.ObjectToPack;
+import org.eclipse.jgit.storage.pack.PackOutputStream;
+import org.eclipse.jgit.storage.pack.StoredObjectRepresentation;
+
+/** A cached pack stored by the DHT. */
+public class DhtCachedPack extends CachedPack {
+ private final CachedPackInfo info;
+
+ private Set<ChunkKey> chunkKeySet;
+
+ DhtCachedPack(CachedPackInfo info) {
+ this.info = info;
+ }
+
+ @Override
+ public Set<ObjectId> getTips() {
+ return Collections.unmodifiableSet(info.tips);
+ }
+
+ @Override
+ public long getObjectCount() {
+ return info.getObjectsTotal();
+ }
+
+ @Override
+ public long getDeltaCount() throws IOException {
+ return info.getObjectsDelta();
+ }
+
+ /** @return information describing this cached pack. */
+ public CachedPackInfo getCachedPackInfo() {
+ return info;
+ }
+
+ @Override
+ public boolean hasObject(ObjectToPack obj, StoredObjectRepresentation rep) {
+ DhtObjectRepresentation objrep = (DhtObjectRepresentation) rep;
+ if (chunkKeySet == null)
+ chunkKeySet = new HashSet<ChunkKey>(info.chunks);
+ return chunkKeySet.contains(objrep.getChunkKey());
+ }
+
+ void copyAsIs(PackOutputStream out, boolean validate, DhtReader ctx)
+ throws IOException {
+ Prefetcher p = new Prefetcher(ctx, 0);
+ p.setCacheLoadedChunks(false);
+ p.push(info.chunks);
+ copyPack(out, ctx, p, validate);
+ }
+
+ private void copyPack(PackOutputStream out, DhtReader ctx,
+ Prefetcher prefetcher, boolean validate) throws DhtException,
+ DhtMissingChunkException, IOException {
+ Map<ChunkKey, Long> startsAt = new HashMap<ChunkKey, Long>();
+ for (ChunkKey key : info.chunks) {
+ PackChunk chunk = prefetcher.get(key);
+
+ // The prefetcher should always produce the chunk for us. If it
+ // does not, something is seriously wrong with the ordering or
+ // the prefetcher code, and aborting is saner than falling back
+ // to slow synchronous lookups.
+ //
+ if (chunk == null)
+ throw new DhtMissingChunkException(key);
+
+ // Verify each OFS_DELTA base chunk appears at the expected offset.
+ // This is a cheap validation that the cached pack wasn't created
+ // incorrectly in a way that would confuse the client.
+ //
+ long position = out.length();
+ if (chunk.getMeta() != null && chunk.getMeta().baseChunks != null) {
+ for (ChunkMeta.BaseChunk base : chunk.getMeta().baseChunks) {
+ Long act = startsAt.get(base.getChunkKey());
+ long exp = position - base.getRelativeStart();
+
+ if (act == null) {
+ throw new DhtException(MessageFormat.format(DhtText
+ .get().wrongChunkPositionInCachedPack, info
+ .getRowKey(), base.getChunkKey(),
+ "[not written]", key, exp));
+ }
+
+ if (act.longValue() != exp) {
+ throw new DhtException(MessageFormat.format(DhtText
+ .get().wrongChunkPositionInCachedPack, info
+ .getRowKey(), base.getChunkKey(), //
+ act, key, exp));
+ }
+ }
+ }
+
+ startsAt.put(key, Long.valueOf(position));
+ chunk.copyEntireChunkAsIs(out, null, validate);
+ }
+ }
+}
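The offset check in copyPack() is plain arithmetic: a base recorded with
relativeStart = 4096 must have been written exactly 4096 bytes before the chunk
that depends on it. Worked illustration only:

    long baseStart = 1000L;                              // where the base chunk began
    long position = 5096L;                               // out.length() before this chunk
    long expected = position - base.getRelativeStart();  // 5096 - 4096 = 1000
    // expected must equal the recorded baseStart, else the cached pack is rejected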
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java
new file mode 100644
index 0000000000..24963c7962
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.errors.ConfigInvalidException;
+import org.eclipse.jgit.lib.StoredConfig;
+
+final class DhtConfig extends StoredConfig {
+ @Override
+ public void load() throws IOException, ConfigInvalidException {
+ clear();
+ }
+
+ @Override
+ public void save() throws IOException {
+ // TODO actually store this configuration.
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java
new file mode 100644
index 0000000000..7fdd662e06
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+/** Any error caused by a {@link Database} operation. */
+public class DhtException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param message
+ */
+ public DhtException(String message) {
+ super(message);
+ }
+
+ /**
+ * @param cause
+ */
+ public DhtException(Throwable cause) {
+ super(cause.getMessage());
+ initCause(cause);
+ }
+
+ /**
+ * @param message
+ * @param cause
+ */
+ public DhtException(String message, Throwable cause) {
+ super(message);
+ initCause(cause);
+ }
+
+ /** TODO: Remove this type and all of its uses. */
+ public static class TODO extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param what
+ */
+ public TODO(String what) {
+ super(what);
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java
new file mode 100644
index 0000000000..997f4b4d21
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.zip.Deflater;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.transport.PackParser;
+import org.eclipse.jgit.transport.PackedObjectInfo;
+import org.eclipse.jgit.util.IO;
+
+class DhtInserter extends ObjectInserter {
+ private final DhtObjDatabase objdb;
+
+ private final RepositoryKey repo;
+
+ private final Database db;
+
+ private final DhtInserterOptions options;
+
+ private Deflater deflater;
+
+ private WriteBuffer dbWriteBuffer;
+
+ private ChunkFormatter activeChunk;
+
+ DhtInserter(DhtObjDatabase objdb) {
+ this.objdb = objdb;
+ this.repo = objdb.getRepository().getRepositoryKey();
+ this.db = objdb.getDatabase();
+ this.options = objdb.getInserterOptions();
+ }
+
+ @Override
+ public ObjectId insert(int type, long len, InputStream in)
+ throws IOException {
+ if (Integer.MAX_VALUE < len || mustFragmentSize() < len)
+ return insertStream(type, len, in);
+
+ byte[] tmp;
+ try {
+ tmp = new byte[(int) len];
+ } catch (OutOfMemoryError tooLarge) {
+ return insertStream(type, len, in);
+ }
+ IO.readFully(in, tmp, 0, tmp.length);
+ return insert(type, tmp, 0, tmp.length);
+ }
+
+ private ObjectId insertStream(final int type, final long inflatedSize,
+ final InputStream in) throws IOException {
+
+ // TODO Permit multiple chunks to be buffered here at once.
+ // It might be possible to compress and hold all chunks for
+ // an object, which would allow their ChunkInfo and chunks to
+ // be written in parallel, and would avoid the rewrite with
+ // the ChunkFragments at the end.
+
+ MessageDigest chunkDigest = Constants.newMessageDigest();
+ LinkedList<ChunkKey> fragmentList = new LinkedList<ChunkKey>();
+
+ ChunkFormatter chunk = newChunk();
+ int position = chunk.position();
+ if (!chunk.whole(type, inflatedSize))
+ throw new DhtException(DhtText.get().cannotInsertObject);
+
+ MessageDigest objDigest = digest();
+ objDigest.update(Constants.encodedTypeString(type));
+ objDigest.update((byte) ' ');
+ objDigest.update(Constants.encodeASCII(inflatedSize));
+ objDigest.update((byte) 0);
+
+ Deflater def = deflater();
+ byte[] inBuf = buffer();
+ long packedSize = 0;
+ long done = 0;
+ while (done < inflatedSize) {
+ if (done == 0 || def.needsInput()) {
+ int inAvail = in.read(inBuf);
+ if (inAvail <= 0)
+ throw new EOFException();
+ objDigest.update(inBuf, 0, inAvail);
+ def.setInput(inBuf, 0, inAvail);
+ done += inAvail;
+ }
+
+ if (chunk.free() == 0) {
+ packedSize += chunk.size();
+ chunk.setObjectType(type);
+ chunk.setFragment();
+ fragmentList.add(chunk.end(chunkDigest));
+ chunk.safePut(db, dbBuffer());
+ chunk = newChunk();
+ }
+ chunk.appendDeflateOutput(def);
+ }
+
+ def.finish();
+
+ while (!def.finished()) {
+ if (chunk.free() == 0) {
+ packedSize += chunk.size();
+ chunk.setObjectType(type);
+ chunk.setFragment();
+ fragmentList.add(chunk.end(chunkDigest));
+ chunk.safePut(db, dbBuffer());
+ chunk = newChunk();
+ }
+ chunk.appendDeflateOutput(def);
+ }
+
+ ObjectId objId = ObjectId.fromRaw(objDigest.digest());
+ PackedObjectInfo oe = new PackedObjectInfo(objId);
+ oe.setOffset(position);
+
+ if (!chunk.isEmpty()) {
+ packedSize += chunk.size();
+ chunk.setObjectType(type);
+
+ if (fragmentList.isEmpty()) {
+ ChunkKey key = chunk.end(chunkDigest);
+ chunk.setChunkIndex(Collections.singletonList(oe));
+ chunk.safePut(db, dbBuffer());
+ ObjectInfo info = new ObjectInfo(key, -1, type, position,
+ packedSize, inflatedSize, null, false);
+ ObjectIndexKey objKey = ObjectIndexKey.create(repo, objId);
+ db.objectIndex().add(objKey, info, dbBuffer());
+ return objId;
+ }
+
+ chunk.setFragment();
+ fragmentList.add(chunk.end(chunkDigest));
+ chunk.safePut(db, dbBuffer());
+ }
+ chunk = null;
+
+ ChunkKey firstChunkKey = fragmentList.get(0);
+ for (ChunkKey key : fragmentList) {
+ PackChunk.Members builder = new PackChunk.Members();
+ builder.setChunkKey(key);
+
+ ChunkMeta meta = new ChunkMeta(key);
+ meta.fragments = fragmentList;
+ builder.setMeta(meta);
+
+ if (firstChunkKey.equals(key))
+ builder.setChunkIndex(ChunkIndex.create(Arrays.asList(oe)));
+
+ db.chunk().put(builder, dbBuffer());
+ }
+
+ ObjectInfo info = new ObjectInfo(firstChunkKey, -1, type, position,
+ packedSize, inflatedSize, null, true);
+ ObjectIndexKey objKey = ObjectIndexKey.create(repo, objId);
+ db.objectIndex().add(objKey, info, dbBuffer());
+
+ return objId;
+ }
+
+ @Override
+ public ObjectId insert(int type, byte[] data, int off, int len)
+ throws IOException {
+ // TODO Is it important to avoid duplicate objects here?
+ // IIRC writing out a DirCache just blindly writes all of the
+ // tree objects to the inserter, relying on the inserter to
+ // strip out duplicates. We might need to buffer trees as
+ // long as possible, then collapse the buffer by looking up
+ // any existing objects and avoiding inserting those.
+
+ if (mustFragmentSize() < len)
+ return insertStream(type, len, asStream(data, off, len));
+
+ ObjectId objId = idFor(type, data, off, len);
+
+ if (activeChunk == null)
+ activeChunk = newChunk();
+
+ if (activeChunk.whole(deflater(), type, data, off, len, objId))
+ return objId;
+
+ // TODO Allow more than one chunk pending at a time, this would
+ // permit batching puts of the ChunkInfo records.
+
+ activeChunk.end(digest());
+ activeChunk.safePut(db, dbBuffer());
+ activeChunk = newChunk();
+
+ if (activeChunk.whole(deflater(), type, data, off, len, objId))
+ return objId;
+
+ return insertStream(type, len, asStream(data, off, len));
+ }
+
+ /** @return size beyond which an object, even compressed, won't fit in one chunk. */
+ private int mustFragmentSize() {
+ return 4 * options.getChunkSize();
+ }
+
+ @Override
+ public PackParser newPackParser(InputStream in) throws IOException {
+ return new DhtPackParser(objdb, in);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ if (activeChunk != null && !activeChunk.isEmpty()) {
+ activeChunk.end(digest());
+ activeChunk.safePut(db, dbBuffer());
+ activeChunk = null;
+ }
+
+ if (dbWriteBuffer != null)
+ dbWriteBuffer.flush();
+ }
+
+ @Override
+ public void release() {
+ if (deflater != null) {
+ deflater.end();
+ deflater = null;
+ }
+
+ dbWriteBuffer = null;
+ activeChunk = null;
+ }
+
+ private Deflater deflater() {
+ if (deflater == null)
+ deflater = new Deflater(options.getCompression());
+ else
+ deflater.reset();
+ return deflater;
+ }
+
+ private WriteBuffer dbBuffer() {
+ if (dbWriteBuffer == null)
+ dbWriteBuffer = db.newWriteBuffer();
+ return dbWriteBuffer;
+ }
+
+ private ChunkFormatter newChunk() {
+ ChunkFormatter fmt;
+
+ fmt = new ChunkFormatter(repo, options);
+ fmt.setSource(ChunkInfo.Source.INSERT);
+ return fmt;
+ }
+
+ private static ByteArrayInputStream asStream(byte[] data, int off, int len) {
+ return new ByteArrayInputStream(data, off, len);
+ }
+}
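With the default 1 MiB chunk size from DhtInserterOptions, mustFragmentSize()
works out to 4 MiB: objects at or below that threshold are buffered in memory
and deflated into the active chunk, while larger objects take the
insertStream() path and may be split across fragment chunks. A rough sketch of
the decision, assuming the defaults:

    int threshold = 4 * options.getChunkSize();  // 4 MiB with a 1 MiB chunk size
    // len <= threshold : buffer whole, pack into the active chunk
    // len >  threshold : insertStream(), object may span fragment chunks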
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java
new file mode 100644
index 0000000000..b1b1b5c5f8
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static java.util.zip.Deflater.DEFAULT_COMPRESSION;
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
+
+import java.security.SecureRandom;
+import java.util.zip.Deflater;
+
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.lib.CoreConfig;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+
+ /** Options controlling how objects are inserted into a DHT-stored repository. */
+public class DhtInserterOptions {
+ private static final SecureRandom prng = new SecureRandom();
+
+ /** 1024 (number of bytes in one kibibyte/kilobyte) */
+ public static final int KiB = 1024;
+
+ /** 1024 {@link #KiB} (number of bytes in one mebibyte/megabyte) */
+ public static final int MiB = 1024 * KiB;
+
+ private int chunkSize;
+
+ private int writeBufferSize;
+
+ private int compression;
+
+ private int prefetchDepth;
+
+ private long parserCacheLimit;
+
+ /** Create a default inserter configuration. */
+ public DhtInserterOptions() {
+ setChunkSize(1 * MiB);
+ setWriteBufferSize(1 * MiB);
+ setCompression(DEFAULT_COMPRESSION);
+ setPrefetchDepth(50);
+ setParserCacheLimit(512 * getChunkSize());
+ }
+
+ /** @return maximum size of a chunk, in bytes. */
+ public int getChunkSize() {
+ return chunkSize;
+ }
+
+ /**
+ * Set the maximum size of a chunk, in bytes.
+ *
+ * @param sizeInBytes
+ * the maximum size. A chunk's data segment won't exceed this.
+ * @return {@code this}
+ */
+ public DhtInserterOptions setChunkSize(int sizeInBytes) {
+ chunkSize = Math.max(1024, sizeInBytes);
+ return this;
+ }
+
+ /** @return maximum number of outstanding write bytes. */
+ public int getWriteBufferSize() {
+ return writeBufferSize;
+ }
+
+ /**
+ * Set the maximum number of outstanding bytes in a {@link WriteBuffer}.
+ *
+ * @param sizeInBytes
+ * maximum number of bytes.
+ * @return {@code this}
+ */
+ public DhtInserterOptions setWriteBufferSize(int sizeInBytes) {
+ writeBufferSize = Math.max(1024, sizeInBytes);
+ return this;
+ }
+
+ /** @return maximum number of objects to put into a chunk. */
+ public int getMaxObjectCount() {
+ // Do not allow the index to be larger than a chunk itself.
+ return getChunkSize() / (OBJECT_ID_LENGTH + 4);
+ }
+
+ /** @return compression level used when writing new objects into chunks. */
+ public int getCompression() {
+ return compression;
+ }
+
+ /**
+ * Set the compression level used when writing new objects.
+ *
+ * @param level
+ * the compression level. Use
+ * {@link Deflater#DEFAULT_COMPRESSION} to specify a default
+ * compression setting.
+ * @return {@code this}
+ */
+ public DhtInserterOptions setCompression(int level) {
+ compression = level;
+ return this;
+ }
+
+ /**
+ * Maximum number of entries in a chunk's prefetch list.
+ * <p>
+ * Each commit or tree chunk stores an optional prefetch list containing
+ * the next X chunk keys that a reader would need if it were traversing
+ * the project history. This implies that chunk prefetch lists overlap.
+ * <p>
+ * The depth at insertion time needs to be large enough to give readers
+ * sufficient parallel prefetch to keep themselves busy without waiting
+ * on sequential loads. If the depth is not sufficient, readers will stall
+ * while they sequentially look up the next chunk they need.
+ *
+ * @return maximum number of entries in a {@link ChunkMeta} list.
+ */
+ public int getPrefetchDepth() {
+ return prefetchDepth;
+ }
+
+ /**
+ * Set the maximum number of entries in a chunk's prefetch list.
+ *
+ * @param depth
+ * maximum depth of the prefetch list.
+ * @return {@code this}
+ */
+ public DhtInserterOptions setPrefetchDepth(int depth) {
+ prefetchDepth = Math.max(0, depth);
+ return this;
+ }
+
+ /**
+ * Number of chunks the parser can cache for delta resolution support.
+ *
+ * @return chunks to hold in memory to support delta resolution.
+ */
+ public int getParserCacheSize() {
+ return (int) (getParserCacheLimit() / getChunkSize());
+ }
+
+ /** @return number of bytes the PackParser can cache for delta resolution. */
+ public long getParserCacheLimit() {
+ return parserCacheLimit;
+ }
+
+ /**
+ * Set the number of bytes the PackParser can cache.
+ *
+ * @param limit
+ * number of bytes the parser can cache.
+ * @return {@code this}
+ */
+ public DhtInserterOptions setParserCacheLimit(long limit) {
+ parserCacheLimit = Math.max(0, limit);
+ return this;
+ }
+
+ /** @return next random 32 bits to salt chunk keys. */
+ int nextChunkSalt() {
+ return prng.nextInt();
+ }
+
+ /**
+ * Update properties by setting fields from the configuration.
+ * <p>
+ * If a property is not defined in the configuration, then it is left
+ * unmodified.
+ *
+ * @param rc
+ * configuration to read properties from.
+ * @return {@code this}
+ */
+ public DhtInserterOptions fromConfig(Config rc) {
+ setChunkSize(rc.getInt("core", "dht", "chunkSize", getChunkSize()));
+ setWriteBufferSize(rc.getInt("core", "dht", "writeBufferSize", getWriteBufferSize()));
+ setCompression(rc.get(CoreConfig.KEY).getCompression());
+ setPrefetchDepth(rc.getInt("core", "dht", "packParserPrefetchDepth", getPrefetchDepth()));
+ setParserCacheLimit(rc.getLong("core", "dht", "packParserCacheLimit", getParserCacheLimit()));
+ return this;
+ }
+}
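fromConfig() reads the core.dht.* keys shown above. A hedged sketch of feeding
it values through the JGit Config API (any org.eclipse.jgit.lib.Config works,
including a repository's own configuration):

    Config cfg = new Config();
    cfg.setInt("core", "dht", "chunkSize", 1024 * 1024);
    cfg.setInt("core", "dht", "packParserPrefetchDepth", 50);
    DhtInserterOptions opts = new DhtInserterOptions().fromConfig(cfg);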
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java
new file mode 100644
index 0000000000..4fc103be95
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.text.MessageFormat;
+
+/** Indicates a {@link PackChunk} doesn't exist in the database. */
+public class DhtMissingChunkException extends DhtException {
+ private static final long serialVersionUID = 1L;
+
+ private final ChunkKey chunkKey;
+
+ /**
+ * Initialize a new missing chunk exception.
+ *
+ * @param key
+ * the key of the chunk that is not found.
+ */
+ public DhtMissingChunkException(ChunkKey key) {
+ super(MessageFormat.format(DhtText.get().missingChunk, key));
+ chunkKey = key;
+ }
+
+ /**
+ * Initialize a new missing chunk exception.
+ *
+ * @param key
+ * the key of the chunk that is not found.
+ * @param why
+ * reason the chunk is missing. This may describe low-level
+ * data corruption in the database.
+ */
+ public DhtMissingChunkException(ChunkKey key, Throwable why) {
+ this(key);
+ initCause(why);
+ }
+
+ /** @return key of the chunk that is missing. */
+ public ChunkKey getChunkKey() {
+ return chunkKey;
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java
new file mode 100644
index 0000000000..4261676b9e
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.lib.ObjectDatabase;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+/** ObjectDatabase stored on top of the DHT database. */
+public class DhtObjDatabase extends ObjectDatabase {
+ private final DhtRepository repository;
+
+ private final Database db;
+
+ private final DhtReaderOptions readerOptions;
+
+ private final DhtInserterOptions inserterOptions;
+
+ DhtObjDatabase(DhtRepository repository, DhtRepositoryBuilder builder) {
+ this.repository = repository;
+ this.db = builder.getDatabase();
+ this.readerOptions = builder.getReaderOptions();
+ this.inserterOptions = builder.getInserterOptions();
+ }
+
+ DhtRepository getRepository() {
+ return repository;
+ }
+
+ Database getDatabase() {
+ return db;
+ }
+
+ DhtReaderOptions getReaderOptions() {
+ return readerOptions;
+ }
+
+ DhtInserterOptions getInserterOptions() {
+ return inserterOptions;
+ }
+
+ @Override
+ public boolean exists() {
+ return repository.getRepositoryKey() != null;
+ }
+
+ @Override
+ public void close() {
+ // Do nothing.
+ }
+
+ @Override
+ public ObjectReader newReader() {
+ return new DhtReader(this);
+ }
+
+ @Override
+ public ObjectInserter newInserter() {
+ return new DhtInserter(this);
+ }
+}
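Callers normally reach DhtInserter and DhtReader through the standard JGit
object database API rather than constructing them directly. A minimal sketch,
assuming a DhtRepository instance named repo:

    ObjectInserter ins = repo.newObjectInserter();
    try {
        ObjectId id = ins.insert(Constants.OBJ_BLOB, Constants.encode("hello"));
        ins.flush();
    } finally {
        ins.release();
    }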
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java
new file mode 100644
index 0000000000..a5499254e5
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.storage.pack.StoredObjectRepresentation;
+
+final class DhtObjectRepresentation extends StoredObjectRepresentation {
+ private ObjectInfo info;
+
+ void set(ObjectInfo link) {
+ this.info = link;
+ }
+
+ ChunkKey getChunkKey() {
+ return info.getChunkKey();
+ }
+
+ int getOffset() {
+ return info.getOffset();
+ }
+
+ long getPackedSize() {
+ return info.getPackedSize();
+ }
+
+ boolean isFragmented() {
+ return info.isFragmented();
+ }
+
+ @Override
+ public ObjectId getDeltaBase() {
+ return info.getDeltaBase();
+ }
+
+ @Override
+ public int getFormat() {
+ if (info.getDeltaBase() != null)
+ return PACK_DELTA;
+ return PACK_WHOLE;
+ }
+
+ @Override
+ public int getWeight() {
+ long size = info.getPackedSize();
+ return (int) Math.min(size, Integer.MAX_VALUE);
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java
new file mode 100644
index 0000000000..98161802fa
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.revwalk.RevObject;
+import org.eclipse.jgit.storage.pack.ObjectToPack;
+import org.eclipse.jgit.storage.pack.StoredObjectRepresentation;
+
+final class DhtObjectToPack extends ObjectToPack {
+ private static final int FRAGMENTED = 1 << 0;
+
+ /** Chunk that contains this object. */
+ ChunkKey chunk;
+
+ /** Offset of this object within its chunk. */
+ int offset;
+
+ /** Number of bytes in the object's compressed form, excluding pack header. */
+ int size;
+
+ /** Order in which this object's chunk is visited by the {@link Prefetcher}. */
+ int visitOrder;
+
+ DhtObjectToPack(RevObject obj) {
+ super(obj);
+ }
+
+ boolean isFragmented() {
+ return isExtendedFlag(FRAGMENTED);
+ }
+
+ @Override
+ public void select(StoredObjectRepresentation ref) {
+ DhtObjectRepresentation rep = (DhtObjectRepresentation) ref;
+ chunk = rep.getChunkKey();
+ offset = rep.getOffset();
+
+ final long sz = rep.getPackedSize();
+ if (sz <= Integer.MAX_VALUE)
+ size = (int) sz;
+ else
+ size = -1;
+
+ if (rep.isFragmented())
+ setExtendedFlag(FRAGMENTED);
+ else
+ clearExtendedFlag(FRAGMENTED);
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java
new file mode 100644
index 0000000000..86078335d3
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java
@@ -0,0 +1,1380 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
+import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
+import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
+import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;
+import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
+import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
+import static org.eclipse.jgit.storage.dht.ChunkInfo.OBJ_MIXED;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.MessageDigest;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.MutableObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectIdSubclassMap;
+import org.eclipse.jgit.lib.ProgressMonitor;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.file.PackLock;
+import org.eclipse.jgit.transport.PackParser;
+import org.eclipse.jgit.transport.PackedObjectInfo;
+import org.eclipse.jgit.treewalk.CanonicalTreeParser;
+import org.eclipse.jgit.util.LongList;
+
+/** Parses the pack stream into chunks, and indexes the chunks for lookup. */
+public class DhtPackParser extends PackParser {
+ private final DhtObjDatabase objdb;
+
+ private final RepositoryKey repo;
+
+ private final Database db;
+
+ private final DhtInserterOptions options;
+
+ private final MessageDigest chunkKeyDigest;
+
+ /** Number of objects to write to the global index at once. */
+ private final int linkBatchSize;
+
+ private Boolean saveAsCachedPack;
+
+ private WriteBuffer dbWriteBuffer;
+
+ /** Chunk writers for the 4 major object types, keyed by object type code. */
+ private ChunkFormatter[] openChunks;
+
+ /** Edges for current chunks. */
+ private Edges[] openEdges;
+
+ /** Prior chunks that were written, keyed by object type code. */
+ private List<ChunkInfo>[] infoByOrder;
+
+ /** Information on chunks already written out. */
+ private Map<ChunkKey, ChunkInfo> infoByKey;
+
+ /** Metadata for chunks already written out. */
+ private Map<ChunkKey, ChunkMeta> chunkMeta;
+
+ /** ChunkMeta that needs to be written out again, as it was modified. */
+ private Map<ChunkKey, ChunkMeta> dirtyMeta;
+
+ private Map<ChunkKey, Edges> chunkEdges;
+
+ // Correlated lists, sorted by object stream position.
+ private LongList objStreamPos;
+
+ private LongList objChunkPtrs;
+
+ /** Formatter handling the current object's data stream. */
+ private ChunkFormatter currChunk;
+
+ /** Current type of the object, if known. */
+ private int currType;
+
+ /** Position of the current object in the chunks we create. */
+ private long currChunkPtr;
+
+ /** If using OFS_DELTA, location of the base object in chunk space. */
+ private long currBasePtr;
+
+ /** Starting byte of the object data (aka end of the object header). */
+ private int currDataPos;
+
+ /** Total number of bytes in the object representation. */
+ private long currPackedSize;
+
+ /** Total number of bytes in the entire inflated object. */
+ private long currInflatedSize;
+
+ /** If the current object is fragmented, the list of chunks holding it. */
+ private List<ChunkKey> currFragments;
+
+ /** Previously written chunk that is being re-read during delta resolution. */
+ private PackChunk dbChunk;
+
+ /** Current read position in {@link #dbChunk}. */
+ private int dbPtr;
+
+ /** Recent chunks that were written, or recently read. */
+ private LinkedHashMap<ChunkKey, PackChunk> chunkReadBackCache;
+
+ /** Objects parsed from the stream, sorted by SHA-1. */
+ private List<DhtInfo> objectListByName;
+
+ /** Objects parsed from the stream, sorted by chunk (aka offset). */
+ private List<DhtInfo> objectListByChunk;
+
+ /** Iterators to write {@link #objectListByName} into the global index. */
+ private ListIterator<DhtInfo>[] linkIterators;
+
+ /** If the pack stream was self-contained, the cached pack info record key. */
+ private CachedPackKey cachedPackKey;
+
+ private CanonicalTreeParser treeParser;
+
+ private final MutableObjectId idBuffer;
+
+ private ObjectIdSubclassMap<DhtInfo> objectMap;
+
+ DhtPackParser(DhtObjDatabase objdb, InputStream in) {
+ super(objdb, in);
+
+ // Disable collision checking. DhtReader performs some magic to look
+ // only at old objects, so a colliding replacement will be ignored until
+ // it is removed during garbage collection.
+ //
+ setCheckObjectCollisions(false);
+
+ this.objdb = objdb;
+ this.repo = objdb.getRepository().getRepositoryKey();
+ this.db = objdb.getDatabase();
+ this.options = objdb.getInserterOptions();
+ this.chunkKeyDigest = Constants.newMessageDigest();
+
+ dbWriteBuffer = db.newWriteBuffer();
+ openChunks = new ChunkFormatter[5];
+ openEdges = new Edges[5];
+ infoByOrder = newListArray(5);
+ infoByKey = new HashMap<ChunkKey, ChunkInfo>();
+ dirtyMeta = new HashMap<ChunkKey, ChunkMeta>();
+ chunkMeta = new HashMap<ChunkKey, ChunkMeta>();
+ chunkEdges = new HashMap<ChunkKey, Edges>();
+ treeParser = new CanonicalTreeParser();
+ idBuffer = new MutableObjectId();
+ objectMap = new ObjectIdSubclassMap<DhtInfo>();
+
+ final int max = options.getParserCacheSize();
+ chunkReadBackCache = new LinkedHashMap<ChunkKey, PackChunk>(max, 0.75f, true) {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ protected boolean removeEldestEntry(Entry<ChunkKey, PackChunk> e) {
+ return max < size();
+ }
+ };
+
+ // The typical WriteBuffer flushes at 512 KiB increments, and
+ // the typical ObjectInfo record is around 180 bytes. Use these
+ // figures to come up with a rough estimate for how many links
+ // to construct in one region of the DHT before moving onto a
+ // different region in order to increase parallelism on large
+ // object imports.
+ //
+ linkBatchSize = 512 * 1024 / 180;
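+ // (512 * 1024 / 180 works out to roughly 2,900 links per batch.)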
+ }
+
+ @SuppressWarnings("unchecked")
+ private static <T> List<T>[] newListArray(int size) {
+ return new List[size];
+ }
+
+ /** @return if true, the pack stream is marked as a cached pack. */
+ public boolean isSaveAsCachedPack() {
+ return saveAsCachedPack != null && saveAsCachedPack.booleanValue();
+ }
+
+ /**
+ * Enable saving the pack stream as a cached pack.
+ *
+ * @param save
+ * if true, the stream is saved.
+ */
+ public void setSaveAsCachedPack(boolean save) {
+ saveAsCachedPack = Boolean.valueOf(save);
+ }
+
+ @Override
+ public PackLock parse(ProgressMonitor receiving, ProgressMonitor resolving)
+ throws IOException {
+ boolean success = false;
+ try {
+ PackLock lock = super.parse(receiving, resolving);
+
+ chunkReadBackCache = null;
+ openChunks = null;
+ openEdges = null;
+ treeParser = null;
+
+ final int objCnt = getObjectCount();
+ if (objCnt == 0) {
+ // If no objects were received, no chunks were created. Leaving
+ // success set to false and rolling back is a good way to make
+ // sure that remains true.
+ //
+ return lock;
+ }
+
+ createObjectLists();
+
+ if (isSaveAsCachedPack())
+ putCachedPack();
+ computeChunkEdges();
+ putChunkIndexes();
+ putDirtyMeta();
+
+ chunkMeta = null;
+ chunkEdges = null;
+ dirtyMeta = null;
+ objectMap = null;
+ objectListByChunk = null;
+ dbWriteBuffer.flush();
+
+ putGlobalIndex(resolving);
+ dbWriteBuffer.flush();
+
+ success = true;
+ return lock;
+ } finally {
+ openChunks = null;
+ openEdges = null;
+ objStreamPos = null;
+ objChunkPtrs = null;
+ currChunk = null;
+ currFragments = null;
+ dbChunk = null;
+ chunkReadBackCache = null;
+ infoByKey = null;
+ chunkMeta = null;
+ chunkEdges = null;
+ treeParser = null;
+
+ if (!success)
+ rollback();
+
+ infoByOrder = null;
+ objectListByName = null;
+ objectListByChunk = null;
+ linkIterators = null;
+ dbWriteBuffer = null;
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private void createObjectLists() {
+ List objs = getSortedObjectList(null /* by name */);
+ objectListByName = objs;
+
+ int cnt = objectListByName.size();
+ DhtInfo[] copy = objectListByName.toArray(new DhtInfo[cnt]);
+ Arrays.sort(copy, new Comparator<PackedObjectInfo>() {
+ public int compare(PackedObjectInfo o1, PackedObjectInfo o2) {
+ DhtInfo a = (DhtInfo) o1;
+ DhtInfo b = (DhtInfo) o2;
+ return Long.signum(a.chunkPtr - b.chunkPtr);
+ }
+ });
+ objectListByChunk = Arrays.asList(copy);
+ }
+
+ private void putCachedPack() throws DhtException {
+ CachedPackInfo info = new CachedPackInfo();
+
+ for (DhtInfo obj : objectMap) {
+ if (!obj.isInPack())
+ return;
+
+ if (!obj.isReferenced())
+ info.tips.add(obj.copy());
+ }
+
+ MessageDigest version = Constants.newMessageDigest();
+ addChunkList(info, version, infoByOrder[OBJ_TAG]);
+ addChunkList(info, version, infoByOrder[OBJ_COMMIT]);
+ addChunkList(info, version, infoByOrder[OBJ_TREE]);
+ addChunkList(info, version, infoByOrder[OBJ_BLOB]);
+
+ info.name = computePackName();
+ info.version = ObjectId.fromRaw(version.digest());
+
+ cachedPackKey = info.getRowKey();
+ for (List<ChunkInfo> list : infoByOrder) {
+ if (list == null)
+ continue;
+ for (ChunkInfo c : list) {
+ c.cachedPack = cachedPackKey;
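+ // Fragment continuation chunks are not rewritten by
+ // putChunkIndexes(), so persist their updated info here.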
+ if (c.isFragment())
+ db.repository().put(repo, c, dbWriteBuffer);
+ }
+ }
+
+ db.repository().put(repo, info, dbWriteBuffer);
+ }
+
+ private void addChunkList(CachedPackInfo info, MessageDigest version,
+ List<ChunkInfo> list) {
+ if (list == null)
+ return;
+ byte[] buf = new byte[Constants.OBJECT_ID_LENGTH];
+ for (ChunkInfo c : list) {
+ int len = c.chunkSize - ChunkFormatter.TRAILER_SIZE;
+ info.bytesTotal += len;
+ info.objectsTotal += c.objectsTotal;
+ info.objectsDelta += c.objectsOfsDelta;
+ info.objectsDelta += c.objectsRefDelta;
+ info.chunks.add(c.getChunkKey());
+ c.getChunkKey().getChunkHash().copyRawTo(buf, 0);
+ version.update(buf);
+ }
+ }
+
+ private ObjectId computePackName() {
+ byte[] buf = new byte[Constants.OBJECT_ID_LENGTH];
+ MessageDigest md = Constants.newMessageDigest();
+ for (DhtInfo otp : objectListByName) {
+ otp.copyRawTo(buf, 0);
+ md.update(buf);
+ }
+ return ObjectId.fromRaw(md.digest());
+ }
+
+ private void rollback() throws DhtException {
+ try {
+ dbWriteBuffer.abort();
+ dbWriteBuffer = db.newWriteBuffer();
+
+ if (cachedPackKey != null)
+ db.repository().remove(repo, cachedPackKey, dbWriteBuffer);
+
+ if (linkIterators != null) {
+ boolean removed = true;
+ while (removed) {
+ removed = false;
+ for (ListIterator<DhtInfo> itr : linkIterators) {
+ int cnt = 0;
+ while (itr.hasPrevious() && cnt < linkBatchSize) {
+ DhtInfo oe = itr.previous();
+ db.objectIndex().remove( //
+ ObjectIndexKey.create(repo, oe), //
+ chunkOf(oe.chunkPtr), //
+ dbWriteBuffer);
+ cnt++;
+ }
+ if (0 < cnt)
+ removed = true;
+ }
+ }
+ }
+
+ deleteChunks(infoByOrder[OBJ_COMMIT]);
+ deleteChunks(infoByOrder[OBJ_TREE]);
+ deleteChunks(infoByOrder[OBJ_BLOB]);
+ deleteChunks(infoByOrder[OBJ_TAG]);
+
+ dbWriteBuffer.flush();
+ } catch (Throwable err) {
+ throw new DhtException(DhtText.get().packParserRollbackFailed, err);
+ }
+ }
+
+ private void deleteChunks(List<ChunkInfo> list) throws DhtException {
+ if (list != null) {
+ for (ChunkInfo info : list) {
+ ChunkKey key = info.getChunkKey();
+ db.chunk().remove(key, dbWriteBuffer);
+ db.repository().remove(repo, key, dbWriteBuffer);
+ }
+ }
+ }
+
+ private void putGlobalIndex(ProgressMonitor pm) throws DhtException {
+ int objcnt = objectListByName.size();
+ pm.beginTask(DhtText.get().recordingObjects, objcnt);
+
+ int segments = Math.max(1, Math.min(objcnt / linkBatchSize, 32));
+ linkIterators = newListIteratorArray(segments);
+
+ int objsPerSegment = objcnt / segments;
+ int beginIdx = 0;
+ for (int i = 0; i < segments - 1; i++) {
+ int endIdx = Math.min(beginIdx + objsPerSegment, objcnt);
+ linkIterators[i] = objectListByName.subList(beginIdx, endIdx)
+ .listIterator();
+ beginIdx = endIdx;
+ }
+ linkIterators[segments - 1] = objectListByName
+ .subList(beginIdx, objcnt).listIterator();
+
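+ // Round-robin over the segments, writing at most linkBatchSize
+ // objects from each before moving to the next, so consecutive
+ // writes land in different regions of the object index key space.
+ //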
+ boolean inserted = true;
+ while (inserted) {
+ inserted = false;
+ for (ListIterator<DhtInfo> itr : linkIterators) {
+ int cnt = 0;
+ while (itr.hasNext() && cnt < linkBatchSize) {
+ DhtInfo oe = itr.next();
+ db.objectIndex().add( //
+ ObjectIndexKey.create(repo, oe), //
+ oe.info(chunkOf(oe.chunkPtr)), //
+ dbWriteBuffer);
+ cnt++;
+ }
+ if (0 < cnt) {
+ pm.update(cnt);
+ inserted = true;
+ }
+ }
+ }
+
+ pm.endTask();
+ }
+
+ @SuppressWarnings("unchecked")
+ private static ListIterator<DhtInfo>[] newListIteratorArray(int size) {
+ return new ListIterator[size];
+ }
+
+ private void computeChunkEdges() throws DhtException {
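+ // objectListByChunk is sorted by chunk pointer, so objects that
+ // live in the same chunk are adjacent. Walk the list and hand each
+ // chunk's slice to computeEdges().
+ //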
+ List<DhtInfo> objs = objectListByChunk;
+ int beginIdx = 0;
+ ChunkKey key = chunkOf(objs.get(0).chunkPtr);
+ int type = typeOf(objs.get(0).chunkPtr);
+
+ int objIdx = 1;
+ for (; objIdx < objs.size(); objIdx++) {
+ DhtInfo oe = objs.get(objIdx);
+ ChunkKey oeKey = chunkOf(oe.chunkPtr);
+ if (!key.equals(oeKey)) {
+ computeEdges(objs.subList(beginIdx, objIdx), key, type);
+ beginIdx = objIdx;
+
+ key = oeKey;
+ type = typeOf(oe.chunkPtr);
+ }
+ if (type != OBJ_MIXED && type != typeOf(oe.chunkPtr))
+ type = OBJ_MIXED;
+ }
+ computeEdges(objs.subList(beginIdx, objs.size()), key, type);
+ }
+
+ private void computeEdges(List<DhtInfo> objs, ChunkKey key, int type)
+ throws DhtException {
+ Edges edges = chunkEdges.get(key);
+ if (edges == null)
+ return;
+
+ for (DhtInfo obj : objs)
+ edges.remove(obj);
+
+ switch (type) {
+ case OBJ_COMMIT:
+ edges.commitEdges = toChunkList(edges.commitIds);
+ break;
+ case OBJ_TREE:
+ // TODO prefetch tree edges
+ break;
+ }
+
+ edges.commitIds = null;
+ }
+
+ private List<ChunkKey> toChunkList(Set<DhtInfo> objects)
+ throws DhtException {
+ if (objects == null || objects.isEmpty())
+ return null;
+
+ Map<ChunkKey, ChunkOrderingEntry> map = new HashMap<ChunkKey, ChunkOrderingEntry>();
+ for (DhtInfo obj : objects) {
+ if (!obj.isInPack())
+ continue;
+
+ long chunkPtr = obj.chunkPtr;
+ ChunkKey key = chunkOf(chunkPtr);
+ ChunkOrderingEntry e = map.get(key);
+ if (e == null) {
+ e = new ChunkOrderingEntry();
+ e.key = key;
+ e.order = chunkIdx(chunkPtr);
+ map.put(key, e);
+ } else {
+ e.order = Math.min(e.order, chunkIdx(chunkPtr));
+ }
+ }
+
+ ChunkOrderingEntry[] tmp = map.values().toArray(
+ new ChunkOrderingEntry[map.size()]);
+ Arrays.sort(tmp);
+
+ ChunkKey[] out = new ChunkKey[tmp.length];
+ for (int i = 0; i < tmp.length; i++)
+ out[i] = tmp[i].key;
+ return Arrays.asList(out);
+ }
+
+ private static final class ChunkOrderingEntry implements
+ Comparable<ChunkOrderingEntry> {
+ ChunkKey key;
+
+ int order;
+
+ public int compareTo(ChunkOrderingEntry o) {
+ return order - o.order;
+ }
+ }
+
+ private void putChunkIndexes() throws DhtException {
+ List<DhtInfo> objs = objectListByChunk;
+ int sIdx = 0;
+ DhtInfo oe = objs.get(0);
+ oe.setOffset(offsetOf(oe.chunkPtr));
+
+ ChunkKey key = chunkOf(oe.chunkPtr);
+ int type = typeOf(oe.chunkPtr);
+
+ int objIdx = 1;
+ for (; objIdx < objs.size(); objIdx++) {
+ oe = objs.get(objIdx);
+ oe.setOffset(offsetOf(oe.chunkPtr));
+
+ ChunkKey oeKey = chunkOf(oe.chunkPtr);
+ if (!key.equals(oeKey)) {
+ putChunkIndex(objs.subList(sIdx, objIdx), key, type);
+ sIdx = objIdx;
+
+ key = oeKey;
+ type = typeOf(oe.chunkPtr);
+ }
+ if (type != OBJ_MIXED && type != typeOf(oe.chunkPtr))
+ type = OBJ_MIXED;
+ }
+ putChunkIndex(objs.subList(sIdx, objs.size()), key, type);
+ }
+
+ private void putChunkIndex(List<DhtInfo> objectList, ChunkKey key, int type)
+ throws DhtException {
+ ChunkInfo info = infoByKey.get(key);
+ info.objectsTotal = objectList.size();
+ info.objectType = type;
+
+ PackChunk.Members builder = new PackChunk.Members();
+ builder.setChunkKey(key);
+
+ byte[] index = ChunkIndex.create(objectList);
+ info.indexSize = index.length;
+ builder.setChunkIndex(index);
+
+ ChunkMeta meta = dirtyMeta.remove(key);
+ if (meta == null)
+ meta = chunkMeta.get(key);
+ if (meta == null)
+ meta = new ChunkMeta(key);
+
+ switch (type) {
+ case OBJ_COMMIT: {
+ Edges edges = chunkEdges.get(key);
+ if (edges != null) {
+ List<ChunkKey> e = edges.commitEdges;
+ List<ChunkKey> s = sequentialHint(key, OBJ_COMMIT);
+ meta.commitPrefetch = new ChunkMeta.PrefetchHint(e, s);
+ }
+ break;
+ }
+ case OBJ_TREE: {
+ List<ChunkKey> s = sequentialHint(key, OBJ_TREE);
+ meta.treePrefetch = new ChunkMeta.PrefetchHint(null, s);
+ break;
+ }
+ }
+
+ if (meta.isEmpty()) {
+ info.metaSize = 0;
+ } else {
+ info.metaSize = meta.asBytes().length;
+ builder.setMeta(meta);
+ }
+
+ db.repository().put(repo, info, dbWriteBuffer);
+ db.chunk().put(builder, dbWriteBuffer);
+ }
+
+ private List<ChunkKey> sequentialHint(ChunkKey key, int typeCode) {
+ List<ChunkInfo> infoList = infoByOrder[typeCode];
+ if (infoList == null)
+ return null;
+
+ List<ChunkKey> all = new ArrayList<ChunkKey>(infoList.size());
+ for (ChunkInfo info : infoList)
+ all.add(info.getChunkKey());
+
+ int idx = all.indexOf(key);
+ if (0 <= idx) {
+ int max = options.getPrefetchDepth();
+ int end = Math.min(idx + 1 + max, all.size());
+ return all.subList(idx + 1, end);
+ }
+ return null;
+ }
+
+ private void putDirtyMeta() throws DhtException {
+ for (ChunkMeta meta : dirtyMeta.values()) {
+ PackChunk.Members builder = new PackChunk.Members();
+ builder.setChunkKey(meta.getChunkKey());
+ builder.setMeta(meta);
+ db.chunk().put(builder, dbWriteBuffer);
+ }
+ }
+
+ @Override
+ protected PackedObjectInfo newInfo(AnyObjectId id, UnresolvedDelta delta,
+ ObjectId baseId) {
+ DhtInfo obj = objectMap.addIfAbsent(new DhtInfo(id));
+ if (delta != null) {
+ DhtDelta d = (DhtDelta) delta;
+ obj.chunkPtr = d.chunkPtr;
+ obj.packedSize = d.packedSize;
+ obj.inflatedSize = d.inflatedSize;
+ obj.base = baseId;
+ obj.setType(d.getType());
+ if (d.isFragmented())
+ obj.setFragmented();
+ }
+ return obj;
+ }
+
+ @Override
+ protected void onPackHeader(long objCnt) throws IOException {
+ if (Integer.MAX_VALUE < objCnt) {
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().tooManyObjectsInPack, Long.valueOf(objCnt)));
+ }
+
+ objStreamPos = new LongList((int) objCnt);
+ objChunkPtrs = new LongList((int) objCnt);
+
+ if (saveAsCachedPack == null)
+ setSaveAsCachedPack(1000 < objCnt);
+ }
+
+ @Override
+ protected void onBeginWholeObject(long streamPosition, int type,
+ long inflatedSize) throws IOException {
+ ChunkFormatter w = begin(type);
+ if (!w.whole(type, inflatedSize)) {
+ endChunk(type);
+ w = begin(type);
+ if (!w.whole(type, inflatedSize))
+ throw panicCannotInsert();
+ }
+
+ currType = type;
+ currDataPos = w.position();
+ currPackedSize = 0;
+ currInflatedSize = inflatedSize;
+ objStreamPos.add(streamPosition);
+ }
+
+ @Override
+ protected void onEndWholeObject(PackedObjectInfo info) throws IOException {
+ boolean fragmented = currFragments != null;
+ endOneObject();
+
+ DhtInfo oe = (DhtInfo) info;
+ oe.chunkPtr = currChunkPtr;
+ oe.packedSize = currPackedSize;
+ oe.inflatedSize = currInflatedSize;
+ oe.setType(currType);
+ if (fragmented)
+ oe.setFragmented();
+ }
+
+ private void endOneObject() throws DhtException {
+ if (currFragments != null)
+ endFragmentedObject();
+ objChunkPtrs.add(currChunkPtr);
+ }
+
+ @Override
+ protected void onBeginOfsDelta(long deltaPos, long basePos,
+ long inflatedSize) throws IOException {
+ long basePtr = objChunkPtrs.get(findStreamIndex(basePos));
+ int type = typeOf(basePtr);
+
+ currType = type;
+ currPackedSize = 0;
+ currInflatedSize = inflatedSize;
+ currBasePtr = basePtr;
+ objStreamPos.add(deltaPos);
+
+ ChunkFormatter w = begin(type);
+ if (isInCurrentChunk(basePtr)) {
+ if (w.ofsDelta(inflatedSize, w.position() - offsetOf(basePtr))) {
+ currDataPos = w.position();
+ return;
+ }
+
+ endChunk(type);
+ w = begin(type);
+ }
+
+ if (!longOfsDelta(w, inflatedSize, basePtr)) {
+ endChunk(type);
+ w = begin(type);
+ if (!longOfsDelta(w, inflatedSize, basePtr))
+ throw panicCannotInsert();
+ }
+
+ currDataPos = w.position();
+ }
+
+ @Override
+ protected void onBeginRefDelta(long deltaPos, AnyObjectId baseId,
+ long inflatedSize) throws IOException {
+ // Try to get the base type, but only if it was seen before in this
+ // pack stream. If not, assume the worst case of a BLOB type.
+ //
+ int typeCode;
+ DhtInfo baseInfo = objectMap.get(baseId);
+ if (baseInfo != null && baseInfo.isInPack()) {
+ typeCode = baseInfo.getType();
+ currType = typeCode;
+ } else {
+ typeCode = OBJ_BLOB;
+ currType = -1;
+ }
+
+ ChunkFormatter w = begin(typeCode);
+ if (!w.refDelta(inflatedSize, baseId)) {
+ endChunk(typeCode);
+ w = begin(typeCode);
+ if (!w.refDelta(inflatedSize, baseId))
+ throw panicCannotInsert();
+ }
+
+ currDataPos = w.position();
+ currPackedSize = 0;
+ currInflatedSize = inflatedSize;
+ objStreamPos.add(deltaPos);
+ }
+
+ @Override
+ protected DhtDelta onEndDelta() throws IOException {
+ boolean fragmented = currFragments != null;
+ endOneObject();
+
+ DhtDelta delta = new DhtDelta();
+ delta.chunkPtr = currChunkPtr;
+ delta.packedSize = currPackedSize;
+ delta.inflatedSize = currInflatedSize;
+ if (0 < currType)
+ delta.setType(currType);
+ if (fragmented)
+ delta.setFragmented();
+ return delta;
+ }
+
+ @Override
+ protected void onObjectData(Source src, byte[] raw, int pos, int len)
+ throws IOException {
+ if (src != Source.INPUT)
+ return;
+
+ if (currChunk.append(raw, pos, len)) {
+ currPackedSize += len;
+ return;
+ }
+
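+ // The current chunk is full. If this object is the only one in the
+ // chunk it cannot be restarted elsewhere, so spill it into fragment
+ // chunks. Otherwise roll the partial object back out, close the
+ // chunk, and rewrite the object into a fresh chunk below.
+ //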
+ if (currFragments == null && currChunk.getObjectCount() == 1)
+ currFragments = new LinkedList<ChunkKey>();
+ if (currFragments != null) {
+ appendToFragment(raw, pos, len);
+ return;
+ }
+
+ // Everything between dataPos and dataEnd must be saved.
+ //
+ final int dataPos = currDataPos;
+ final int dataEnd = currChunk.position();
+ final int hdrPos = offsetOf(currChunkPtr);
+ final int hdrLen = dataPos - hdrPos;
+ final int type = typeOf(currChunkPtr);
+ byte[] dataOld = currChunk.getRawChunkDataArray();
+ final int typeOld = currChunk.getCurrentObjectType();
+
+ currChunk.rollback();
+ endChunk(type);
+
+ final ChunkFormatter w = begin(type);
+ switch (typeOld) {
+ case OBJ_COMMIT:
+ case OBJ_BLOB:
+ case OBJ_TREE:
+ case OBJ_TAG:
+ case OBJ_REF_DELTA:
+ w.adjustObjectCount(1, typeOld);
+ if (!w.append(dataOld, hdrPos, hdrLen))
+ throw panicCannotInsert();
+ break;
+
+ case OBJ_OFS_DELTA:
+ if (!longOfsDelta(w, currInflatedSize, currBasePtr))
+ throw panicCannotInsert();
+ break;
+
+ default:
+ throw new DhtException("Internal programming error: " + typeOld);
+ }
+
+ currDataPos = w.position();
+ if (dataPos < dataEnd && !w.append(dataOld, dataPos, dataEnd - dataPos))
+ throw panicCannotInsert();
+ dataOld = null;
+
+ if (w.append(raw, pos, len)) {
+ currPackedSize += len;
+ } else {
+ currFragments = new LinkedList<ChunkKey>();
+ appendToFragment(raw, pos, len);
+ }
+ }
+
+ private boolean longOfsDelta(ChunkFormatter w, long infSize, long basePtr) {
+ final int type = typeOf(basePtr);
+ final List<ChunkInfo> infoList = infoByOrder[type];
+ final int baseIdx = chunkIdx(basePtr);
+ final ChunkInfo baseInfo = infoList.get(baseIdx);
+
+ // Go backwards to the start of the base's chunk.
+ long relativeChunkStart = 0;
+ for (int i = infoList.size() - 1; baseIdx <= i; i--) {
+ ChunkInfo info = infoList.get(i);
+ int packSize = info.chunkSize - ChunkFormatter.TRAILER_SIZE;
+ relativeChunkStart += packSize;
+ }
+
+ // Offset to the base goes back to start of our chunk, then start of
+ // the base chunk, but slide forward the distance of the base within
+ // its own chunk.
+ //
+ long ofs = w.position() + relativeChunkStart - offsetOf(basePtr);
+ if (w.ofsDelta(infSize, ofs)) {
+ w.useBaseChunk(relativeChunkStart, baseInfo.getChunkKey());
+ return true;
+ }
+ return false;
+ }
+
+ private void appendToFragment(byte[] raw, int pos, int len)
+ throws DhtException {
+ while (0 < len) {
+ if (currChunk.free() == 0) {
+ int typeCode = typeOf(currChunkPtr);
+ currChunk.setFragment();
+ currFragments.add(endChunk(typeCode));
+ currChunk = openChunk(typeCode);
+ }
+
+ int n = Math.min(len, currChunk.free());
+ currChunk.append(raw, pos, n);
+ currPackedSize += n;
+ pos += n;
+ len -= n;
+ }
+ }
+
+ private void endFragmentedObject() throws DhtException {
+ currChunk.setFragment();
+ ChunkKey lastKey = endChunk(typeOf(currChunkPtr));
+ if (lastKey != null)
+ currFragments.add(lastKey);
+
+ for (ChunkKey key : currFragments) {
+ ChunkMeta meta = chunkMeta.get(key);
+ if (meta == null) {
+ meta = new ChunkMeta(key);
+ chunkMeta.put(key, meta);
+ }
+ meta.fragments = currFragments;
+ dirtyMeta.put(key, meta);
+ }
+ currFragments = null;
+ }
+
+ @Override
+ protected void onInflatedObjectData(PackedObjectInfo obj, int typeCode,
+ byte[] data) throws IOException {
+ DhtInfo info = (DhtInfo) obj;
+ info.inflatedSize = data.length;
+ info.setType(typeCode);
+
+ switch (typeCode) {
+ case OBJ_COMMIT:
+ onCommit(info, data);
+ break;
+
+ case OBJ_TREE:
+ onTree(info, data);
+ break;
+
+ case OBJ_TAG:
+ onTag(info, data);
+ break;
+ }
+ }
+
+ private void onCommit(DhtInfo obj, byte[] raw) throws DhtException {
+ Edges edges = edges(obj.chunkPtr);
+ edges.remove(obj);
+
+ // TODO compute hints for trees.
+ if (isSaveAsCachedPack()) {
+ idBuffer.fromString(raw, 5);
+ lookupByName(idBuffer).setReferenced();
+ }
+
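+ // A commit's "tree " line occupies bytes 0..45 (5 byte prefix,
+ // 40 hex digits, newline), so any "parent " lines begin at byte 46
+ // and are 48 bytes each.
+ //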
+ int ptr = 46;
+ while (raw[ptr] == 'p') {
+ idBuffer.fromString(raw, ptr + 7);
+ DhtInfo p = lookupByName(idBuffer);
+ p.setReferenced();
+ edges.commit(p);
+ ptr += 48;
+ }
+ }
+
+ private void onTree(DhtInfo obj, byte[] data) {
+ if (isSaveAsCachedPack()) {
+ treeParser.reset(data);
+ while (!treeParser.eof()) {
+ idBuffer.fromRaw(treeParser.idBuffer(), treeParser.idOffset());
+ lookupByName(idBuffer).setReferenced();
+ treeParser.next();
+ }
+ }
+ }
+
+ private void onTag(DhtInfo obj, byte[] data) {
+ if (isSaveAsCachedPack()) {
+ idBuffer.fromString(data, 7); // "object $sha1"
+ lookupByName(idBuffer).setReferenced();
+ }
+ }
+
+ private DhtInfo lookupByName(AnyObjectId obj) {
+ DhtInfo info = objectMap.get(obj);
+ if (info == null) {
+ info = new DhtInfo(obj);
+ objectMap.add(info);
+ }
+ return info;
+ }
+
+ private Edges edges(long chunkPtr) throws DhtException {
+ if (isInCurrentChunk(chunkPtr)) {
+ int type = typeOf(chunkPtr);
+ Edges s = openEdges[type];
+ if (s == null) {
+ s = new Edges();
+ openEdges[type] = s;
+ }
+ return s;
+ } else {
+ ChunkKey key = chunkOf(chunkPtr);
+ Edges s = chunkEdges.get(key);
+ if (s == null) {
+ s = new Edges();
+ chunkEdges.put(key, s);
+ }
+ return s;
+ }
+ }
+
+ private static class Edges {
+ Set<DhtInfo> commitIds;
+
+ List<ChunkKey> commitEdges;
+
+ void commit(DhtInfo id) {
+ if (commitIds == null)
+ commitIds = new HashSet<DhtInfo>();
+ commitIds.add(id);
+ }
+
+ void remove(DhtInfo id) {
+ if (commitIds != null)
+ commitIds.remove(id);
+ }
+ }
+
+ @Override
+ protected ObjectTypeAndSize seekDatabase(PackedObjectInfo obj,
+ ObjectTypeAndSize info) throws IOException {
+ return seekDatabase(((DhtInfo) obj).chunkPtr, info);
+ }
+
+ @Override
+ protected ObjectTypeAndSize seekDatabase(UnresolvedDelta delta,
+ ObjectTypeAndSize info) throws IOException {
+ return seekDatabase(((DhtDelta) delta).chunkPtr, info);
+ }
+
+ private ObjectTypeAndSize seekDatabase(long chunkPtr, ObjectTypeAndSize info)
+ throws DhtException {
+ seekChunk(chunkOf(chunkPtr), true);
+ dbPtr = dbChunk.readObjectTypeAndSize(offsetOf(chunkPtr), info);
+ return info;
+ }
+
+ @Override
+ protected int readDatabase(byte[] dst, int pos, int cnt) throws IOException {
+ int n = dbChunk.read(dbPtr, dst, pos, cnt);
+ if (0 < n) {
+ dbPtr += n;
+ return n;
+ }
+
+ // The ChunkMeta for fragments is written late, so it isn't available
+ // on the chunk if the chunk was read back from the database. Use
+ // our copy of the ChunkMeta instead of the PackChunk's copy.
+
+ ChunkMeta meta = chunkMeta.get(dbChunk.getChunkKey());
+ if (meta == null)
+ return 0;
+
+ ChunkKey next = meta.getNextFragment(dbChunk.getChunkKey());
+ if (next == null)
+ return 0;
+
+ seekChunk(next, false);
+ n = dbChunk.read(0, dst, pos, cnt);
+ dbPtr = n;
+ return n;
+ }
+
+ private void seekChunk(ChunkKey key, boolean cache) throws DhtException,
+ DhtTimeoutException {
+ if (dbChunk == null || !dbChunk.getChunkKey().equals(key)) {
+ dbChunk = chunkReadBackCache.get(key);
+ if (dbChunk == null) {
+ dbWriteBuffer.flush();
+
+ Collection<PackChunk.Members> found;
+ Context opt = Context.READ_REPAIR;
+ Sync<Collection<PackChunk.Members>> sync = Sync.create();
+ db.chunk().get(opt, Collections.singleton(key), sync);
+ try {
+ found = sync.get(objdb.getReaderOptions().getTimeout());
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(e);
+ }
+
+ if (found.isEmpty()) {
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().missingChunk, key));
+ }
+
+ dbChunk = found.iterator().next().build();
+ if (cache)
+ chunkReadBackCache.put(key, dbChunk);
+ }
+ }
+ }
+
+ @Override
+ protected boolean onAppendBase(int typeCode, byte[] data,
+ PackedObjectInfo info) throws IOException {
+ return false; // This implementation does not copy base objects.
+ }
+
+ @Override
+ protected void onEndThinPack() throws IOException {
+ // Do nothing, this event is not relevant.
+ }
+
+ @Override
+ protected void onPackFooter(byte[] hash) throws IOException {
+ // TODO Combine together fractional chunks to reduce overhead.
+ // Fractional chunks are common for single-commit pushes since
+ // they are broken out by object type.
+
+ // TODO Try to combine the chunk data and its index into a single
+ // put call for the last chunk of each type. This would break the
+ // read back we do in seekDatabase during delta resolution.
+
+ // If there are deltas to be resolved the pending chunks
+ // will need to be reloaded later. Ensure they are stored.
+ //
+ endChunk(OBJ_COMMIT);
+ endChunk(OBJ_TREE);
+ endChunk(OBJ_BLOB);
+ endChunk(OBJ_TAG);
+
+ // These are only necessary during initial parsing. Drop them now.
+ //
+ objStreamPos = null;
+ objChunkPtrs = null;
+ }
+
+ @Override
+ protected void onObjectHeader(Source src, byte[] raw, int pos, int len)
+ throws IOException {
+ // Do nothing, the original stream headers are not used.
+ }
+
+ @Override
+ protected void onStoreStream(byte[] raw, int pos, int len)
+ throws IOException {
+ // Do nothing, the stream is being sliced and cannot be stored as-is.
+ }
+
+ @Override
+ protected boolean checkCRC(int oldCRC) {
+ return true; // Don't bother to check CRCs, assume the chunk is OK.
+ }
+
+ private ChunkFormatter begin(int typeCode) throws DhtException {
+ ChunkFormatter w = openChunk(typeCode);
+ currChunk = w;
+ currChunkPtr = makeObjectPointer(w, typeCode);
+ return w;
+ }
+
+ private ChunkFormatter openChunk(int typeCode) throws DhtException {
+ if (typeCode == 0)
+ throw new DhtException("Invalid internal typeCode 0");
+
+ ChunkFormatter w = openChunks[typeCode];
+ if (w == null) {
+ w = new ChunkFormatter(repo, options);
+ w.setSource(ChunkInfo.Source.RECEIVE);
+ w.setObjectType(typeCode);
+ openChunks[typeCode] = w;
+ }
+ return w;
+ }
+
+ private ChunkKey endChunk(int typeCode) throws DhtException {
+ ChunkFormatter w = openChunks[typeCode];
+ if (w == null)
+ return null;
+
+ openChunks[typeCode] = null;
+ currChunk = null;
+
+ if (w.isEmpty())
+ return null;
+
+ ChunkKey key = w.end(chunkKeyDigest);
+ ChunkInfo info = w.getChunkInfo();
+
+ if (infoByOrder[typeCode] == null)
+ infoByOrder[typeCode] = new ArrayList<ChunkInfo>();
+ infoByOrder[typeCode].add(info);
+ infoByKey.put(key, info);
+
+ if (w.getChunkMeta() != null)
+ chunkMeta.put(key, w.getChunkMeta());
+
+ Edges e = openEdges[typeCode];
+ if (e != null) {
+ chunkEdges.put(key, e);
+ openEdges[typeCode] = null;
+ }
+
+ if (currFragments == null)
+ chunkReadBackCache.put(key, w.getPackChunk());
+
+ w.unsafePut(db, dbWriteBuffer);
+ return key;
+ }
+
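+ // objStreamPos holds the stream offset of every object seen so far,
+ // in ascending order. Binary search it to map a base position back
+ // to its index, and thus its chunk pointer in objChunkPtrs.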
+ private int findStreamIndex(long streamPosition) throws DhtException {
+ int high = objStreamPos.size();
+ int low = 0;
+ do {
+ final int mid = (low + high) >>> 1;
+ final long pos = objStreamPos.get(mid);
+ if (streamPosition < pos)
+ high = mid;
+ else if (streamPosition == pos)
+ return mid;
+ else
+ low = mid + 1;
+ } while (low < high);
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().noSavedTypeForBase, Long.valueOf(streamPosition)));
+ }
+
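+ // An object pointer packs three fields into one long: the object
+ // type in the top 3 bits, the index of the chunk within its
+ // per-type list in the next 29 bits, and the byte offset of the
+ // object within that chunk in the low 32 bits. typeOf(), chunkIdx()
+ // and offsetOf() below decode the same layout.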
+ private long makeObjectPointer(ChunkFormatter w, int typeCode) {
+ List<ChunkInfo> list = infoByOrder[typeCode];
+ int idx = list == null ? 0 : list.size();
+ int ptr = w.position();
+ return (((long) typeCode) << 61) | (((long) idx) << 32) | ptr;
+ }
+
+ private static int typeOf(long objectPtr) {
+ return (int) (objectPtr >>> 61);
+ }
+
+ private static int chunkIdx(long objectPtr) {
+ return ((int) ((objectPtr << 3) >>> (32 + 3)));
+ }
+
+ private static int offsetOf(long objectPtr) {
+ return (int) objectPtr;
+ }
+
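+ // The open chunk for a type has not been added to infoByOrder yet,
+ // so its index equals the number of chunks already closed for that
+ // type (or zero when none have been closed).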
+ private boolean isInCurrentChunk(long objectPtr) {
+ List<ChunkInfo> list = infoByOrder[typeOf(objectPtr)];
+ if (list == null)
+ return chunkIdx(objectPtr) == 0;
+ return chunkIdx(objectPtr) == list.size();
+ }
+
+ private ChunkKey chunkOf(long objectPtr) throws DhtException {
+ List<ChunkInfo> list = infoByOrder[typeOf(objectPtr)];
+ int idx = chunkIdx(objectPtr);
+ if (list == null || list.size() <= idx) {
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().packParserInvalidPointer, //
+ Constants.typeString(typeOf(objectPtr)), //
+ Integer.valueOf(idx), //
+ Integer.valueOf(offsetOf(objectPtr))));
+ }
+ return list.get(idx).getChunkKey();
+ }
+
+ private static DhtException panicCannotInsert() {
+ // This should never happen; a fresh chunk always has room for a header.
+ return new DhtException(DhtText.get().cannotInsertObject);
+ }
+
+ static class DhtInfo extends PackedObjectInfo {
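+ // The CRC field inherited from PackedObjectInfo is reused to hold
+ // the object type (low 3 bits) plus the REFERENCED and FRAGMENTED
+ // flags, since real CRCs are not tracked (see checkCRC above).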
+ private static final int REFERENCED = 1 << 3;
+
+ static final int FRAGMENTED = 1 << 4;
+
+ long chunkPtr;
+
+ long packedSize;
+
+ long inflatedSize;
+
+ ObjectId base;
+
+ DhtInfo(AnyObjectId id) {
+ super(id);
+ }
+
+ boolean isInPack() {
+ return chunkPtr != 0;
+ }
+
+ boolean isReferenced() {
+ return (getCRC() & REFERENCED) != 0;
+ }
+
+ void setReferenced() {
+ setCRC(getCRC() | REFERENCED);
+ }
+
+ boolean isFragmented() {
+ return (getCRC() & FRAGMENTED) != 0;
+ }
+
+ void setFragmented() {
+ setCRC(getCRC() | FRAGMENTED);
+ }
+
+ int getType() {
+ return getCRC() & 7;
+ }
+
+ void setType(int type) {
+ setCRC((getCRC() & ~7) | type);
+ }
+
+ ObjectInfo info(ChunkKey chunkKey) {
+ return new ObjectInfo(chunkKey, -1, getType(), offsetOf(chunkPtr),
+ packedSize, inflatedSize, base, isFragmented());
+ }
+ }
+
+ static class DhtDelta extends UnresolvedDelta {
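+ // As in DhtInfo, the CRC slot is reused: the low 3 bits hold the
+ // object type and FRAGMENTED marks objects split across chunks.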
+ long chunkPtr;
+
+ long packedSize;
+
+ long inflatedSize;
+
+ int getType() {
+ return getCRC() & 7;
+ }
+
+ void setType(int type) {
+ setCRC((getCRC() & ~7) | type);
+ }
+
+ boolean isFragmented() {
+ return (getCRC() & DhtInfo.FRAGMENTED) != 0;
+ }
+
+ void setFragmented() {
+ setCRC(getCRC() | DhtInfo.FRAGMENTED);
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java
new file mode 100644
index 0000000000..c4977feef2
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java
@@ -0,0 +1,747 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
+import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+import java.util.zip.Inflater;
+
+import org.eclipse.jgit.errors.IncorrectObjectTypeException;
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
+import org.eclipse.jgit.lib.AbbreviatedObjectId;
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.AsyncObjectLoaderQueue;
+import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
+import org.eclipse.jgit.lib.InflaterCache;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.lib.ProgressMonitor;
+import org.eclipse.jgit.revwalk.ObjectWalk;
+import org.eclipse.jgit.revwalk.RevCommit;
+import org.eclipse.jgit.revwalk.RevObject;
+import org.eclipse.jgit.revwalk.RevWalk;
+import org.eclipse.jgit.storage.dht.RefData.IdWithChunk;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable;
+import org.eclipse.jgit.storage.pack.CachedPack;
+import org.eclipse.jgit.storage.pack.ObjectReuseAsIs;
+import org.eclipse.jgit.storage.pack.ObjectToPack;
+import org.eclipse.jgit.storage.pack.PackOutputStream;
+import org.eclipse.jgit.storage.pack.PackWriter;
+
+/**
+ * ObjectReader implementation for DHT based repositories.
+ * <p>
+ * This class is public only to expose its unique statistics for runtime
+ * performance reporting. Applications should always prefer to use the more
+ * generic base class, {@link ObjectReader}.
+ */
+public class DhtReader extends ObjectReader implements ObjectReuseAsIs {
+ private final DhtRepository repository;
+
+ private final RepositoryKey repo;
+
+ private final Database db;
+
+ private final DhtReaderOptions readerOptions;
+
+ private final DhtInserterOptions inserterOptions;
+
+ private final Statistics stats;
+
+ private final RecentInfoCache recentInfo;
+
+ private final RecentChunks recentChunks;
+
+ private final DeltaBaseCache deltaBaseCache;
+
+ private Collection<CachedPack> cachedPacks;
+
+ private Inflater inflater;
+
+ private Prefetcher prefetcher;
+
+ DhtReader(DhtObjDatabase objdb) {
+ this.repository = objdb.getRepository();
+ this.repo = objdb.getRepository().getRepositoryKey();
+ this.db = objdb.getDatabase();
+ this.readerOptions = objdb.getReaderOptions();
+ this.inserterOptions = objdb.getInserterOptions();
+
+ this.stats = new Statistics();
+ this.recentInfo = new RecentInfoCache(getOptions());
+ this.recentChunks = new RecentChunks(this);
+ this.deltaBaseCache = new DeltaBaseCache(this);
+ }
+
+ /** @return describes how this DhtReader has performed. */
+ public Statistics getStatistics() {
+ return stats;
+ }
+
+ Database getDatabase() {
+ return db;
+ }
+
+ RepositoryKey getRepositoryKey() {
+ return repo;
+ }
+
+ DhtReaderOptions getOptions() {
+ return readerOptions;
+ }
+
+ DhtInserterOptions getInserterOptions() {
+ return inserterOptions;
+ }
+
+ RecentInfoCache getRecentInfoCache() {
+ return recentInfo;
+ }
+
+ DeltaBaseCache getDeltaBaseCache() {
+ return deltaBaseCache;
+ }
+
+ Inflater inflater() {
+ if (inflater == null)
+ inflater = InflaterCache.get();
+ else
+ inflater.reset();
+ return inflater;
+ }
+
+ @Override
+ public void release() {
+ recentChunks.clear();
+ endPrefetch();
+
+ InflaterCache.release(inflater);
+ inflater = null;
+
+ super.release();
+ }
+
+ @Override
+ public ObjectReader newReader() {
+ return new DhtReader(repository.getObjectDatabase());
+ }
+
+ @Override
+ public boolean has(AnyObjectId objId, int typeHint) throws IOException {
+ if (objId instanceof RefData.IdWithChunk)
+ return true;
+
+ if (recentChunks.has(repo, objId))
+ return true;
+
+ if (repository.getRefDatabase().findChunk(objId) != null)
+ return true;
+
+ // TODO(spearce) This is expensive. Is it worthwhile?
+ if (ChunkCache.get().find(repo, objId) != null)
+ return true;
+
+ return !find(objId).isEmpty();
+ }
+
+ @Override
+ public ObjectLoader open(AnyObjectId objId, int typeHint)
+ throws MissingObjectException, IncorrectObjectTypeException,
+ IOException {
+ ObjectLoader ldr = recentChunks.open(repo, objId, typeHint);
+ if (ldr != null)
+ return ldr;
+
+ ChunkAndOffset p = getChunk(objId, typeHint, true, false);
+ ldr = PackChunk.read(p.chunk, p.offset, this, typeHint);
+ recentChunk(p.chunk);
+ return ldr;
+ }
+
+ @Override
+ public <T extends ObjectId> AsyncObjectLoaderQueue<T> open(
+ Iterable<T> objectIds, boolean reportMissing) {
+ return new OpenQueue<T>(this, objectIds, reportMissing);
+ }
+
+ @Override
+ public long getObjectSize(AnyObjectId objectId, int typeHint)
+ throws MissingObjectException, IncorrectObjectTypeException,
+ IOException {
+ for (ObjectInfo info : find(objectId))
+ return info.getSize();
+ throw missing(objectId, typeHint);
+ }
+
+ @Override
+ public <T extends ObjectId> AsyncObjectSizeQueue<T> getObjectSize(
+ Iterable<T> objectIds, boolean reportMissing) {
+ return new SizeQueue<T>(this, objectIds, reportMissing);
+ }
+
+ @Override
+ public void walkAdviceBeginCommits(RevWalk rw, Collection<RevCommit> roots)
+ throws IOException {
+ endPrefetch();
+
+ // Don't assign the prefetcher right away. Delay until it is
+ // configured, as push might invoke our own methods that may
+ // try to call back into the active prefetcher.
+ //
+ Prefetcher p = new Prefetcher(this, OBJ_COMMIT);
+ p.push(this, roots);
+ prefetcher = p;
+ }
+
+ @Override
+ public void walkAdviceBeginTrees(ObjectWalk ow, RevCommit min, RevCommit max)
+ throws IOException {
+ endPrefetch();
+
+ // Don't assign the prefetcher right away. Delay until it is
+ // configured, as push might invoke our own methods that may
+ // try to call back into the active prefetcher.
+ //
+ Prefetcher p = new Prefetcher(this, OBJ_TREE);
+ p.push(this, min.getTree(), max.getTree());
+ prefetcher = p;
+ }
+
+ @Override
+ public void walkAdviceEnd() {
+ endPrefetch();
+ }
+
+ void recentChunk(PackChunk chunk) {
+ recentChunks.put(chunk);
+ }
+
+ ChunkAndOffset getChunk(AnyObjectId objId, int typeHint, boolean recent)
+ throws DhtException, MissingObjectException {
+ return getChunk(objId, typeHint, true /* load */, recent);
+ }
+
+ ChunkAndOffset getChunkGently(AnyObjectId objId, int typeHint)
+ throws DhtException, MissingObjectException {
+ return getChunk(objId, typeHint, false /* no load */, true /* recent */);
+ }
+
+ private ChunkAndOffset getChunk(AnyObjectId objId, int typeHint,
+ boolean loadIfRequired, boolean checkRecent) throws DhtException,
+ MissingObjectException {
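+ // Lookup order: recently used chunks, the chunk hinted by an
+ // IdWithChunk or the ref database, the shared ChunkCache, the
+ // active prefetcher, and finally the global object index.
+ //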
+ if (checkRecent) {
+ ChunkAndOffset r = recentChunks.find(repo, objId);
+ if (r != null)
+ return r;
+ }
+
+ ChunkKey key;
+ if (objId instanceof RefData.IdWithChunk)
+ key = ((RefData.IdWithChunk) objId).getChunkKey();
+ else
+ key = repository.getRefDatabase().findChunk(objId);
+ if (key != null) {
+ PackChunk chunk = ChunkCache.get().get(key);
+ if (chunk != null) {
+ int pos = chunk.findOffset(repo, objId);
+ if (0 <= pos)
+ return new ChunkAndOffset(chunk, pos);
+ }
+
+ if (loadIfRequired) {
+ chunk = load(key);
+ if (chunk != null && chunk.hasIndex()) {
+ int pos = chunk.findOffset(repo, objId);
+ if (0 <= pos) {
+ chunk = ChunkCache.get().put(chunk);
+ return new ChunkAndOffset(chunk, pos);
+ }
+ }
+ }
+
+ // The hint above is stale. Fall through and do a
+ // more exhaustive lookup to find the object.
+ }
+
+ ChunkAndOffset r = ChunkCache.get().find(repo, objId);
+ if (r != null)
+ return r;
+
+ if (!loadIfRequired)
+ return null;
+
+ if (prefetcher != null) {
+ r = prefetcher.find(repo, objId);
+ if (r != null)
+ return r;
+ }
+
+ for (ObjectInfo link : find(objId)) {
+ PackChunk chunk;
+
+ if (prefetcher != null) {
+ chunk = prefetcher.get(link.getChunkKey());
+ if (chunk == null) {
+ chunk = load(link.getChunkKey());
+ if (chunk == null)
+ continue;
+ if (prefetcher.isType(typeHint))
+ prefetcher.push(chunk.getMeta());
+ }
+ } else {
+ chunk = load(link.getChunkKey());
+ if (chunk == null)
+ continue;
+ }
+
+ if (chunk.hasIndex())
+ chunk = ChunkCache.get().put(chunk);
+ return new ChunkAndOffset(chunk, link.getOffset());
+ }
+
+ throw missing(objId, typeHint);
+ }
+
+ ChunkKey findChunk(AnyObjectId objId) throws DhtException {
+ if (objId instanceof IdWithChunk)
+ return ((IdWithChunk) objId).getChunkKey();
+
+ ChunkKey key = repository.getRefDatabase().findChunk(objId);
+ if (key != null)
+ return key;
+
+ ChunkAndOffset r = recentChunks.find(repo, objId);
+ if (r != null)
+ return r.chunk.getChunkKey();
+
+ r = ChunkCache.get().find(repo, objId);
+ if (r != null)
+ return r.chunk.getChunkKey();
+
+ for (ObjectInfo link : find(objId))
+ return link.getChunkKey();
+
+ return null;
+ }
+
+ static MissingObjectException missing(AnyObjectId objId, int typeHint) {
+ ObjectId id = objId.copy();
+ if (typeHint != OBJ_ANY)
+ return new MissingObjectException(id, typeHint);
+ return new MissingObjectException(id, DhtText.get().objectTypeUnknown);
+ }
+
+ PackChunk getChunk(ChunkKey key) throws DhtException {
+ PackChunk chunk = recentChunks.get(key);
+ if (chunk != null)
+ return chunk;
+
+ chunk = ChunkCache.get().get(key);
+ if (chunk != null)
+ return chunk;
+
+ chunk = load(key);
+ if (chunk != null) {
+ if (chunk.hasIndex())
+ return ChunkCache.get().put(chunk);
+ return chunk;
+ }
+
+ throw new DhtMissingChunkException(key);
+ }
+
+ @Override
+ public Collection<ObjectId> resolve(AbbreviatedObjectId id)
+ throws IOException {
+ // Because ObjectIndexKey requires at least 4 leading digits
+ // don't resolve anything that is shorter than 4 digits.
+ //
+ if (id.length() < 4)
+ return Collections.emptySet();
+
+ throw new DhtException.TODO("resolve abbreviations");
+ }
+
+ public DhtObjectToPack newObjectToPack(RevObject obj) {
+ return new DhtObjectToPack(obj);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void selectObjectRepresentation(PackWriter packer,
+ ProgressMonitor monitor, Iterable<ObjectToPack> objects)
+ throws IOException, MissingObjectException {
+ Iterable itr = objects;
+ new RepresentationSelector(packer, this, monitor).select(itr);
+ }
+
+ private void endPrefetch() {
+ prefetcher = null;
+ }
+
+ @SuppressWarnings("unchecked")
+ public void writeObjects(PackOutputStream out, List<ObjectToPack> objects)
+ throws IOException {
+ prefetcher = new Prefetcher(this, 0);
+ prefetcher.setCacheLoadedChunks(false);
+ try {
+ List itr = objects;
+ new ObjectWriter(this, prefetcher).plan(itr);
+ for (ObjectToPack otp : objects)
+ out.writeObject(otp);
+ } finally {
+ endPrefetch();
+ }
+ }
+
+ public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp,
+ boolean validate) throws IOException,
+ StoredObjectRepresentationNotAvailableException {
+ DhtObjectToPack obj = (DhtObjectToPack) otp;
+ try {
+ PackChunk chunk = recentChunks.get(obj.chunk);
+ if (chunk == null) {
+ chunk = prefetcher.get(obj.chunk);
+ if (chunk == null) {
+ // This should never happen during packing; it implies
+ // the fetch plan was incorrect. Unfortunately that can
+ // occur if objects need to be recompressed on the fly.
+ //
+ stats.access(obj.chunk).cntCopyObjectAsIs_PrefetchMiss++;
+ chunk = getChunk(obj.chunk);
+ }
+ if (!chunk.isFragment())
+ recentChunk(chunk);
+ }
+ chunk.copyObjectAsIs(out, obj, validate, this);
+ } catch (DhtMissingChunkException missingChunk) {
+ stats.access(missingChunk.getChunkKey()).cntCopyObjectAsIs_InvalidChunk++;
+ throw new StoredObjectRepresentationNotAvailableException(otp);
+ }
+ }
+
+ public Collection<CachedPack> getCachedPacks() throws IOException {
+ if (cachedPacks == null) {
+ Collection<CachedPackInfo> info;
+ Collection<CachedPack> packs;
+
+ try {
+ info = db.repository().getCachedPacks(repo);
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(e);
+ }
+
+ packs = new ArrayList<CachedPack>(info.size());
+ for (CachedPackInfo i : info)
+ packs.add(new DhtCachedPack(i));
+ cachedPacks = packs;
+ }
+ return cachedPacks;
+ }
+
+ public void copyPackAsIs(PackOutputStream out, CachedPack pack,
+ boolean validate) throws IOException {
+ ((DhtCachedPack) pack).copyAsIs(out, validate, this);
+ }
+
+ private List<ObjectInfo> find(AnyObjectId obj) throws DhtException {
+ List<ObjectInfo> info = recentInfo.get(obj);
+ if (info != null)
+ return info;
+
+ stats.cntObjectIndex_Load++;
+ ObjectIndexKey idxKey = ObjectIndexKey.create(repo, obj);
+ Context opt = Context.READ_REPAIR;
+ Sync<Map<ObjectIndexKey, Collection<ObjectInfo>>> sync = Sync.create();
+ db.objectIndex().get(opt, Collections.singleton(idxKey), sync);
+ try {
+ Collection<ObjectInfo> m;
+
+ m = sync.get(getOptions().getTimeout()).get(idxKey);
+ if (m == null || m.isEmpty())
+ return Collections.emptyList();
+
+ info = new ArrayList<ObjectInfo>(m);
+ ObjectInfo.sort(info);
+ recentInfo.put(obj, info);
+ return info;
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(e);
+ }
+ }
+
+ private PackChunk load(ChunkKey chunkKey) throws DhtException {
+ if (0 == stats.access(chunkKey).cntReader_Load++
+ && readerOptions.isTrackFirstChunkLoad())
+ stats.access(chunkKey).locReader_Load = new Throwable("first");
+ Context opt = Context.READ_REPAIR;
+ Sync<Collection<PackChunk.Members>> sync = Sync.create();
+ db.chunk().get(opt, Collections.singleton(chunkKey), sync);
+ try {
+ Collection<PackChunk.Members> c = sync.get(getOptions()
+ .getTimeout());
+ if (c.isEmpty())
+ return null;
+ if (c instanceof List)
+ return ((List<PackChunk.Members>) c).get(0).build();
+ return c.iterator().next().build();
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(e);
+ }
+ }
+
+ static class ChunkAndOffset {
+ final PackChunk chunk;
+
+ final int offset;
+
+ ChunkAndOffset(PackChunk chunk, int offset) {
+ this.chunk = chunk;
+ this.offset = offset;
+ }
+ }
+
+ /** How this DhtReader has performed since creation. */
+ public static class Statistics {
+ private final Map<ChunkKey, ChunkAccess> chunkAccess = new LinkedHashMap<ChunkKey, ChunkAccess>();
+
+ ChunkAccess access(ChunkKey chunkKey) {
+ ChunkAccess ca = chunkAccess.get(chunkKey);
+ if (ca == null) {
+ ca = new ChunkAccess(chunkKey);
+ chunkAccess.put(chunkKey, ca);
+ }
+ return ca;
+ }
+
+ /**
+ * Number of sequential {@link ObjectIndexTable} lookups made by the
+ * reader. These were made without the support of batch lookups.
+ */
+ public int cntObjectIndex_Load;
+
+ /** Cycles detected in delta chains during OBJ_REF_DELTA reads. */
+ public int deltaChainCycles;
+
+ int recentChunks_Hits;
+
+ int recentChunks_Miss;
+
+ int deltaBaseCache_Hits;
+
+ int deltaBaseCache_Miss;
+
+ /** @return ratio of recent chunk hits, [0.00,1.00]. */
+ public double getRecentChunksHitRatio() {
+ int total = recentChunks_Hits + recentChunks_Miss;
+ return ((double) recentChunks_Hits) / total;
+ }
+
+ /** @return ratio of delta base cache hits, [0.00,1.00]. */
+ public double getDeltaBaseCacheHitRatio() {
+ int total = deltaBaseCache_Hits + deltaBaseCache_Miss;
+ return ((double) deltaBaseCache_Hits) / total;
+ }
+
+ /**
+ * @return collection of chunk accesses made by the application code
+ * against this reader. The collection's iterator has no
+ * relevant order.
+ */
+ public Collection<ChunkAccess> getChunkAccess() {
+ return chunkAccess.values();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ b.append("DhtReader.Statistics:\n");
+ b.append(" ");
+ if (recentChunks_Hits != 0 || recentChunks_Miss != 0)
+ ratio(b, "recentChunks", getRecentChunksHitRatio());
+ if (deltaBaseCache_Hits != 0 || deltaBaseCache_Miss != 0)
+ ratio(b, "deltaBaseCache", getDeltaBaseCacheHitRatio());
+ appendFields(this, b);
+ b.append("\n");
+ for (ChunkAccess ca : getChunkAccess()) {
+ b.append(" ");
+ b.append(ca.toString());
+ b.append("\n");
+ }
+ return b.toString();
+ }
+
+ @SuppressWarnings("boxing")
+ static void ratio(StringBuilder b, String name, double value) {
+ b.append(String.format(" %s=%.2f%%", name, value * 100.0));
+ }
+
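+ // Reflectively appends every public int field with a positive
+ // value; shared by the Statistics and ChunkAccess toString()
+ // implementations.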
+ static void appendFields(Object obj, StringBuilder b) {
+ try {
+ for (Field field : obj.getClass().getDeclaredFields()) {
+ String n = field.getName();
+
+ if (field.getType() == Integer.TYPE
+ && (field.getModifiers() & Modifier.PUBLIC) != 0) {
+ int v = field.getInt(obj);
+ if (0 < v)
+ b.append(' ').append(n).append('=').append(v);
+ }
+ }
+ } catch (IllegalArgumentException e) {
+ throw new RuntimeException(e);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /** Summary describing how a chunk was accessed. */
+ public static final class ChunkAccess {
+ /** Chunk this access block describes. */
+ public final ChunkKey chunkKey;
+
+ /**
+ * Number of times chunk was loaded sequentially. Incremented when
+ * the reader had to load the chunk on demand with no cache or
+ * prefetcher support.
+ */
+ public int cntReader_Load;
+
+ Throwable locReader_Load;
+
+ /**
+ * Number of times the prefetcher loaded from the database.
+ * Incremented each time the prefetcher asked for the chunk from the
+ * underlying database (which might have its own distributed cache,
+ * or not).
+ */
+ public int cntPrefetcher_Load;
+
+ /**
+ * Number of times the prefetcher obtained from {@link ChunkCache}.
+ * Incremented when the prefetcher recovered the chunk from the
+ * local JVM chunk cache and thus avoided reading the database.
+ */
+ public int cntPrefetcher_ChunkCacheHit;
+
+ /**
+ * Number of times the prefetcher ordering was wrong. Incremented if
+ * a reader wants a chunk but the prefetcher didn't have it ready at
+ * the time of request. This indicates a bad prefetching plan as the
+ * chunk should have been listed earlier in the prefetcher's list.
+ */
+ public int cntPrefetcher_OutOfOrder;
+
+ /**
+ * Number of times the reader had to stall to wait for a chunk that
+ * is currently being prefetched to finish loading and become ready.
+ * This indicates the prefetcher may have fetched other chunks first
+ * (had the wrong order), or does not have a deep enough window to
+ * hide these loads from the application.
+ */
+ public int cntPrefetcher_WaitedForLoad;
+
+ /**
+ * Number of times the reader asked the prefetcher for the same
+ * chunk after it was already consumed from the prefetcher. This
+ * indicates the reader has walked back on itself and is
+ * revisiting a chunk.
+ */
+ public int cntPrefetcher_Revisited;
+
+ /**
+ * Number of times the reader needed this chunk to copy an object
+ * as-is into a pack stream, but the prefetcher didn't have it
+ * ready. This correlates with {@link #cntPrefetcher_OutOfOrder} or
+ * {@link #cntPrefetcher_Revisited}.
+ */
+ public int cntCopyObjectAsIs_PrefetchMiss;
+
+ /**
+ * Number of times the reader tried to copy an object from this
+ * chunk, but discovered the chunk was corrupt or did not contain
+ * the object as expected.
+ */
+ public int cntCopyObjectAsIs_InvalidChunk;
+
+ ChunkAccess(ChunkKey key) {
+ chunkKey = key;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ b.append(chunkKey).append('[');
+ appendFields(this, b);
+ b.append(" ]");
+ if (locReader_Load != null) {
+ StringWriter sw = new StringWriter();
+ locReader_Load.printStackTrace(new PrintWriter(sw));
+ b.append(sw);
+ }
+ return b.toString();
+ }
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java
new file mode 100644
index 0000000000..0890e39ad0
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.lib.Config;
+
+/** Options controlling how objects are read from a DHT stored repository. */
+public class DhtReaderOptions {
+ /** 1024 (number of bytes in one kibibyte/kilobyte) */
+ public static final int KiB = 1024;
+
+ /** 1024 {@link #KiB} (number of bytes in one mebibyte/megabyte) */
+ public static final int MiB = 1024 * KiB;
+
+ private Timeout timeout;
+
+ private boolean prefetchFollowEdgeHints;
+
+ private int prefetchLimit;
+
+ private int objectIndexConcurrentBatches;
+
+ private int objectIndexBatchSize;
+
+ private int deltaBaseCacheSize;
+
+ private int deltaBaseCacheLimit;
+
+ private int recentInfoCacheSize;
+
+ private int recentChunkCacheSize;
+
+ private boolean trackFirstChunkLoad;
+
+ /** Create a default reader configuration. */
+ public DhtReaderOptions() {
+ setTimeout(Timeout.seconds(5));
+ setPrefetchFollowEdgeHints(true);
+ setPrefetchLimit(5 * MiB);
+
+ setObjectIndexConcurrentBatches(2);
+ setObjectIndexBatchSize(512);
+
+ setDeltaBaseCacheSize(1024);
+ setDeltaBaseCacheLimit(10 * MiB);
+
+ setRecentInfoCacheSize(4096);
+ setRecentChunkCacheSize(4);
+ }
+
+ /** @return default timeout to wait on long operations before aborting. */
+ public Timeout getTimeout() {
+ return timeout;
+ }
+
+ /**
+ * Set the default timeout to wait on long operations.
+ *
+ * @param maxWaitTime
+ * new wait time.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setTimeout(Timeout maxWaitTime) {
+ if (maxWaitTime == null || maxWaitTime.getTime() < 0)
+ throw new IllegalArgumentException();
+ timeout = maxWaitTime;
+ return this;
+ }
+
+ /** @return true if the prefetcher should follow edge hints (experimental). */
+ public boolean isPrefetchFollowEdgeHints() {
+ return prefetchFollowEdgeHints;
+ }
+
+ /**
+ * Enable (or disable) the experimental edge following feature.
+ *
+ * @param follow
+ * true to follow the edge hints.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setPrefetchFollowEdgeHints(boolean follow) {
+ prefetchFollowEdgeHints = follow;
+ return this;
+ }
+
+ /** @return number of bytes to load during prefetching. */
+ public int getPrefetchLimit() {
+ return prefetchLimit;
+ }
+
+ /**
+ * Set the number of bytes the prefetcher should hold onto.
+ *
+ * @param maxBytes
+ * maximum number of bytes the prefetcher should buffer.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setPrefetchLimit(int maxBytes) {
+ prefetchLimit = Math.max(1024, maxBytes);
+ return this;
+ }
+
+ /** @return number of concurrent reads against ObjectIndexTable. */
+ public int getObjectIndexConcurrentBatches() {
+ return objectIndexConcurrentBatches;
+ }
+
+ /**
+ * Set the number of concurrent readers on ObjectIndexTable.
+ *
+ * @param batches
+ * number of batches.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setObjectIndexConcurrentBatches(int batches) {
+ objectIndexConcurrentBatches = Math.max(1, batches);
+ return this;
+ }
+
+ /** @return number of objects to lookup in one batch. */
+ public int getObjectIndexBatchSize() {
+ return objectIndexBatchSize;
+ }
+
+ /**
+ * Set the number of objects to lookup at once.
+ *
+ * @param objectCnt
+ * the number of objects in a lookup batch.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setObjectIndexBatchSize(int objectCnt) {
+ objectIndexBatchSize = Math.max(1, objectCnt);
+ return this;
+ }
+
+ /** @return size of the delta base cache hash table, in object entries. */
+ public int getDeltaBaseCacheSize() {
+ return deltaBaseCacheSize;
+ }
+
+ /**
+ * Set the size of the delta base cache hash table.
+ *
+ * @param slotCnt
+ * number of slots in the hash table.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setDeltaBaseCacheSize(int slotCnt) {
+ deltaBaseCacheSize = Math.max(1, slotCnt);
+ return this;
+ }
+
+ /** @return maximum number of bytes to hold in per-reader DeltaBaseCache. */
+ public int getDeltaBaseCacheLimit() {
+ return deltaBaseCacheLimit;
+ }
+
+ /**
+ * Set the maximum number of bytes in the DeltaBaseCache.
+ *
+ * @param maxBytes
+ * the new limit.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setDeltaBaseCacheLimit(int maxBytes) {
+ deltaBaseCacheLimit = Math.max(0, maxBytes);
+ return this;
+ }
+
+ /** @return number of objects to cache information on. */
+ public int getRecentInfoCacheSize() {
+ return recentInfoCacheSize;
+ }
+
+ /**
+ * Set the number of objects to cache information on.
+ *
+ * @param objectCnt
+ * the number of objects to cache.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setRecentInfoCacheSize(int objectCnt) {
+ recentInfoCacheSize = Math.max(0, objectCnt);
+ return this;
+ }
+
+ /** @return number of recent chunks to hold onto per-reader. */
+ public int getRecentChunkCacheSize() {
+ return recentChunkCacheSize;
+ }
+
+ /**
+ * Set the number of chunks each reader holds onto for recently used access.
+ *
+ * @param chunkCnt
+ * the number of recently used chunks each reader retains to
+ * smooth out repeated access.
+ * @return {@code this}
+ */
+ public DhtReaderOptions setRecentChunkCacheSize(int chunkCnt) {
+ recentChunkCacheSize = Math.max(0, chunkCnt);
+ return this;
+ }
+
+ /**
+ * @return true if {@link DhtReader.Statistics} includes the stack trace for
+ * the first time a chunk is loaded. Supports debugging DHT code.
+ */
+ public boolean isTrackFirstChunkLoad() {
+ return trackFirstChunkLoad;
+ }
+
+ /**
+ * Set whether or not the initial load of each chunk should be tracked.
+ *
+ * @param track
+ * true to track the stack trace of the first load.
+ * @return {@code this}.
+ */
+ public DhtReaderOptions setTrackFirstChunkLoad(boolean track) {
+ trackFirstChunkLoad = track;
+ return this;
+ }
+
+ /**
+ * Update properties by setting fields from the configuration.
+ * <p>
+ * If a property is not defined in the configuration, then it is left
+ * unmodified.
+ *
+ * @param rc
+ * configuration to read properties from.
+ * @return {@code this}
+ */
+ public DhtReaderOptions fromConfig(Config rc) {
+ setTimeout(Timeout.getTimeout(rc, "core", "dht", "timeout", getTimeout()));
+ setPrefetchFollowEdgeHints(rc.getBoolean("core", "dht", "prefetchFollowEdgeHints", isPrefetchFollowEdgeHints()));
+ setPrefetchLimit(rc.getInt("core", "dht", "prefetchLimit", getPrefetchLimit()));
+
+ setObjectIndexConcurrentBatches(rc.getInt("core", "dht", "objectIndexConcurrentBatches", getObjectIndexConcurrentBatches()));
+ setObjectIndexBatchSize(rc.getInt("core", "dht", "objectIndexBatchSize", getObjectIndexBatchSize()));
+
+ setDeltaBaseCacheSize(rc.getInt("core", "dht", "deltaBaseCacheSize", getDeltaBaseCacheSize()));
+ setDeltaBaseCacheLimit(rc.getInt("core", "dht", "deltaBaseCacheLimit", getDeltaBaseCacheLimit()));
+
+ setRecentInfoCacheSize(rc.getInt("core", "dht", "recentInfoCacheSize", getRecentInfoCacheSize()));
+ setRecentChunkCacheSize(rc.getInt("core", "dht", "recentChunkCacheSize", getRecentChunkCacheSize()));
+
+ setTrackFirstChunkLoad(rc.getBoolean("core", "dht", "debugTrackFirstChunkLoad", isTrackFirstChunkLoad()));
+ return this;
+ }
+}
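For illustration only (not part of the patch), a minimal sketch of how these options could be loaded from a configuration block; the key names follow the fromConfig() calls above, and Config#fromText is the standard JGit API:

    // Builds DhtReaderOptions from a [core "dht"] section. Values are examples only.
    static DhtReaderOptions optionsFromText(String text)
            throws org.eclipse.jgit.errors.ConfigInvalidException {
        org.eclipse.jgit.lib.Config rc = new org.eclipse.jgit.lib.Config();
        rc.fromText(text);
        return new DhtReaderOptions().fromConfig(rc);
    }

    // Usage:
    // optionsFromText("[core \"dht\"]\n\tprefetchLimit = 5242880\n\trecentChunkCacheSize = 8\n");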
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java
new file mode 100644
index 0000000000..22569b91ee
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Ref.Storage.LOOSE;
+import static org.eclipse.jgit.lib.Ref.Storage.NEW;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectIdRef.PeeledNonTag;
+import org.eclipse.jgit.lib.ObjectIdRef.PeeledTag;
+import org.eclipse.jgit.lib.ObjectIdRef.Unpeeled;
+import org.eclipse.jgit.lib.ObjectIdSubclassMap;
+import org.eclipse.jgit.lib.Ref;
+import org.eclipse.jgit.lib.RefDatabase;
+import org.eclipse.jgit.lib.RefRename;
+import org.eclipse.jgit.lib.SymbolicRef;
+import org.eclipse.jgit.revwalk.RevObject;
+import org.eclipse.jgit.revwalk.RevTag;
+import org.eclipse.jgit.revwalk.RevWalk;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.util.RefList;
+import org.eclipse.jgit.util.RefMap;
+
+/** Repository references stored on top of a DHT database. */
+public class DhtRefDatabase extends RefDatabase {
+ private final DhtRepository repository;
+
+ private final Database db;
+
+ private final AtomicReference<RefCache> cache;
+
+ DhtRefDatabase(DhtRepository repository, Database db) {
+ this.repository = repository;
+ this.db = db;
+ this.cache = new AtomicReference<RefCache>();
+ }
+
+ DhtRepository getRepository() {
+ return repository;
+ }
+
+ ChunkKey findChunk(AnyObjectId id) {
+ RefCache c = cache.get();
+ if (c != null) {
+ RefData.IdWithChunk i = c.hints.get(id);
+ if (i != null)
+ return i.getChunkKey();
+ }
+ return null;
+ }
+
+ @Override
+ public Ref getRef(String needle) throws IOException {
+ RefCache curr = readRefs();
+ for (String prefix : SEARCH_PATH) {
+ Ref ref = curr.ids.get(prefix + needle);
+ if (ref != null) {
+ ref = resolve(ref, 0, curr.ids);
+ return ref;
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public List<Ref> getAdditionalRefs() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public Map<String, Ref> getRefs(String prefix) throws IOException {
+ RefCache curr = readRefs();
+ RefList<Ref> packed = RefList.emptyList();
+ RefList<Ref> loose = curr.ids;
+ RefList.Builder<Ref> sym = new RefList.Builder<Ref>(curr.sym.size());
+
+ for (int idx = 0; idx < curr.sym.size(); idx++) {
+ Ref ref = curr.sym.get(idx);
+ String name = ref.getName();
+ ref = resolve(ref, 0, loose);
+ if (ref != null && ref.getObjectId() != null) {
+ sym.add(ref);
+ } else {
+ // A broken symbolic reference, we have to drop it from the
+ // collections the client is about to receive. Should be a
+ // rare occurrence so pay a copy penalty.
+ int toRemove = loose.find(name);
+ if (0 <= toRemove)
+ loose = loose.remove(toRemove);
+ }
+ }
+
+ return new RefMap(prefix, packed, loose, sym.toRefList());
+ }
+
+ private Ref resolve(Ref ref, int depth, RefList<Ref> loose)
+ throws IOException {
+ if (!ref.isSymbolic())
+ return ref;
+
+ Ref dst = ref.getTarget();
+
+ if (MAX_SYMBOLIC_REF_DEPTH <= depth)
+ return null; // claim it doesn't exist
+
+ dst = loose.get(dst.getName());
+ if (dst == null)
+ return ref;
+
+ dst = resolve(dst, depth + 1, loose);
+ if (dst == null)
+ return null;
+
+ return new SymbolicRef(ref.getName(), dst);
+ }
+
+ @Override
+ public Ref peel(Ref ref) throws IOException {
+ final Ref oldLeaf = ref.getLeaf();
+ if (oldLeaf.isPeeled() || oldLeaf.getObjectId() == null)
+ return ref;
+
+ Ref newLeaf = doPeel(oldLeaf);
+
+ RefCache cur = readRefs();
+ int idx = cur.ids.find(oldLeaf.getName());
+ if (0 <= idx && cur.ids.get(idx) == oldLeaf) {
+ RefList<Ref> newList = cur.ids.set(idx, newLeaf);
+ if (cache.compareAndSet(cur, new RefCache(newList, cur)))
+ cachePeeledState(oldLeaf, newLeaf);
+ }
+
+ return recreate(ref, newLeaf);
+ }
+
+ private void cachePeeledState(Ref oldLeaf, Ref newLeaf) {
+ // TODO(spearce) Use an ExecutorService here
+ try {
+ RepositoryKey repo = repository.getRepositoryKey();
+ RefKey key = RefKey.create(repo, newLeaf.getName());
+ RefData oldData = RefData.fromRef(oldLeaf);
+ RefData newData = RefData.fromRef(newLeaf);
+ db.ref().compareAndPut(key, oldData, newData);
+ } catch (TimeoutException e) {
+ // Ignore a timeout here, we were only trying to update
+ // a cached value to save peeling costs in the future.
+
+ } catch (DhtException e) {
+ // Ignore a database error, this was only an attempt to
+ // fix a value that could be cached to save time later.
+ }
+ }
+
+ private Ref doPeel(final Ref leaf) throws MissingObjectException,
+ IOException {
+ RevWalk rw = new RevWalk(getRepository());
+ try {
+ String name = leaf.getName();
+ ObjectId oId = leaf.getObjectId();
+ RevObject obj = rw.parseAny(oId);
+ DhtReader ctx = (DhtReader) rw.getObjectReader();
+
+ ChunkKey key = ctx.findChunk(oId);
+ if (key != null)
+ oId = new RefData.IdWithChunk(oId, key);
+
+ if (obj instanceof RevTag) {
+ ObjectId pId = rw.peel(obj);
+ key = ctx.findChunk(pId);
+ pId = key != null
+ ? new RefData.IdWithChunk(pId, key)
+ : pId.copy();
+ return new PeeledTag(leaf.getStorage(), name, oId, pId);
+ } else {
+ return new PeeledNonTag(leaf.getStorage(), name, oId);
+ }
+ } finally {
+ rw.release();
+ }
+ }
+
+ private static Ref recreate(final Ref old, final Ref leaf) {
+ if (old.isSymbolic()) {
+ Ref dst = recreate(old.getTarget(), leaf);
+ return new SymbolicRef(old.getName(), dst);
+ }
+ return leaf;
+ }
+
+ @Override
+ public DhtRefUpdate newUpdate(String refName, boolean detach)
+ throws IOException {
+ Ref ref = getRefs(ALL).get(refName);
+ if (ref == null)
+ ref = new Unpeeled(NEW, refName, null);
+ RepositoryKey repo = repository.getRepositoryKey();
+ return new DhtRefUpdate(this, repo, db, ref);
+ }
+
+ @Override
+ public RefRename newRename(String fromName, String toName)
+ throws IOException {
+ DhtRefUpdate src = newUpdate(fromName, true);
+ DhtRefUpdate dst = newUpdate(toName, true);
+ return new DhtRefRename(src, dst);
+ }
+
+ @Override
+ public boolean isNameConflicting(String refName) throws IOException {
+ RefList<Ref> all = readRefs().ids;
+
+ // Cannot be nested within an existing reference.
+ int lastSlash = refName.lastIndexOf('/');
+ while (0 < lastSlash) {
+ String needle = refName.substring(0, lastSlash);
+ if (all.contains(needle))
+ return true;
+ lastSlash = refName.lastIndexOf('/', lastSlash - 1);
+ }
+
+ // Cannot be the container of an existing reference.
+ String prefix = refName + '/';
+ int idx = -(all.find(prefix) + 1);
+ if (idx < all.size() && all.get(idx).getName().startsWith(prefix))
+ return true;
+ return false;
+ }
+
+ @Override
+ public void create() {
+ // Nothing to do.
+ }
+
+ @Override
+ public void close() {
+ clearCache();
+ }
+
+ void clearCache() {
+ cache.set(null);
+ }
+
+ void stored(String refName, RefData newData) {
+ Ref ref = fromData(refName, newData);
+ RefCache oldCache, newCache;
+ do {
+ oldCache = cache.get();
+ if (oldCache == null)
+ return;
+
+ RefList<Ref> ids = oldCache.ids.put(ref);
+ RefList<Ref> sym = oldCache.sym;
+
+ if (ref.isSymbolic()) {
+ sym = sym.put(ref); // RefList is immutable; keep the updated copy
+ } else {
+ int p = sym.find(refName);
+ if (0 <= p)
+ sym = sym.remove(p);
+ }
+
+ newCache = new RefCache(ids, sym, oldCache.hints);
+ } while (!cache.compareAndSet(oldCache, newCache));
+ }
+
+ void removed(String refName) {
+ RefCache oldCache, newCache;
+ do {
+ oldCache = cache.get();
+ if (oldCache == null)
+ return;
+
+ int p;
+
+ RefList<Ref> ids = oldCache.ids;
+ p = ids.find(refName);
+ if (0 <= p)
+ ids = ids.remove(p);
+
+ RefList<Ref> sym = oldCache.sym;
+ p = sym.find(refName);
+ if (0 <= p)
+ sym = sym.remove(p);
+
+ newCache = new RefCache(ids, sym, oldCache.hints);
+ } while (!cache.compareAndSet(oldCache, newCache));
+ }
+
+ private RefCache readRefs() throws DhtException {
+ RefCache c = cache.get();
+ if (c == null) {
+ try {
+ c = read();
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(e);
+ }
+ cache.set(c);
+ }
+ return c;
+ }
+
+ private RefCache read() throws DhtException, TimeoutException {
+ RefList.Builder<Ref> id = new RefList.Builder<Ref>();
+ RefList.Builder<Ref> sym = new RefList.Builder<Ref>();
+ ObjectIdSubclassMap<RefData.IdWithChunk> hints = new ObjectIdSubclassMap<RefData.IdWithChunk>();
+
+ for (Map.Entry<RefKey, RefData> e : scan()) {
+ Ref ref = fromData(e.getKey().getName(), e.getValue());
+
+ if (ref.isSymbolic())
+ sym.add(ref);
+ id.add(ref);
+
+ if (ref.getObjectId() instanceof RefData.IdWithChunk
+ && !hints.contains(ref.getObjectId()))
+ hints.add((RefData.IdWithChunk) ref.getObjectId());
+ if (ref.getPeeledObjectId() instanceof RefData.IdWithChunk
+ && !hints.contains(ref.getPeeledObjectId()))
+ hints.add((RefData.IdWithChunk) ref.getPeeledObjectId());
+ }
+
+ id.sort();
+ sym.sort();
+
+ return new RefCache(id.toRefList(), sym.toRefList(), hints);
+ }
+
+ private static Ref fromData(String name, RefData data) {
+ ObjectId oId = null;
+ boolean peeled = false;
+ ObjectId pId = null;
+
+ TinyProtobuf.Decoder d = data.decode();
+ DECODE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break DECODE;
+
+ case RefData.TAG_SYMREF: {
+ String symref = d.string();
+ Ref leaf = new Unpeeled(NEW, symref, null);
+ return new SymbolicRef(name, leaf);
+ }
+
+ case RefData.TAG_TARGET:
+ oId = RefData.IdWithChunk.decode(d.message());
+ continue;
+ case RefData.TAG_IS_PEELED:
+ peeled = d.bool();
+ continue;
+ case RefData.TAG_PEELED:
+ pId = RefData.IdWithChunk.decode(d.message());
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+
+ if (peeled && pId != null)
+ return new PeeledTag(LOOSE, name, oId, pId);
+ if (peeled)
+ return new PeeledNonTag(LOOSE, name, oId);
+ return new Unpeeled(LOOSE, name, oId);
+ }
+
+ private Set<Map.Entry<RefKey, RefData>> scan() throws DhtException,
+ TimeoutException {
+ // TODO(spearce) Do we need to perform READ_REPAIR here?
+ RepositoryKey repo = repository.getRepositoryKey();
+ return db.ref().getAll(Context.LOCAL, repo).entrySet();
+ }
+
+ private static class RefCache {
+ final RefList<Ref> ids;
+
+ final RefList<Ref> sym;
+
+ final ObjectIdSubclassMap<RefData.IdWithChunk> hints;
+
+ RefCache(RefList<Ref> ids, RefList<Ref> sym,
+ ObjectIdSubclassMap<RefData.IdWithChunk> hints) {
+ this.ids = ids;
+ this.sym = sym;
+ this.hints = hints;
+ }
+
+ RefCache(RefList<Ref> ids, RefCache old) {
+ this(ids, old.sym, old.hints);
+ }
+ }
+}
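A hedged usage sketch (not part of the patch) of reading a reference through this database; it assumes an already open DhtRepository named repo:

    // getRef() consults the in-memory RefCache, loading it from the DHT on first use.
    Ref master = repo.getRefDatabase().getRef("refs/heads/master");
    if (master != null && master.getObjectId() != null)
        System.out.println(master.getName() + " -> " + master.getObjectId().name());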
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java
new file mode 100644
index 0000000000..4df3bde787
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.RefRename;
+import org.eclipse.jgit.lib.RefUpdate.Result;
+
+class DhtRefRename extends RefRename {
+ DhtRefRename(DhtRefUpdate src, DhtRefUpdate dst) {
+ super(src, dst);
+ }
+
+ @Override
+ protected Result doRename() throws IOException {
+ // TODO(spearce) Correctly handle renaming foo/bar to foo.
+
+ destination.setExpectedOldObjectId(ObjectId.zeroId());
+ destination.setNewObjectId(source.getRef().getObjectId());
+ switch (destination.update()) {
+ case NEW:
+ source.delete();
+ return Result.RENAMED;
+
+ default:
+ return destination.getResult();
+ }
+ }
+}
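As a hedged usage sketch (not part of the patch), a rename is normally driven through RefDatabase#newRename and RefRename#rename(), which in turn calls doRename() above; repo is assumed to be an open DhtRepository:

    RefRename rename = repo.getRefDatabase().newRename(
            "refs/heads/topic", "refs/heads/topic2");
    RefUpdate.Result result = rename.rename();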
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java
new file mode 100644
index 0000000000..158b7cf496
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.Ref;
+import org.eclipse.jgit.lib.RefUpdate;
+import org.eclipse.jgit.revwalk.RevObject;
+import org.eclipse.jgit.revwalk.RevTag;
+import org.eclipse.jgit.revwalk.RevWalk;
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+class DhtRefUpdate extends RefUpdate {
+ private final DhtRefDatabase refdb;
+
+ private final RepositoryKey repo;
+
+ private final Database db;
+
+ private RefKey refKey;
+
+ private RefData oldData;
+
+ private RefData newData;
+
+ private Ref dstRef;
+
+ private RevWalk rw;
+
+ DhtRefUpdate(DhtRefDatabase refdb, RepositoryKey repo, Database db, Ref ref) {
+ super(ref);
+ this.refdb = refdb;
+ this.repo = repo;
+ this.db = db;
+ }
+
+ @Override
+ protected DhtRefDatabase getRefDatabase() {
+ return refdb;
+ }
+
+ @Override
+ protected DhtRepository getRepository() {
+ return refdb.getRepository();
+ }
+
+ @Override
+ public Result update(RevWalk walk) throws IOException {
+ try {
+ rw = walk;
+ return super.update(walk);
+ } finally {
+ rw = null;
+ }
+ }
+
+ @Override
+ protected boolean tryLock(boolean deref) throws IOException {
+ dstRef = getRef();
+ if (deref)
+ dstRef = dstRef.getLeaf();
+
+ refKey = RefKey.create(repo, dstRef.getName());
+ oldData = RefData.fromRef(dstRef);
+
+ if (dstRef.isSymbolic())
+ setOldObjectId(null);
+ else
+ setOldObjectId(dstRef.getObjectId());
+
+ return true;
+ }
+
+ @Override
+ protected void unlock() {
+ // No state is held while "locked".
+ }
+
+ @Override
+ protected Result doUpdate(Result desiredResult) throws IOException {
+ try {
+ newData = newData();
+ boolean r = db.ref().compareAndPut(refKey, oldData, newData);
+ if (r) {
+ getRefDatabase().stored(dstRef.getName(), newData);
+ return desiredResult;
+ } else {
+ getRefDatabase().clearCache();
+ return Result.LOCK_FAILURE;
+ }
+ } catch (TimeoutException e) {
+ return Result.IO_FAILURE;
+ }
+ }
+
+ @Override
+ protected Result doDelete(Result desiredResult) throws IOException {
+ try {
+ boolean r = db.ref().compareAndRemove(refKey, oldData);
+ if (r) {
+ getRefDatabase().removed(dstRef.getName());
+ return desiredResult;
+ } else {
+ getRefDatabase().clearCache();
+ return Result.LOCK_FAILURE;
+ }
+ } catch (TimeoutException e) {
+ return Result.IO_FAILURE;
+ }
+ }
+
+ @Override
+ protected Result doLink(String target) throws IOException {
+ try {
+ newData = RefData.symbolic(target);
+ boolean r = db.ref().compareAndPut(refKey, oldData, newData);
+ if (r) {
+ getRefDatabase().stored(dstRef.getName(), newData);
+ if (getRef().getStorage() == Ref.Storage.NEW)
+ return Result.NEW;
+ return Result.FORCED;
+ } else {
+ getRefDatabase().clearCache();
+ return Result.LOCK_FAILURE;
+ }
+ } catch (TimeoutException e) {
+ return Result.IO_FAILURE;
+ }
+ }
+
+ private RefData newData() throws IOException {
+ ObjectId newId = getNewObjectId();
+ try {
+ RevObject obj = rw.parseAny(newId);
+ DhtReader ctx = (DhtReader) rw.getObjectReader();
+
+ ChunkKey key = ctx.findChunk(newId);
+ if (key != null)
+ newId = new RefData.IdWithChunk(newId, key);
+
+ if (obj instanceof RevTag) {
+ ObjectId pId = rw.peel(obj);
+ key = ctx.findChunk(pId);
+ pId = key != null ? new RefData.IdWithChunk(pId, key) : pId;
+ return RefData.peeled(newId, pId);
+ } else if (obj != null)
+ return RefData.peeled(newId, null);
+ else
+ return RefData.id(newId);
+ } catch (MissingObjectException e) {
+ return RefData.id(newId);
+ }
+ }
+}
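A hedged sketch (not part of the patch) of the compare-and-put semantics surfaced by doUpdate(): if another writer changes the row between read and write, the database rejects the put and the caller observes LOCK_FAILURE. repo, oldId and newId are assumed here:

    RefUpdate u = repo.updateRef("refs/heads/master");
    u.setExpectedOldObjectId(oldId); // the tip we believe is current
    u.setNewObjectId(newId);
    RefUpdate.Result r = u.update(); // LOCK_FAILURE if the compare-and-put lost the race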
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java
new file mode 100644
index 0000000000..9f60ef5758
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.RefUpdate;
+import org.eclipse.jgit.lib.Repository;
+import org.eclipse.jgit.lib.StoredConfig;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.file.ReflogReader;
+
+/**
+ * A Git repository storing its objects and references in a DHT.
+ * <p>
+ * With the exception of repository creation, this class is thread-safe, but
+ * readers created from it are not. When creating a new repository using the
+ * {@link #create(boolean)} method, the newly constructed repository object does
+ * not ensure the assigned {@link #getRepositoryKey()} will be visible to all
+ * threads. Applications are encouraged to use their own synchronization when
+ * sharing a Repository instance that was used to create a new repository.
+ */
+public class DhtRepository extends Repository {
+ private final RepositoryName name;
+
+ private final Database db;
+
+ private final DhtRefDatabase refdb;
+
+ private final DhtObjDatabase objdb;
+
+ private final DhtConfig config;
+
+ private RepositoryKey key;
+
+ /**
+ * Initialize an in-memory representation of a DHT backed repository.
+ *
+ * @param builder
+ * description of the repository and its data storage.
+ */
+ public DhtRepository(DhtRepositoryBuilder builder) {
+ super(builder);
+ this.name = RepositoryName.create(builder.getRepositoryName());
+ this.key = builder.getRepositoryKey();
+ this.db = builder.getDatabase();
+
+ this.refdb = new DhtRefDatabase(this, db);
+ this.objdb = new DhtObjDatabase(this, builder);
+ this.config = new DhtConfig();
+ }
+
+ /** @return database cluster that houses this repository (among others). */
+ public Database getDatabase() {
+ return db;
+ }
+
+ /** @return human readable name used to open this repository. */
+ public RepositoryName getRepositoryName() {
+ return name;
+ }
+
+ /** @return unique identity of the repository in the {@link #getDatabase()}. */
+ public RepositoryKey getRepositoryKey() {
+ return key;
+ }
+
+ @Override
+ public StoredConfig getConfig() {
+ return config;
+ }
+
+ @Override
+ public DhtRefDatabase getRefDatabase() {
+ return refdb;
+ }
+
+ @Override
+ public DhtObjDatabase getObjectDatabase() {
+ return objdb;
+ }
+
+ @Override
+ public void create(boolean bare) throws IOException {
+ if (!bare)
+ throw new IllegalArgumentException(
+ DhtText.get().repositoryMustBeBare);
+
+ if (getObjectDatabase().exists())
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().repositoryAlreadyExists, name.asString()));
+
+ try {
+ key = db.repository().nextKey();
+ db.repositoryIndex().putUnique(name, key);
+ } catch (TimeoutException err) {
+ throw new DhtTimeoutException(MessageFormat.format(
+ DhtText.get().timeoutLocatingRepository, name), err);
+ }
+
+ String master = Constants.R_HEADS + Constants.MASTER;
+ RefUpdate.Result result = updateRef(Constants.HEAD, true).link(master);
+ if (result != RefUpdate.Result.NEW)
+ throw new IOException(result.name());
+ }
+
+ @Override
+ public void scanForRepoChanges() {
+ refdb.clearCache();
+ }
+
+ @Override
+ public String toString() {
+ return "DhtRepostitory[" + key + " / " + name + "]";
+ }
+
+ // TODO This method should be removed from the JGit API.
+ @Override
+ public ReflogReader getReflogReader(String refName) {
+ throw new UnsupportedOperationException();
+ }
+}
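For illustration only (not part of the patch; generics and exception handling simplified), a minimal sketch of creating a repository. The Database instance db is supplied by a concrete SPI backend and is assumed here:

    static Repository createRepository(Database db, String name) throws IOException {
        DhtRepositoryBuilder b = new DhtRepositoryBuilder();
        b.setDatabase(db);
        b.setRepositoryName(name);
        b.setMustExist(false); // allow creating a repository that does not exist yet
        Repository repo = b.build();
        repo.create(true); // DHT repositories must be bare
        return repo;
    }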
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java
new file mode 100644
index 0000000000..a02b313cf1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.File;
+import java.text.MessageFormat;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.errors.RepositoryNotFoundException;
+import org.eclipse.jgit.lib.BaseRepositoryBuilder;
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+/**
+ * Constructs a {@link DhtRepository}.
+ *
+ * @param <B>
+ * type of builder used by the DHT system.
+ * @param <R>
+ * type of repository used by the DHT system.
+ * @param <D>
+ * type of database used by the DHT system.
+ */
+public class DhtRepositoryBuilder<B extends DhtRepositoryBuilder, R extends DhtRepository, D extends Database>
+ extends BaseRepositoryBuilder<B, R> {
+ private D database;
+
+ private DhtReaderOptions readerOptions;
+
+ private DhtInserterOptions inserterOptions;
+
+ private String name;
+
+ private RepositoryKey key;
+
+ /** Initializes an empty builder with no values set. */
+ public DhtRepositoryBuilder() {
+ setBare();
+ setMustExist(true);
+ }
+
+ /** @return the database that stores the repositories. */
+ public D getDatabase() {
+ return database;
+ }
+
+ /**
+ * Set the cluster used to store the repositories.
+ *
+ * @param database
+ * the database instance that stores the repositories.
+ * @return {@code this}
+ */
+ public B setDatabase(D database) {
+ this.database = database;
+ return self();
+ }
+
+ /** @return options used by readers accessing the repository. */
+ public DhtReaderOptions getReaderOptions() {
+ return readerOptions;
+ }
+
+ /**
+ * Set the reader options.
+ *
+ * @param opt
+ * new reader options object.
+ * @return {@code this}
+ */
+ public B setReaderOptions(DhtReaderOptions opt) {
+ readerOptions = opt;
+ return self();
+ }
+
+ /** @return options used by writers accessing the repository. */
+ public DhtInserterOptions getInserterOptions() {
+ return inserterOptions;
+ }
+
+ /**
+ * Set the inserter options.
+ *
+ * @param opt
+ * new inserter options object.
+ * @return {@code this}
+ */
+ public B setInserterOptions(DhtInserterOptions opt) {
+ inserterOptions = opt;
+ return self();
+ }
+
+ /** @return name of the repository in the DHT. */
+ public String getRepositoryName() {
+ return name;
+ }
+
+ /**
+ * Set the name of the repository to open.
+ *
+ * @param name
+ * the name.
+ * @return {@code this}.
+ */
+ public B setRepositoryName(String name) {
+ this.name = name;
+ return self();
+ }
+
+ /** @return the repository's key. */
+ public RepositoryKey getRepositoryKey() {
+ return key;
+ }
+
+ /**
+ * Set the unique key of the repository within the database.
+ *
+ * @param key
+ * the repository's key.
+ * @return {@code this}
+ */
+ public B setRepositoryKey(RepositoryKey key) {
+ this.key = key;
+ return self();
+ }
+
+ @Override
+ public B setup() throws IllegalArgumentException, DhtException,
+ RepositoryNotFoundException {
+ if (getDatabase() == null)
+ throw new IllegalArgumentException(DhtText.get().databaseRequired);
+
+ if (getReaderOptions() == null)
+ setReaderOptions(new DhtReaderOptions());
+ if (getInserterOptions() == null)
+ setInserterOptions(new DhtInserterOptions());
+
+ if (getRepositoryKey() == null) {
+ if (getRepositoryName() == null)
+ throw new IllegalArgumentException(DhtText.get().nameRequired);
+
+ RepositoryKey r;
+ try {
+ r = getDatabase().repositoryIndex().get(
+ RepositoryName.create(name));
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(MessageFormat.format(
+ DhtText.get().timeoutLocatingRepository, name), e);
+ }
+ if (isMustExist() && r == null)
+ throw new RepositoryNotFoundException(getRepositoryName());
+ if (r != null)
+ setRepositoryKey(r);
+ }
+ return self();
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public R build() throws IllegalArgumentException, DhtException,
+ RepositoryNotFoundException {
+ return (R) new DhtRepository(setup());
+ }
+
+ // We don't support local file IO and thus shouldn't permit these to be set.
+
+ @Override
+ public B setGitDir(File gitDir) {
+ if (gitDir != null)
+ throw new IllegalArgumentException();
+ return self();
+ }
+
+ @Override
+ public B setObjectDirectory(File objectDirectory) {
+ if (objectDirectory != null)
+ throw new IllegalArgumentException();
+ return self();
+ }
+
+ @Override
+ public B addAlternateObjectDirectory(File other) {
+ throw new UnsupportedOperationException("Alternates not supported");
+ }
+
+ @Override
+ public B setWorkTree(File workTree) {
+ if (workTree != null)
+ throw new IllegalArgumentException();
+ return self();
+ }
+
+ @Override
+ public B setIndexFile(File indexFile) {
+ if (indexFile != null)
+ throw new IllegalArgumentException();
+ return self();
+ }
+}
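A matching sketch (not part of the patch) for opening an existing repository by name; because mustExist defaults to true, a missing name surfaces as RepositoryNotFoundException from setup():

    DhtRepositoryBuilder b = new DhtRepositoryBuilder();
    b.setDatabase(db); // same Database as in the creation sketch above
    b.setRepositoryName("sandbox/demo.git");
    Repository repo = b.build();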
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java
new file mode 100644
index 0000000000..3c35ad6df3
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.nls.NLS;
+import org.eclipse.jgit.nls.TranslationBundle;
+
+/** Translation bundle for the DHT storage provider. */
+public class DhtText extends TranslationBundle {
+ /** @return an instance of this translation bundle. */
+ public static DhtText get() {
+ return NLS.getBundleFor(DhtText.class);
+ }
+
+ /***/ public String cannotInsertObject;
+ /***/ public String corruptChunk;
+ /***/ public String corruptCompressedObject;
+ /***/ public String cycleInDeltaChain;
+ /***/ public String databaseRequired;
+ /***/ public String expectedObjectSizeDuringCopyAsIs;
+ /***/ public String invalidChunkKey;
+ /***/ public String invalidObjectIndexKey;
+ /***/ public String invalidObjectInfo;
+ /***/ public String missingChunk;
+ /***/ public String missingLongOffsetBase;
+ /***/ public String nameRequired;
+ /***/ public String noSavedTypeForBase;
+ /***/ public String notTimeUnit;
+ /***/ public String objectListSelectingName;
+ /***/ public String objectListCountingFrom;
+ /***/ public String objectTypeUnknown;
+ /***/ public String packParserInvalidPointer;
+ /***/ public String packParserRollbackFailed;
+ /***/ public String protobufNegativeValuesNotSupported;
+ /***/ public String protobufNoArray;
+ /***/ public String protobufNotBooleanValue;
+ /***/ public String protobufUnsupportedFieldType;
+ /***/ public String protobufWrongFieldLength;
+ /***/ public String protobufWrongFieldType;
+ /***/ public String recordingObjects;
+ /***/ public String repositoryAlreadyExists;
+ /***/ public String repositoryMustBeBare;
+ /***/ public String shortCompressedObject;
+ /***/ public String timeoutChunkMeta;
+ /***/ public String timeoutLocatingRepository;
+ /***/ public String tooManyObjectsInPack;
+ /***/ public String unsupportedChunkIndex;
+ /***/ public String unsupportedObjectTypeInChunk;
+ /***/ public String wrongChunkPositionInCachedPack;
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java
new file mode 100644
index 0000000000..32d52f0a99
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+/** Thrown when a {@link Database} operation exceeds its allowed time. */
+public class DhtTimeoutException extends DhtException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * @param message
+ * description of the timeout.
+ */
+ public DhtTimeoutException(String message) {
+ super(message);
+ }
+
+ /**
+ * @param message
+ * description of the timeout.
+ * @param cause
+ * the timeout that occurred.
+ */
+ public DhtTimeoutException(String message, TimeoutException cause) {
+ super(message);
+ initCause(cause);
+ }
+
+ /**
+ * @param cause
+ * the timeout that occurred.
+ */
+ public DhtTimeoutException(TimeoutException cause) {
+ super(cause.getMessage());
+ initCause(cause);
+ }
+
+ /**
+ * @param cause
+ * the interrupt that aborted the operation.
+ */
+ public DhtTimeoutException(InterruptedException cause) {
+ super(cause.getMessage());
+ initCause(cause);
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java
new file mode 100644
index 0000000000..6608a388e1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import org.eclipse.jgit.util.RawParseUtils;
+
+final class KeyUtils {
+ static short parse16(byte[] src, int pos) {
+ return (short) RawParseUtils.parseHexInt16(src, pos);
+ }
+
+ static int parse32(byte[] src, int pos) {
+ return RawParseUtils.parseHexInt32(src, pos);
+ }
+
+ static void format16(byte[] dst, int p, short w) {
+ int o = p + 3;
+ while (o >= p && w != 0) {
+ dst[o--] = hexbyte[w & 0xf];
+ w >>>= 4;
+ }
+ while (o >= p)
+ dst[o--] = '0';
+ }
+
+ static void format32(byte[] dst, int p, int w) {
+ int o = p + 7;
+ while (o >= p && w != 0) {
+ dst[o--] = hexbyte[w & 0xf];
+ w >>>= 4;
+ }
+ while (o >= p)
+ dst[o--] = '0';
+ }
+
+ private static final byte[] hexbyte = { '0', '1', '2', '3', '4', '5', '6',
+ '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+ private KeyUtils() {
+ // Do not create instances of this class.
+ }
+}
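A small sketch (not part of the patch) of the round-trip these fixed-width hex helpers provide; the methods are package-private, so this only applies to callers inside org.eclipse.jgit.storage.dht:

    byte[] buf = new byte[8];
    KeyUtils.format32(buf, 0, 0x1234abcd); // writes "1234abcd"
    int v = KeyUtils.parse32(buf, 0);      // reads back 0x1234abcd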
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java
new file mode 100644
index 0000000000..aaef431c73
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.InflaterInputStream;
+
+import org.eclipse.jgit.errors.LargeObjectException;
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.lib.ObjectStream;
+
+/** Loader for a large non-delta object. */
+class LargeNonDeltaObject extends ObjectLoader {
+ private final int type;
+
+ private final long sz;
+
+ private final int pos;
+
+ private final DhtReader ctx;
+
+ private final ChunkMeta meta;
+
+ private PackChunk firstChunk;
+
+ LargeNonDeltaObject(int type, long sz, PackChunk pc, int pos, DhtReader ctx) {
+ this.type = type;
+ this.sz = sz;
+ this.pos = pos;
+ this.ctx = ctx;
+ this.meta = pc.getMeta();
+ firstChunk = pc;
+ }
+
+ @Override
+ public boolean isLarge() {
+ return true;
+ }
+
+ @Override
+ public byte[] getCachedBytes() throws LargeObjectException {
+ throw new LargeObjectException.ExceedsByteArrayLimit();
+ }
+
+ @Override
+ public int getType() {
+ return type;
+ }
+
+ @Override
+ public long getSize() {
+ return sz;
+ }
+
+ @Override
+ public ObjectStream openStream() throws MissingObjectException, IOException {
+ PackChunk pc = firstChunk;
+ if (pc != null)
+ firstChunk = null;
+ else
+ pc = ctx.getChunk(meta.getFragmentKey(0));
+
+ InputStream in = new ChunkInputStream(meta, ctx, pos, pc);
+ in = new BufferedInputStream(new InflaterInputStream(in), 8192);
+ return new ObjectStream.Filter(type, sz, in);
+ }
+
+ private static class ChunkInputStream extends InputStream {
+ private final ChunkMeta meta;
+
+ private final DhtReader ctx;
+
+ private int ptr;
+
+ private PackChunk pc;
+
+ private int fragment;
+
+ ChunkInputStream(ChunkMeta meta, DhtReader ctx, int pos, PackChunk pc) {
+ this.ctx = ctx;
+ this.meta = meta;
+ this.ptr = pos;
+ this.pc = pc;
+ }
+
+ @Override
+ public int read(byte[] dstbuf, int dstptr, int dstlen)
+ throws IOException {
+ if (0 == dstlen)
+ return 0;
+
+ int n = pc.read(ptr, dstbuf, dstptr, dstlen);
+ if (n == 0) {
+ if (fragment == meta.getFragmentCount())
+ return -1;
+
+ pc = ctx.getChunk(meta.getFragmentKey(++fragment));
+ ptr = 0;
+ n = pc.read(ptr, dstbuf, dstptr, dstlen);
+ if (n == 0)
+ return -1;
+ }
+ ptr += n;
+ return n;
+ }
+
+ @Override
+ public int read() throws IOException {
+ byte[] tmp = new byte[1];
+ int n = read(tmp, 0, 1);
+ return n == 1 ? tmp[0] & 0xff : -1;
+ }
+ }
+}
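A hedged usage sketch (not part of the patch): large non-delta objects are meant to be streamed rather than materialized as a byte array, and ObjectLoader#copyTo streams through openStream() above. repo and blobId are assumed:

    ObjectLoader ldr = repo.open(blobId, Constants.OBJ_BLOB);
    if (ldr.isLarge()) {
        java.io.OutputStream out = new java.io.FileOutputStream("blob.bin");
        try {
            ldr.copyTo(out); // streams chunk fragments without loading them all at once
        } finally {
            out.close();
        }
    }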
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java
new file mode 100644
index 0000000000..b38fdcec22
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.storage.dht.KeyUtils.format32;
+import static org.eclipse.jgit.storage.dht.KeyUtils.parse32;
+import static org.eclipse.jgit.util.RawParseUtils.decode;
+
+import java.text.MessageFormat;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+
+/** Identifies an ObjectId in the DHT. */
+public final class ObjectIndexKey extends ObjectId implements RowKey {
+ private static final int KEYLEN = 52;
+
+ /**
+ * @param repo
+ * the repository that contains the object.
+ * @param objId
+ * the object to index.
+ * @return the key
+ */
+ public static ObjectIndexKey create(RepositoryKey repo, AnyObjectId objId) {
+ return new ObjectIndexKey(repo.asInt(), objId);
+ }
+
+ /**
+ * @param key
+ * the key, encoded as bytes.
+ * @return the key
+ */
+ public static ObjectIndexKey fromBytes(byte[] key) {
+ if (key.length != KEYLEN)
+ throw new IllegalArgumentException(MessageFormat.format(
+ DhtText.get().invalidObjectIndexKey, decode(key)));
+
+ int repo = parse32(key, 3);
+ ObjectId id = ObjectId.fromString(key, 12);
+ return new ObjectIndexKey(repo, id);
+ }
+
+ /**
+ * @param key
+ * the key, in string form.
+ * @return the key
+ */
+ public static ObjectIndexKey fromString(String key) {
+ return fromBytes(Constants.encodeASCII(key));
+ }
+
+ private final int repo;
+
+ ObjectIndexKey(int repo, AnyObjectId objId) {
+ super(objId);
+ this.repo = repo;
+ }
+
+ /** @return the repository that contains the object. */
+ public RepositoryKey getRepositoryKey() {
+ return RepositoryKey.fromInt(repo);
+ }
+
+ int getRepositoryId() {
+ return repo;
+ }
+
+ public byte[] asBytes() {
+ byte[] r = new byte[KEYLEN];
+ copyTo(r, 12);
+ format32(r, 3, repo);
+ // bucket is the leading 2 digits of the SHA-1.
+ r[11] = '.';
+ r[2] = '.';
+ r[1] = r[12 + 1];
+ r[0] = r[12 + 0];
+ return r;
+ }
+
+ public String asString() {
+ return decode(asBytes());
+ }
+
+ @Override
+ public String toString() {
+ return "object-index:" + asString();
+ }
+}
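For illustration (not part of the patch): the printable form produced by asBytes()/asString() is "bb.rrrrrrrr.&lt;40-hex-digit object name&gt;", where bb repeats the first two hex digits of the SHA-1 (the bucket) and rrrrrrrr is the repository id. A hedged sketch, assuming repo is an open DhtRepository:

    ObjectIndexKey k = ObjectIndexKey.create(
            repo.getRepositoryKey(),
            ObjectId.fromString("ce013625030ba8dba906f756967f9e9ca394464a"));
    System.out.println(k); // object-index:ce.<repo-id-hex>.ce0136...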
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java
new file mode 100644
index 0000000000..941ed6a6d1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.text.MessageFormat;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.List;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+
+/** Connects an object to the chunk it is stored in. */
+public class ObjectInfo {
+ /** Orders ObjectInfo by their time member, oldest first. */
+ public static final Comparator<ObjectInfo> BY_TIME = new Comparator<ObjectInfo>() {
+ public int compare(ObjectInfo a, ObjectInfo b) {
+ return Long.signum(a.getTime() - b.getTime());
+ }
+ };
+
+ /**
+ * Sort the info list according to time, oldest member first.
+ *
+ * @param toSort
+ * list to sort.
+ */
+ public static void sort(List<ObjectInfo> toSort) {
+ Collections.sort(toSort, BY_TIME);
+ }
+
+ /**
+ * Parse an ObjectInfo from the storage system.
+ *
+ * @param chunkKey
+ * the chunk the object points to.
+ * @param data
+ * the data of the ObjectInfo.
+ * @param time
+ * timestamp of the ObjectInfo. If the implementation does not
+ * store timestamp data, supply a negative value.
+ * @return the object's information.
+ */
+ public static ObjectInfo fromBytes(ChunkKey chunkKey, byte[] data, long time) {
+ return fromBytes(chunkKey, TinyProtobuf.decode(data), time);
+ }
+
+ /**
+ * Parse an ObjectInfo from the storage system.
+ *
+ * @param chunkKey
+ * the chunk the object points to.
+ * @param d
+ * the data of the ObjectInfo.
+ * @param time
+ * timestamp of the ObjectInfo. If the implementation does not
+ * store timestamp data, supply a negative value.
+ * @return the object's information.
+ */
+ public static ObjectInfo fromBytes(ChunkKey chunkKey,
+ TinyProtobuf.Decoder d, long time) {
+ int typeCode = -1;
+ int offset = -1;
+ long packedSize = -1;
+ long inflatedSize = -1;
+ ObjectId deltaBase = null;
+ boolean fragmented = false;
+
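+ // TinyProtobuf field tags, mirroring asBytes(): 1 = type code,
+ // 2 = offset, 3 = packed size, 4 = inflated size, 5 = delta base,
+ // 6 = fragmented flag. Unknown tags are skipped for compatibility.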
+ PARSE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break PARSE;
+ case 1:
+ typeCode = d.int32();
+ continue;
+ case 2:
+ offset = d.int32();
+ continue;
+ case 3:
+ packedSize = d.int64();
+ continue;
+ case 4:
+ inflatedSize = d.int64();
+ continue;
+ case 5:
+ deltaBase = d.bytesObjectId();
+ continue;
+ case 6:
+ fragmented = d.bool();
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+
+ if (typeCode < 0 || offset < 0 || packedSize < 0 || inflatedSize < 0)
+ throw new IllegalArgumentException(MessageFormat.format(
+ DhtText.get().invalidObjectInfo, chunkKey));
+
+ return new ObjectInfo(chunkKey, time, typeCode, offset, //
+ packedSize, inflatedSize, deltaBase, fragmented);
+ }
+
+ private final ChunkKey chunk;
+
+ private final long time;
+
+ private final int typeCode;
+
+ private final int offset;
+
+ private final long packedSize;
+
+ private final long inflatedSize;
+
+ private final ObjectId deltaBase;
+
+ private final boolean fragmented;
+
+ ObjectInfo(ChunkKey chunk, long time, int typeCode, int offset,
+ long packedSize, long inflatedSize, ObjectId base,
+ boolean fragmented) {
+ this.chunk = chunk;
+ this.time = time < 0 ? 0 : time;
+ this.typeCode = typeCode;
+ this.offset = offset;
+ this.packedSize = packedSize;
+ this.inflatedSize = inflatedSize;
+ this.deltaBase = base;
+ this.fragmented = fragmented;
+ }
+
+ /** @return the chunk this link points to. */
+ public ChunkKey getChunkKey() {
+ return chunk;
+ }
+
+ /** @return approximate time the object was created, in milliseconds. */
+ public long getTime() {
+ return time;
+ }
+
+ /** @return type of the object, as one of the OBJ_* constants. */
+ public int getType() {
+ return typeCode;
+ }
+
+ /** @return size of the object when fully inflated. */
+ public long getSize() {
+ return inflatedSize;
+ }
+
+ /** @return true if the object storage uses delta compression. */
+ public boolean isDelta() {
+ return getDeltaBase() != null;
+ }
+
+ /** @return true if the object has been fragmented across chunks. */
+ public boolean isFragmented() {
+ return fragmented;
+ }
+
+ int getOffset() {
+ return offset;
+ }
+
+ long getPackedSize() {
+ return packedSize;
+ }
+
+ ObjectId getDeltaBase() {
+ return deltaBase;
+ }
+
+ /**
+ * Convert this ObjectInfo into a byte array for storage.
+ *
+ * @return the ObjectInfo data, encoded as a byte array. This does not
+ * include the ChunkKey; callers must store that separately.
+ */
+ public byte[] asBytes() {
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(256);
+ e.int32(1, typeCode);
+ e.int32(2, offset);
+ e.int64(3, packedSize);
+ e.int64(4, inflatedSize);
+ e.bytes(5, deltaBase);
+ if (fragmented)
+ e.bool(6, fragmented);
+ return e.asByteArray();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ b.append("ObjectInfo:");
+ b.append(chunk);
+ b.append(" [");
+ if (0 < time)
+ b.append(" time=").append(new Date(time));
+ b.append(" type=").append(Constants.typeString(typeCode));
+ b.append(" offset=").append(offset);
+ b.append(" packedSize=").append(packedSize);
+ b.append(" inflatedSize=").append(inflatedSize);
+ if (deltaBase != null)
+ b.append(" deltaBase=").append(deltaBase.name());
+ if (fragmented)
+ b.append(" fragmented");
+ b.append(" ]");
+ return b.toString();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java
new file mode 100644
index 0000000000..17e36ab99a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.util.BlockList;
+
+/**
+ * Re-orders objects destined for a pack stream by chunk locality.
+ * <p>
+ * By re-ordering objects according to chunk locality, and then the original
+ * order the PackWriter intended to use, objects can be copied quickly from
+ * chunks, and each chunk is visited at most once. A {@link Prefetcher} for the
+ * {@link DhtReader} is used to fetch chunks in the order they will be used,
+ * improving throughput by reducing the number of round-trips required to the
+ * storage system.
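+ * <p>
+ * A rough usage sketch; {@code reader} (a DhtReader) and
+ * {@code objectsToPack} are assumed to be supplied by the caller:
+ *
+ * <pre>
+ * Prefetcher prefetch = new Prefetcher(reader, 0);
+ * ObjectWriter planner = new ObjectWriter(reader, prefetch);
+ * planner.plan(objectsToPack); // reorders the list, primes the prefetcher
+ * </pre>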
+ */
+final class ObjectWriter {
+ private final DhtReader ctx;
+
+ private final Prefetcher prefetch;
+
+ private final int batchSize;
+
+ private final Semaphore metaBatches;
+
+ private final AtomicReference<DhtException> metaError;
+
+ private final LinkedHashMap<ChunkKey, Integer> allVisits;
+
+ private final Map<ChunkKey, ChunkMeta> allMeta;
+
+ private final Set<ChunkKey> metaMissing;
+
+ private Set<ChunkKey> metaToRead;
+
+ private int curVisit;
+
+ ObjectWriter(DhtReader ctx, Prefetcher prefetch) {
+ this.ctx = ctx;
+ this.prefetch = prefetch;
+
+ batchSize = ctx.getOptions().getObjectIndexBatchSize();
+ metaBatches = new Semaphore(batchSize);
+ metaError = new AtomicReference<DhtException>();
+
+ allVisits = new LinkedHashMap<ChunkKey, Integer>();
+ allMeta = new HashMap<ChunkKey, ChunkMeta>();
+ metaMissing = new HashSet<ChunkKey>();
+ metaToRead = new HashSet<ChunkKey>();
+ curVisit = 1;
+ }
+
+ void plan(List<DhtObjectToPack> list) throws DhtException {
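+ // Planning runs in three phases: assign a visit order to every
+ // object, clustered by the chunk it lives in; resolve ChunkMeta for
+ // fragmented objects in bounded batches; then push the resulting
+ // chunk order into the prefetcher and sort the list by visit order.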
+ try {
+ for (DhtObjectToPack obj : list)
+ visit(obj);
+
+ if (!metaToRead.isEmpty())
+ startBatch(Context.FAST_MISSING_OK);
+ awaitPendingBatches();
+
+ synchronized (metaMissing) {
+ if (!metaMissing.isEmpty()) {
+ metaBatches.release(batchSize);
+ resolveMissing();
+ awaitPendingBatches();
+ }
+ }
+ } catch (InterruptedException err) {
+ throw new DhtTimeoutException(err);
+ }
+
+ Iterable<ChunkKey> order;
+ synchronized (allMeta) {
+ if (allMeta.isEmpty()) {
+ order = allVisits.keySet();
+ } else {
+ BlockList<ChunkKey> keys = new BlockList<ChunkKey>();
+ for (ChunkKey key : allVisits.keySet()) {
+ keys.add(key);
+
+ ChunkMeta meta = allMeta.remove(key);
+ if (meta != null) {
+ for (int i = 1; i < meta.getFragmentCount(); i++)
+ keys.add(meta.getFragmentKey(i));
+ }
+ }
+ order = keys;
+ }
+ }
+ prefetch.push(order);
+
+ Collections.sort(list, new Comparator<DhtObjectToPack>() {
+ public int compare(DhtObjectToPack a, DhtObjectToPack b) {
+ return a.visitOrder - b.visitOrder;
+ }
+ });
+ }
+
+ private void visit(DhtObjectToPack obj) throws InterruptedException,
+ DhtTimeoutException {
+ // Plan the visit to the delta base before the object. This
+ // ensures the base is in the stream first, and OFS_DELTA can
+ // be used for the delta.
+ //
+ DhtObjectToPack base = (DhtObjectToPack) obj.getDeltaBase();
+ if (base != null && base.visitOrder == 0) {
+ // Use the current visit, even if it's wrong. This will
+ // prevent infinite recursion when there is a cycle in the
+ // delta chain. Cycles are broken during writing, not in
+ // the earlier planning phases.
+ //
+ obj.visitOrder = curVisit;
+ visit(base);
+ }
+
+ ChunkKey key = obj.chunk;
+ if (key != null) {
+ Integer i = allVisits.get(key);
+ if (i == null) {
+ i = Integer.valueOf(1 + allVisits.size());
+ allVisits.put(key, i);
+ }
+ curVisit = i.intValue();
+ }
+
+ if (obj.isFragmented()) {
+ metaToRead.add(key);
+ if (metaToRead.size() == batchSize)
+ startBatch(Context.FAST_MISSING_OK);
+ }
+ obj.visitOrder = curVisit;
+ }
+
+ private void resolveMissing() throws DhtTimeoutException,
+ InterruptedException {
+ metaToRead = new HashSet<ChunkKey>();
+ for (ChunkKey key : metaMissing) {
+ metaToRead.add(key);
+ if (metaToRead.size() == batchSize)
+ startBatch(Context.LOCAL);
+ }
+ if (!metaToRead.isEmpty())
+ startBatch(Context.LOCAL);
+ }
+
+ private void startBatch(Context context) throws InterruptedException,
+ DhtTimeoutException {
+ Timeout to = ctx.getOptions().getTimeout();
+ if (!metaBatches.tryAcquire(1, to.getTime(), to.getUnit()))
+ throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta);
+
+ Set<ChunkKey> keys = metaToRead;
+ ctx.getDatabase().chunk().getMeta(
+ context,
+ keys,
+ new MetaLoader(context, keys));
+ metaToRead = new HashSet<ChunkKey>();
+ }
+
+ private void awaitPendingBatches() throws InterruptedException,
+ DhtTimeoutException, DhtException {
+ Timeout to = ctx.getOptions().getTimeout();
+ if (!metaBatches.tryAcquire(batchSize, to.getTime(), to.getUnit()))
+ throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta);
+ if (metaError.get() != null)
+ throw metaError.get();
+ }
+
+ private class MetaLoader implements AsyncCallback<Collection<ChunkMeta>> {
+ private final Context context;
+
+ private final Set<ChunkKey> keys;
+
+ MetaLoader(Context context, Set<ChunkKey> keys) {
+ this.context = context;
+ this.keys = keys;
+ }
+
+ public void onSuccess(Collection<ChunkMeta> result) {
+ try {
+ synchronized (allMeta) {
+ for (ChunkMeta meta : result) {
+ allMeta.put(meta.getChunkKey(), meta);
+ keys.remove(meta.getChunkKey());
+ }
+ }
+ if (context == Context.FAST_MISSING_OK && !keys.isEmpty()) {
+ synchronized (metaMissing) {
+ metaMissing.addAll(keys);
+ }
+ }
+ } finally {
+ metaBatches.release(1);
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ metaError.compareAndSet(null, error);
+ metaBatches.release(1);
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java
new file mode 100644
index 0000000000..2fcded83a7
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.AsyncObjectLoaderQueue;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.lib.ObjectReader;
+
+/**
+ * Locates objects in large batches, then opens them clustered by chunk.
+ * <p>
+ * To simplify the implementation this method does not consult the local
+ * {@link ChunkCache} for objects. Instead it performs lookups for the
+ * {@link ObjectInfo} in large batches, clusters those by ChunkKey, and loads
+ * the chunks with a {@link Prefetcher}.
+ * <p>
+ * The lookup queue is completely spun out during the first invocation of
+ * {@link #next()}, ensuring all chunks are known before any single chunk is
+ * accessed. This is necessary to improve access locality and prevent thrashing
+ * of the local ChunkCache. It also means any {@link MissingObjectException}
+ * is thrown at the start of traversal, once the lookup queue has been
+ * drained, rather than partway through iteration.
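+ * <p>
+ * A rough usage sketch; {@code reader} and {@code ids} are assumed to be
+ * supplied by the caller:
+ *
+ * <pre>
+ * OpenQueue&lt;ObjectId&gt; queue = new OpenQueue&lt;ObjectId&gt;(reader, ids, true);
+ * while (queue.next()) {
+ *   ObjectLoader ldr = queue.open();
+ *   // ldr corresponds to queue.getCurrent()
+ * }
+ * queue.release();
+ * </pre>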
+ *
+ * @param <T>
+ * type of object to associate with the loader.
+ */
+final class OpenQueue<T extends ObjectId> extends QueueObjectLookup<T>
+ implements AsyncObjectLoaderQueue<T> {
+ private Map<ChunkKey, Collection<ObjectWithInfo<T>>> byChunk;
+
+ private Iterator<Collection<ObjectWithInfo<T>>> chunkItr;
+
+ private Iterator<ObjectWithInfo<T>> objectItr;
+
+ private Prefetcher prefetcher;
+
+ private ObjectWithInfo<T> current;
+
+ private PackChunk currChunk;
+
+ OpenQueue(DhtReader reader, Iterable<T> objectIds, boolean reportMissing) {
+ super(reader, reportMissing);
+ setCacheLoadedInfo(true);
+ setNeedChunkOnly(true);
+ init(objectIds);
+
+ byChunk = new LinkedHashMap<ChunkKey, Collection<ObjectWithInfo<T>>>();
+ objectItr = Collections.<ObjectWithInfo<T>> emptyList().iterator();
+ }
+
+ public boolean next() throws MissingObjectException, IOException {
+ if (chunkItr == null)
+ init();
+
+ if (!objectItr.hasNext()) {
+ currChunk = null;
+ if (!chunkItr.hasNext()) {
+ release();
+ return false;
+ }
+ objectItr = chunkItr.next().iterator();
+ }
+
+ current = objectItr.next();
+ return true;
+ }
+
+ public T getCurrent() {
+ return current.object;
+ }
+
+ public ObjectId getObjectId() {
+ return getCurrent();
+ }
+
+ public ObjectLoader open() throws IOException {
+ ChunkKey chunkKey = current.chunkKey;
+
+ // Objects returned by the queue are clustered by chunk. This object
+ // is either in the current chunk, or in the next chunk ready on the
+ // prefetcher. Anything else is a programming error.
+ //
+ PackChunk chunk;
+ if (currChunk != null && chunkKey.equals(currChunk.getChunkKey()))
+ chunk = currChunk;
+ else {
+ chunk = prefetcher.get(chunkKey);
+ if (chunk == null)
+ throw new DhtMissingChunkException(chunkKey);
+ currChunk = chunk;
+ reader.recentChunk(chunk);
+ }
+
+ if (current.info != null) {
+ int ptr = current.info.getOffset();
+ int type = current.info.getType();
+ return PackChunk.read(chunk, ptr, reader, type);
+ } else {
+ int ptr = chunk.findOffset(repo, current.object);
+ if (ptr < 0)
+ throw DhtReader.missing(current.object, ObjectReader.OBJ_ANY);
+ return PackChunk.read(chunk, ptr, reader, ObjectReader.OBJ_ANY);
+ }
+ }
+
+ @Override
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ release();
+ return true;
+ }
+
+ @Override
+ public void release() {
+ prefetcher = null;
+ currChunk = null;
+ }
+
+ private void init() throws IOException {
+ ObjectWithInfo<T> c;
+
+ while ((c = nextObjectWithInfo()) != null) {
+ ChunkKey chunkKey = c.chunkKey;
+ Collection<ObjectWithInfo<T>> list = byChunk.get(chunkKey);
+ if (list == null) {
+ list = new ArrayList<ObjectWithInfo<T>>();
+ byChunk.put(chunkKey, list);
+
+ if (prefetcher == null)
+ prefetcher = new Prefetcher(reader, 0);
+ prefetcher.push(chunkKey);
+ }
+ list.add(c);
+ }
+
+ chunkItr = byChunk.values().iterator();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java
new file mode 100644
index 0000000000..c3bedc4ae1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java
@@ -0,0 +1,803 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_BAD;
+import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
+import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
+import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
+import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;
+import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
+import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
+import static org.eclipse.jgit.lib.Constants.newMessageDigest;
+import static org.eclipse.jgit.storage.dht.ChunkFormatter.TRAILER_SIZE;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.MessageDigest;
+import java.text.MessageFormat;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+
+import org.eclipse.jgit.errors.CorruptObjectException;
+import org.eclipse.jgit.errors.LargeObjectException;
+import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.storage.pack.BinaryDelta;
+import org.eclipse.jgit.storage.pack.PackOutputStream;
+import org.eclipse.jgit.transport.PackParser;
+
+/**
+ * Chunk of object data, stored under a {@link ChunkKey}.
+ * <p>
+ * A chunk typically contains thousands of objects, compressed in the Git native
+ * pack file format. Its associated {@link ChunkIndex} provides offsets for each
+ * object's header and compressed data.
+ * <p>
+ * Chunks (and their indexes) are opaque binary blobs meant only to be read by
+ * the Git implementation.
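+ * <p>
+ * A chunk is rebuilt from storage through the nested {@link Members}
+ * builder; a minimal sketch, assuming {@code key}, {@code data} and
+ * {@code index} were loaded by the storage provider ({@code build()} may
+ * throw {@link DhtException} if the data is corrupt):
+ *
+ * <pre>
+ * PackChunk chunk = new PackChunk.Members()
+ *   .setChunkKey(key)
+ *   .setChunkData(data)
+ *   .setChunkIndex(index)
+ *   .build();
+ * </pre>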
+ */
+public final class PackChunk {
+ /** Constructs a {@link PackChunk} while reading from the DHT. */
+ public static class Members {
+ private ChunkKey chunkKey;
+
+ private byte[] dataBuf;
+
+ private int dataPtr;
+
+ private int dataLen;
+
+ private byte[] indexBuf;
+
+ private int indexPtr;
+
+ private int indexLen;
+
+ private ChunkMeta meta;
+
+ /** @return the chunk key. Never null. */
+ public ChunkKey getChunkKey() {
+ return chunkKey;
+ }
+
+ /**
+ * @param key
+ * the key of this chunk in the database.
+ * @return {@code this}
+ */
+ public Members setChunkKey(ChunkKey key) {
+ this.chunkKey = key;
+ return this;
+ }
+
+ /** @return true if there is chunk data present. */
+ public boolean hasChunkData() {
+ return dataBuf != null;
+ }
+
+ /** @return the chunk data, or null if not available. */
+ public byte[] getChunkData() {
+ return asArray(dataBuf, dataPtr, dataLen);
+ }
+
+ /** @return the chunk data, or null if not available. */
+ public ByteBuffer getChunkDataAsByteBuffer() {
+ return asByteBuffer(dataBuf, dataPtr, dataLen);
+ }
+
+ private static byte[] asArray(byte[] buf, int ptr, int len) {
+ if (buf == null)
+ return null;
+ if (ptr == 0 && buf.length == len)
+ return buf;
+ byte[] r = new byte[len];
+ System.arraycopy(buf, ptr, r, 0, len);
+ return r;
+ }
+
+ private static ByteBuffer asByteBuffer(byte[] buf, int ptr, int len) {
+ return buf != null ? ByteBuffer.wrap(buf, ptr, len) : null;
+ }
+
+ /**
+ * @param chunkData
+ * the raw chunk data.
+ * @return {@code this}
+ */
+ public Members setChunkData(byte[] chunkData) {
+ return setChunkData(chunkData, 0, chunkData.length);
+ }
+
+ /**
+ * @param chunkData
+ * buffer holding the raw chunk data.
+ * @param ptr
+ * offset of the first data byte in {@code chunkData}.
+ * @param len
+ * number of data bytes.
+ * @return {@code this}
+ */
+ public Members setChunkData(byte[] chunkData, int ptr, int len) {
+ this.dataBuf = chunkData;
+ this.dataPtr = ptr;
+ this.dataLen = len;
+ return this;
+ }
+
+ /** @return true if there is a chunk index present. */
+ public boolean hasChunkIndex() {
+ return indexBuf != null;
+ }
+
+ /** @return the chunk index, or null if not available. */
+ public byte[] getChunkIndex() {
+ return asArray(indexBuf, indexPtr, indexLen);
+ }
+
+ /** @return the chunk index, or null if not available. */
+ public ByteBuffer getChunkIndexAsByteBuffer() {
+ return asByteBuffer(indexBuf, indexPtr, indexLen);
+ }
+
+ /**
+ * @param chunkIndex
+ * the raw chunk index.
+ * @return {@code this}
+ */
+ public Members setChunkIndex(byte[] chunkIndex) {
+ return setChunkIndex(chunkIndex, 0, chunkIndex.length);
+ }
+
+ /**
+ * @param chunkIndex
+ * buffer holding the raw chunk index.
+ * @param ptr
+ * offset of the first index byte in {@code chunkIndex}.
+ * @param len
+ * number of index bytes.
+ * @return {@code this}
+ */
+ public Members setChunkIndex(byte[] chunkIndex, int ptr, int len) {
+ this.indexBuf = chunkIndex;
+ this.indexPtr = ptr;
+ this.indexLen = len;
+ return this;
+ }
+
+ /** @return true if there is meta information present. */
+ public boolean hasMeta() {
+ return meta != null;
+ }
+
+ /** @return the inline meta data, or null if not available. */
+ public ChunkMeta getMeta() {
+ return meta;
+ }
+
+ /**
+ * @param meta
+ * inline meta data for this chunk, such as fragment and
+ * prefetch information.
+ * @return {@code this}
+ */
+ public Members setMeta(ChunkMeta meta) {
+ this.meta = meta;
+ return this;
+ }
+
+ /**
+ * @return the PackChunk instance.
+ * @throws DhtException
+ * if early validation indicates the chunk data is corrupt
+ * or not recognized by this version of the library.
+ */
+ public PackChunk build() throws DhtException {
+ ChunkIndex i;
+ if (indexBuf != null)
+ i = ChunkIndex.fromBytes(chunkKey, indexBuf, indexPtr, indexLen);
+ else
+ i = null;
+
+ return new PackChunk(chunkKey, dataBuf, dataPtr, dataLen, i, meta);
+ }
+ }
+
+ private static final int INFLATE_STRIDE = 512;
+
+ private final ChunkKey key;
+
+ private final byte[] dataBuf;
+
+ private final int dataPtr;
+
+ private final int dataLen;
+
+ private final ChunkIndex index;
+
+ private final ChunkMeta meta;
+
+ private volatile Boolean valid;
+
+ private volatile ChunkKey nextFragment;
+
+ PackChunk(ChunkKey key, byte[] dataBuf, int dataPtr, int dataLen,
+ ChunkIndex index, ChunkMeta meta) {
+ this.key = key;
+ this.dataBuf = dataBuf;
+ this.dataPtr = dataPtr;
+ this.dataLen = dataLen;
+ this.index = index;
+ this.meta = meta;
+ }
+
+ /** @return unique name of this chunk in the database. */
+ public ChunkKey getChunkKey() {
+ return key;
+ }
+
+ /** @return index describing the objects stored within this chunk. */
+ public ChunkIndex getIndex() {
+ return index;
+ }
+
+ /** @return inline meta information, or null if no data was necessary. */
+ public ChunkMeta getMeta() {
+ return meta;
+ }
+
+ @Override
+ public String toString() {
+ return "PackChunk[" + getChunkKey() + "]";
+ }
+
+ boolean hasIndex() {
+ return index != null;
+ }
+
+ boolean isFragment() {
+ return meta != null && 0 < meta.getFragmentCount();
+ }
+
+ int findOffset(RepositoryKey repo, AnyObjectId objId) {
+ if (key.getRepositoryId() == repo.asInt())
+ return index.findOffset(objId);
+ return -1;
+ }
+
+ boolean contains(RepositoryKey repo, AnyObjectId objId) {
+ return 0 <= findOffset(repo, objId);
+ }
+
+ static ObjectLoader read(PackChunk pc, int pos, final DhtReader ctx,
+ final int typeHint) throws IOException {
+ try {
+ return read1(pc, pos, ctx, typeHint, true /* use recentChunks */);
+ } catch (DeltaChainCycleException cycleFound) {
+ // A cycle can occur if the recentChunks cache was used by the reader
+ // to satisfy an OBJ_REF_DELTA, but the chunk that was chosen has
+ // a reverse delta back onto an object already being read during
+ // this invocation. It's not as uncommon as it sounds, as the Git
+ // wire protocol can sometimes copy an object the repository already
+ // has when dealing with reverts or cherry-picks.
+ //
+ // Work around the cycle by disabling the recentChunks cache for
+ // this resolution only. This will force the DhtReader to re-read
+ // OBJECT_INDEX and consider only the oldest chunk for any given
+ // object. There cannot be a cycle if the method only walks along
+ // the oldest chunks.
+ try {
+ ctx.getStatistics().deltaChainCycles++;
+ return read1(pc, pos, ctx, typeHint, false /* no recentChunks */);
+ } catch (DeltaChainCycleException cannotRecover) {
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().cycleInDeltaChain, pc.getChunkKey(),
+ Integer.valueOf(pos)));
+ }
+ }
+ }
+
+ @SuppressWarnings("null")
+ private static ObjectLoader read1(PackChunk pc, int pos,
+ final DhtReader ctx, final int typeHint, final boolean recent)
+ throws IOException, DeltaChainCycleException {
+ try {
+ Delta delta = null;
+ byte[] data = null;
+ int type = OBJ_BAD;
+ boolean cached = false;
+
+ SEARCH: for (;;) {
+ final byte[] dataBuf = pc.dataBuf;
+ final int dataPtr = pc.dataPtr;
+ final int posPtr = dataPtr + pos;
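+ // Parse the pack object header: bits 4-6 of the first byte hold
+ // the type code, its low 4 bits start the size, and each
+ // continuation byte (high bit set) adds 7 more size bits.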
+ int c = dataBuf[posPtr] & 0xff;
+ int typeCode = (c >> 4) & 7;
+ long sz = c & 15;
+ int shift = 4;
+ int p = 1;
+ while ((c & 0x80) != 0) {
+ c = dataBuf[posPtr + p++] & 0xff;
+ sz += (c & 0x7f) << shift;
+ shift += 7;
+ }
+
+ switch (typeCode) {
+ case OBJ_COMMIT:
+ case OBJ_TREE:
+ case OBJ_BLOB:
+ case OBJ_TAG: {
+ if (delta != null) {
+ data = inflate(sz, pc, pos + p, ctx);
+ type = typeCode;
+ break SEARCH;
+ }
+
+ if (sz < Integer.MAX_VALUE && !pc.isFragment()) {
+ try {
+ data = pc.inflateOne(sz, pos + p, ctx);
+ return new ObjectLoader.SmallObject(typeCode, data);
+ } catch (LargeObjectException tooBig) {
+ // Fall through and stream.
+ }
+ }
+
+ return new LargeNonDeltaObject(typeCode, sz, pc, pos + p, ctx);
+ }
+
+ case OBJ_OFS_DELTA: {
+ c = dataBuf[posPtr + p++] & 0xff;
+ long base = c & 127;
+ while ((c & 128) != 0) {
+ base += 1;
+ c = dataBuf[posPtr + p++] & 0xff;
+ base <<= 7;
+ base += (c & 127);
+ }
+
+ ChunkKey baseChunkKey;
+ int basePosInChunk;
+
+ if (base <= pos) {
+ // Base occurs in the same chunk, just earlier.
+ baseChunkKey = pc.getChunkKey();
+ basePosInChunk = pos - (int) base;
+ } else {
+ // Long offset delta, base occurs in another chunk.
+ // Adjust distance to be from our chunk start.
+ base = base - pos;
+
+ ChunkMeta.BaseChunk baseChunk;
+ baseChunk = pc.meta.getBaseChunk(base);
+ baseChunkKey = baseChunk.getChunkKey();
+ basePosInChunk = (int) (baseChunk.relativeStart - base);
+ }
+
+ delta = new Delta(delta, //
+ pc.key, pos, (int) sz, p, //
+ baseChunkKey, basePosInChunk);
+ if (sz != delta.deltaSize)
+ break SEARCH;
+
+ DeltaBaseCache.Entry e = delta.getBase(ctx);
+ if (e != null) {
+ type = e.type;
+ data = e.data;
+ cached = true;
+ break SEARCH;
+ }
+ if (baseChunkKey != pc.getChunkKey())
+ pc = ctx.getChunk(baseChunkKey);
+ pos = basePosInChunk;
+ continue SEARCH;
+ }
+
+ case OBJ_REF_DELTA: {
+ ObjectId id = ObjectId.fromRaw(dataBuf, posPtr + p);
+ PackChunk nc = pc;
+ int base = pc.index.findOffset(id);
+ if (base < 0) {
+ DhtReader.ChunkAndOffset n;
+ n = ctx.getChunk(id, typeHint, recent);
+ nc = n.chunk;
+ base = n.offset;
+ }
+ checkCycle(delta, pc.key, pos);
+ delta = new Delta(delta, //
+ pc.key, pos, (int) sz, p + 20, //
+ nc.getChunkKey(), base);
+ if (sz != delta.deltaSize)
+ break SEARCH;
+
+ DeltaBaseCache.Entry e = delta.getBase(ctx);
+ if (e != null) {
+ type = e.type;
+ data = e.data;
+ cached = true;
+ break SEARCH;
+ }
+ pc = nc;
+ pos = base;
+ continue SEARCH;
+ }
+
+ default:
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().unsupportedObjectTypeInChunk, //
+ Integer.valueOf(typeCode), //
+ pc.getChunkKey(), //
+ Integer.valueOf(pos)));
+ }
+ }
+
+ // At this point there is at least one delta to apply to data.
+ // (Whole objects with no deltas to apply return early above.)
+
+ do {
+ if (!delta.deltaChunk.equals(pc.getChunkKey()))
+ pc = ctx.getChunk(delta.deltaChunk);
+ pos = delta.deltaPos;
+
+ // Cache only the base immediately before the desired object.
+ if (cached)
+ cached = false;
+ else if (delta.next == null)
+ delta.putBase(ctx, type, data);
+
+ final byte[] cmds = delta.decompress(pc, ctx);
+ final long sz = BinaryDelta.getResultSize(cmds);
+ final byte[] result = newResult(sz);
+ BinaryDelta.apply(data, cmds, result);
+ data = result;
+ delta = delta.next;
+ } while (delta != null);
+
+ return new ObjectLoader.SmallObject(type, data);
+
+ } catch (DataFormatException dfe) {
+ CorruptObjectException coe = new CorruptObjectException(
+ MessageFormat.format(DhtText.get().corruptCompressedObject,
+ pc.getChunkKey(), Integer.valueOf(pos)));
+ coe.initCause(dfe);
+ throw coe;
+ }
+ }
+
+ private static byte[] inflate(long sz, PackChunk pc, int pos,
+ DhtReader reader) throws DataFormatException, DhtException {
+ if (pc.isFragment())
+ return inflateFragment(sz, pc, pos, reader);
+ return pc.inflateOne(sz, pos, reader);
+ }
+
+ private byte[] inflateOne(long sz, int pos, DhtReader reader)
+ throws DataFormatException {
+ // Because the chunk ends in a 4 byte CRC, there is always
+ // more data available for input than the inflater needs.
+ // This also helps with an optimization in libz where it
+ // wants at least 1 extra byte of input beyond the end.
+
+ final byte[] dstbuf = newResult(sz);
+ final Inflater inf = reader.inflater();
+ final int offset = pos;
+ int dstoff = 0;
+
+ int bs = Math.min(dataLen - pos, INFLATE_STRIDE);
+ inf.setInput(dataBuf, dataPtr + pos, bs);
+ pos += bs;
+
+ while (dstoff < dstbuf.length) {
+ int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
+ if (n == 0) {
+ if (inf.needsInput()) {
+ bs = Math.min(dataLen - pos, INFLATE_STRIDE);
+ inf.setInput(dataBuf, dataPtr + pos, bs);
+ pos += bs;
+ continue;
+ }
+ break;
+ }
+ dstoff += n;
+ }
+
+ if (dstoff != sz) {
+ throw new DataFormatException(MessageFormat.format(
+ DhtText.get().shortCompressedObject,
+ getChunkKey(),
+ Integer.valueOf(offset)));
+ }
+ return dstbuf;
+ }
+
+ private static byte[] inflateFragment(long sz, PackChunk pc, final int pos,
+ DhtReader reader) throws DataFormatException, DhtException {
+ byte[] dstbuf = newResult(sz);
+ int dstoff = 0;
+
+ final Inflater inf = reader.inflater();
+ final ChunkMeta meta = pc.meta;
+ int nextChunk = 1;
+
+ int bs = pc.dataLen - pos - TRAILER_SIZE;
+ inf.setInput(pc.dataBuf, pc.dataPtr + pos, bs);
+
+ while (dstoff < dstbuf.length) {
+ int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
+ if (n == 0) {
+ if (inf.needsInput()) {
+ if (meta.getFragmentCount() <= nextChunk)
+ break;
+ pc = reader.getChunk(meta.getFragmentKey(nextChunk++));
+ if (meta.getFragmentCount() == nextChunk)
+ bs = pc.dataLen; // Include trailer on last chunk.
+ else
+ bs = pc.dataLen - TRAILER_SIZE;
+ inf.setInput(pc.dataBuf, pc.dataPtr, bs);
+ continue;
+ }
+ break;
+ }
+ dstoff += n;
+ }
+
+ if (dstoff != sz) {
+ throw new DataFormatException(MessageFormat.format(
+ DhtText.get().shortCompressedObject,
+ meta.getChunkKey(),
+ Integer.valueOf(pos)));
+ }
+ return dstbuf;
+ }
+
+ private static byte[] newResult(long sz) {
+ if (Integer.MAX_VALUE < sz)
+ throw new LargeObjectException.ExceedsByteArrayLimit();
+ try {
+ return new byte[(int) sz];
+ } catch (OutOfMemoryError noMemory) {
+ throw new LargeObjectException.OutOfMemory(noMemory);
+ }
+ }
+
+ int readObjectTypeAndSize(int ptr, PackParser.ObjectTypeAndSize info) {
+ ptr += dataPtr;
+
+ int c = dataBuf[ptr++] & 0xff;
+ int typeCode = (c >> 4) & 7;
+ long sz = c & 15;
+ int shift = 4;
+ while ((c & 0x80) != 0) {
+ c = dataBuf[ptr++] & 0xff;
+ sz += (c & 0x7f) << shift;
+ shift += 7;
+ }
+
+ switch (typeCode) {
+ case OBJ_OFS_DELTA:
+ c = dataBuf[ptr++] & 0xff;
+ while ((c & 128) != 0)
+ c = dataBuf[ptr++] & 0xff;
+ break;
+
+ case OBJ_REF_DELTA:
+ ptr += 20;
+ break;
+ }
+
+ info.type = typeCode;
+ info.size = sz;
+ return ptr - dataPtr;
+ }
+
+ int read(int ptr, byte[] dst, int dstPos, int cnt) {
+ // Do not allow readers to read the CRC-32 from the tail.
+ int n = Math.min(cnt, (dataLen - TRAILER_SIZE) - ptr);
+ System.arraycopy(dataBuf, dataPtr + ptr, dst, dstPos, n);
+ return n;
+ }
+
+ void copyObjectAsIs(PackOutputStream out, DhtObjectToPack obj,
+ boolean validate, DhtReader ctx) throws IOException,
+ StoredObjectRepresentationNotAvailableException {
+ if (validate && !isValid()) {
+ StoredObjectRepresentationNotAvailableException gone;
+
+ gone = new StoredObjectRepresentationNotAvailableException(obj);
+ gone.initCause(new DhtException(MessageFormat.format(
+ DhtText.get().corruptChunk, getChunkKey())));
+ throw gone;
+ }
+
+ int ptr = dataPtr + obj.offset;
+ int c = dataBuf[ptr++] & 0xff;
+ int typeCode = (c >> 4) & 7;
+ long inflatedSize = c & 15;
+ int shift = 4;
+ while ((c & 0x80) != 0) {
+ c = dataBuf[ptr++] & 0xff;
+ inflatedSize += (c & 0x7f) << shift;
+ shift += 7;
+ }
+
+ switch (typeCode) {
+ case OBJ_OFS_DELTA:
+ do {
+ c = dataBuf[ptr++] & 0xff;
+ } while ((c & 128) != 0);
+ break;
+
+ case OBJ_REF_DELTA:
+ ptr += 20;
+ break;
+ }
+
+ // If the size is positive, it's accurate. If it's -1, this is a
+ // fragmented object that will need more handling below,
+ // so copy all of the chunk, minus the trailer.
+
+ final int maxAvail = (dataLen - TRAILER_SIZE) - (ptr - dataPtr);
+ final int copyLen;
+ if (0 < obj.size)
+ copyLen = Math.min(obj.size, maxAvail);
+ else if (-1 == obj.size)
+ copyLen = maxAvail;
+ else
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().expectedObjectSizeDuringCopyAsIs, obj));
+ out.writeHeader(obj, inflatedSize);
+ out.write(dataBuf, ptr, copyLen);
+
+ // If the object was fragmented, send all of the other fragments.
+ if (isFragment()) {
+ int cnt = meta.getFragmentCount();
+ for (int fragId = 1; fragId < cnt; fragId++) {
+ PackChunk pc = ctx.getChunk(meta.getFragmentKey(fragId));
+ pc.copyEntireChunkAsIs(out, obj, validate);
+ }
+ }
+ }
+
+ void copyEntireChunkAsIs(PackOutputStream out, DhtObjectToPack obj,
+ boolean validate) throws IOException {
+ if (validate && !isValid()) {
+ if (obj != null)
+ throw new CorruptObjectException(obj, MessageFormat.format(
+ DhtText.get().corruptChunk, getChunkKey()));
+ else
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().corruptChunk, getChunkKey()));
+ }
+
+ // Do not copy the trailer onto the output stream.
+ out.write(dataBuf, dataPtr, dataLen - TRAILER_SIZE);
+ }
+
+ @SuppressWarnings("boxing")
+ private boolean isValid() {
+ Boolean v = valid;
+ if (v == null) {
+ MessageDigest m = newMessageDigest();
+ m.update(dataBuf, dataPtr, dataLen);
+ v = key.getChunkHash().compareTo(m.digest(), 0) == 0;
+ valid = v;
+ }
+ return v.booleanValue();
+ }
+
+ /** @return the complete size of this chunk, in memory. */
+ int getTotalSize() {
+ // Assume the index is part of the buffer, and report the buffer's total size.
+ if (dataPtr != 0 || dataLen != dataBuf.length)
+ return dataBuf.length;
+
+ int sz = dataLen;
+ if (index != null)
+ sz += index.getIndexSize();
+ return sz;
+ }
+
+ ChunkKey getNextFragment() {
+ if (meta == null)
+ return null;
+
+ ChunkKey next = nextFragment;
+ if (next == null) {
+ next = meta.getNextFragment(getChunkKey());
+ nextFragment = next;
+ }
+ return next;
+ }
+
+ private static class Delta {
+ /** Child that applies onto this object. */
+ final Delta next;
+
+ /** The chunk the delta is stored in. */
+ final ChunkKey deltaChunk;
+
+ /** Offset of the delta object. */
+ final int deltaPos;
+
+ /** Size of the inflated delta stream. */
+ final int deltaSize;
+
+ /** Total size of the delta's pack entry header (including base). */
+ final int hdrLen;
+
+ /** The chunk the base is stored in. */
+ final ChunkKey baseChunk;
+
+ /** Offset of the base object. */
+ final int basePos;
+
+ Delta(Delta next, ChunkKey dc, int ofs, int sz, int hdrLen,
+ ChunkKey bc, int bp) {
+ this.next = next;
+ this.deltaChunk = dc;
+ this.deltaPos = ofs;
+ this.deltaSize = sz;
+ this.hdrLen = hdrLen;
+ this.baseChunk = bc;
+ this.basePos = bp;
+ }
+
+ byte[] decompress(PackChunk chunk, DhtReader reader)
+ throws DataFormatException, DhtException {
+ return inflate(deltaSize, chunk, deltaPos + hdrLen, reader);
+ }
+
+ DeltaBaseCache.Entry getBase(DhtReader ctx) {
+ return ctx.getDeltaBaseCache().get(baseChunk, basePos);
+ }
+
+ void putBase(DhtReader ctx, int type, byte[] data) {
+ ctx.getDeltaBaseCache().put(baseChunk, basePos, type, data);
+ }
+ }
+
+ private static void checkCycle(Delta delta, ChunkKey key, int ofs)
+ throws DeltaChainCycleException {
+ for (; delta != null; delta = delta.next) {
+ if (delta.deltaPos == ofs && delta.deltaChunk.equals(key))
+ throw DeltaChainCycleException.INSTANCE;
+ }
+ }
+
+ private static class DeltaChainCycleException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ static final DeltaChainCycleException INSTANCE = new DeltaChainCycleException();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java
new file mode 100644
index 0000000000..03a7c773e1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
+import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.revwalk.RevCommit;
+import org.eclipse.jgit.revwalk.RevTree;
+import org.eclipse.jgit.storage.dht.DhtReader.ChunkAndOffset;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.Database;
+
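+/**
+ * Background loader that fetches chunks ahead of the readers needing them.
+ * <p>
+ * Keys are scheduled with {@code push(...)} (directly, or via the prefetch
+ * hints stored in {@link ChunkMeta}) and consumed with {@code get(ChunkKey)}.
+ * Loading is throttled between a low and a high water mark so that, roughly,
+ * no more than the reader's configured prefetch limit of chunk data is ready
+ * or loading at any one time.
+ */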
+class Prefetcher implements StreamingCallback<Collection<PackChunk.Members>> {
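+ /**
+ * Lifecycle of a chunk key: ON_QUEUE (scheduled, not yet requested),
+ * LOADING (a read has been issued to the database), WAITING (a caller
+ * is blocked until the load finishes), READY (buffered and available),
+ * DONE (consumed, skipped, or left for the caller to load directly).
+ */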
+ private static enum Status {
+ ON_QUEUE, LOADING, WAITING, READY, DONE;
+ }
+
+ private final Database db;
+
+ private final DhtReader.Statistics stats;
+
+ private final int objectType;
+
+ private final HashMap<ChunkKey, PackChunk> ready;
+
+ private final HashMap<ChunkKey, Status> status;
+
+ private final LinkedList<ChunkKey> queue;
+
+ private final boolean followEdgeHints;
+
+ private final int averageChunkSize;
+
+ private final int highWaterMark;
+
+ private final int lowWaterMark;
+
+ private boolean cacheLoadedChunks;
+
+ private boolean first = true;
+
+ private boolean automaticallyPushHints = true;
+
+ private ChunkKey stopAt;
+
+ private int bytesReady;
+
+ private int bytesLoading;
+
+ private DhtException error;
+
+ Prefetcher(DhtReader reader, int objectType) {
+ this.db = reader.getDatabase();
+ this.stats = reader.getStatistics();
+ this.objectType = objectType;
+ this.ready = new HashMap<ChunkKey, PackChunk>();
+ this.status = new HashMap<ChunkKey, Status>();
+ this.queue = new LinkedList<ChunkKey>();
+ this.followEdgeHints = reader.getOptions().isPrefetchFollowEdgeHints();
+ this.averageChunkSize = reader.getInserterOptions().getChunkSize();
+ this.highWaterMark = reader.getOptions().getPrefetchLimit();
+
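+ // Refill once the buffered data falls to roughly four average
+ // chunks below the prefetch limit; when the limit is smaller than
+ // four chunks, fall back to refilling at about half of the limit.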
+ int lwm = (highWaterMark / averageChunkSize) - 4;
+ if (lwm <= 0)
+ lwm = (highWaterMark / averageChunkSize) / 2;
+ lowWaterMark = lwm * averageChunkSize;
+ cacheLoadedChunks = true;
+ }
+
+ boolean isType(int type) {
+ return objectType == type;
+ }
+
+ synchronized void setCacheLoadedChunks(boolean cacheLoadedChunks) {
+ this.cacheLoadedChunks = cacheLoadedChunks;
+ }
+
+ void push(DhtReader ctx, Collection<RevCommit> roots) throws DhtException,
+ MissingObjectException {
+ // Approximate walk by using hints from the most recent commit.
+ // Since the commits were recently parsed by the reader, we can
+ // ask the reader for their chunk locations and most likely get
+ // cache hits.
+
+ int time = -1;
+ PackChunk chunk = null;
+
+ for (RevCommit cmit : roots) {
+ if (time < cmit.getCommitTime()) {
+ ChunkAndOffset p = ctx.getChunkGently(cmit, cmit.getType());
+ if (p != null && p.chunk.getMeta() != null) {
+ time = cmit.getCommitTime();
+ chunk = p.chunk;
+ }
+ }
+ }
+
+ if (chunk != null) {
+ synchronized (this) {
+ status.put(chunk.getChunkKey(), Status.DONE);
+ push(chunk.getMeta());
+ }
+ }
+ }
+
+ void push(DhtReader ctx, RevTree start, RevTree end) throws DhtException,
+ MissingObjectException {
+ // Unlike commits, trees aren't likely to be loaded when they
+ // are pushed into the prefetcher. Find the tree and load it
+ // as necessary to get the prefetch meta established.
+ //
+ Sync<Map<ObjectIndexKey, Collection<ObjectInfo>>> sync = Sync.create();
+ Set<ObjectIndexKey> toFind = new HashSet<ObjectIndexKey>();
+ toFind.add(ObjectIndexKey.create(ctx.getRepositoryKey(), start));
+ toFind.add(ObjectIndexKey.create(ctx.getRepositoryKey(), end));
+ db.objectIndex().get(Context.READ_REPAIR, toFind, sync);
+
+ Map<ObjectIndexKey, Collection<ObjectInfo>> trees;
+ try {
+ trees = sync.get(ctx.getOptions().getTimeout());
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ } catch (TimeoutException e) {
+ throw new DhtTimeoutException(e);
+ }
+
+ ChunkKey startKey = chunk(trees.get(start));
+ if (startKey == null)
+ throw DhtReader.missing(start, OBJ_TREE);
+
+ ChunkKey endKey = chunk(trees.get(end));
+ if (endKey == null)
+ throw DhtReader.missing(end, OBJ_TREE);
+
+ synchronized (this) {
+ stopAt = endKey;
+ push(startKey);
+ maybeStartGet();
+ }
+ }
+
+ private static ChunkKey chunk(Collection<ObjectInfo> info) {
+ if (info == null || info.isEmpty())
+ return null;
+
+ List<ObjectInfo> infoList = new ArrayList<ObjectInfo>(info);
+ ObjectInfo.sort(infoList);
+ return infoList.get(0).getChunkKey();
+ }
+
+ void push(ChunkKey key) {
+ push(Collections.singleton(key));
+ }
+
+ void push(ChunkMeta meta) {
+ if (meta == null)
+ return;
+
+ ChunkMeta.PrefetchHint hint;
+ switch (objectType) {
+ case OBJ_COMMIT:
+ hint = meta.getCommitPrefetch();
+ break;
+ case OBJ_TREE:
+ hint = meta.getTreePrefetch();
+ break;
+ default:
+ return;
+ }
+
+ if (hint != null) {
+ synchronized (this) {
+ if (followEdgeHints && !hint.getEdge().isEmpty())
+ push(hint.getEdge());
+ else
+ push(hint.getSequential());
+ }
+ }
+ }
+
+ void push(Iterable<ChunkKey> list) {
+ synchronized (this) {
+ for (ChunkKey key : list) {
+ if (status.containsKey(key))
+ continue;
+
+ status.put(key, Status.ON_QUEUE);
+ queue.add(key);
+
+ if (key.equals(stopAt)) {
+ automaticallyPushHints = false;
+ break;
+ }
+ }
+
+ if (!first)
+ maybeStartGet();
+ }
+ }
+
+ synchronized ChunkAndOffset find(
+ @SuppressWarnings("hiding") RepositoryKey repo, AnyObjectId objId) {
+ for (PackChunk c : ready.values()) {
+ int p = c.findOffset(repo, objId);
+ if (0 <= p)
+ return new ChunkAndOffset(useReadyChunk(c.getChunkKey()), p);
+ }
+ return null;
+ }
+
+ synchronized PackChunk get(ChunkKey key) throws DhtException {
+ GET: for (;;) {
+ if (error != null)
+ throw error;
+
+ Status chunkStatus = status.get(key);
+ if (chunkStatus == null)
+ return null;
+
+ switch (chunkStatus) {
+ case ON_QUEUE:
+ if (queue.isEmpty()) {
+ // Should never happen, but let the caller load.
+ status.put(key, Status.DONE);
+ return null;
+
+ } else if (bytesReady + bytesLoading < highWaterMark) {
+ // Make sure it's first in the queue, start the load, and wait.
+ if (!queue.getFirst().equals(key)) {
+ int idx = queue.indexOf(key);
+ if (first && objectType == OBJ_COMMIT) {
+ // If the prefetcher has not started yet, skip all
+ // chunks up to this first request. Assume this
+ // initial out-of-order get occurred because the
+ // RevWalk has already parsed all of the commits
+ // up to this point and does not need them again.
+ //
+ for (; 0 < idx; idx--)
+ status.put(queue.removeFirst(), Status.DONE);
+ forceStartGet();
+ continue GET;
+ }
+
+ stats.access(key).cntPrefetcher_OutOfOrder++;
+ queue.remove(idx);
+ queue.addFirst(key);
+ }
+ forceStartGet();
+ continue GET;
+
+ } else {
+ // It cannot be moved up to the front of the queue
+ // without violating the prefetch size. Let the
+ // caller load the chunk out of order.
+ stats.access(key).cntPrefetcher_OutOfOrder++;
+ status.put(key, Status.DONE);
+ return null;
+ }
+
+ case LOADING: // Wait for a prefetch that is already started.
+ status.put(key, Status.WAITING);
+ //$FALL-THROUGH$
+ case WAITING:
+ stats.access(key).cntPrefetcher_WaitedForLoad++;
+ try {
+ wait();
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ }
+ continue GET;
+
+ case READY:
+ return useReadyChunk(key);
+
+ case DONE:
+ stats.access(key).cntPrefetcher_Revisited++;
+ return null;
+
+ default:
+ throw new IllegalStateException(key + " " + chunkStatus);
+ }
+ }
+ }
+
+ private PackChunk useReadyChunk(ChunkKey key) {
+ PackChunk chunk = ready.remove(key);
+
+ if (cacheLoadedChunks)
+ chunk = ChunkCache.get().put(chunk);
+
+ status.put(chunk.getChunkKey(), Status.DONE);
+ bytesReady -= chunk.getTotalSize();
+
+ if (automaticallyPushHints) {
+ push(chunk.getMeta());
+ maybeStartGet();
+ }
+
+ return chunk;
+ }
+
+ private void maybeStartGet() {
+ if (!queue.isEmpty() && bytesReady + bytesLoading <= lowWaterMark)
+ forceStartGet();
+ }
+
+ private void forceStartGet() {
+ // Use a LinkedHashSet so insertion order is iteration order.
+ // This may help a provider that loads keys sequentially in the
+ // set's iteration order to return data in the order we want it.
+ //
+ LinkedHashSet<ChunkKey> toLoad = new LinkedHashSet<ChunkKey>();
+ ChunkCache cache = ChunkCache.get();
+
+ while (bytesReady + bytesLoading < highWaterMark && !queue.isEmpty()) {
+ ChunkKey key = queue.removeFirst();
+ PackChunk chunk = cache.get(key);
+
+ if (chunk != null) {
+ stats.access(key).cntPrefetcher_ChunkCacheHit++;
+ chunkIsReady(chunk);
+ } else {
+ stats.access(key).cntPrefetcher_Load++;
+ toLoad.add(key);
+ status.put(key, Status.LOADING);
+ bytesLoading += averageChunkSize;
+
+ // For the first chunk, start immediately to reduce the
+ // startup latency associated with additional chunks.
+ if (first)
+ break;
+ }
+ }
+
+ if (!toLoad.isEmpty() && error == null)
+ db.chunk().get(Context.LOCAL, toLoad, this);
+
+ if (first) {
+ first = false;
+ maybeStartGet();
+ }
+ }
+
+ public synchronized void onPartialResult(Collection<PackChunk.Members> res) {
+ try {
+ bytesLoading -= averageChunkSize * res.size();
+ for (PackChunk.Members builder : res)
+ chunkIsReady(builder.build());
+ } catch (DhtException loadError) {
+ onError(loadError);
+ }
+ }
+
+ private void chunkIsReady(PackChunk chunk) {
+ ChunkKey key = chunk.getChunkKey();
+ ready.put(key, chunk);
+ bytesReady += chunk.getTotalSize();
+
+ if (status.put(key, Status.READY) == Status.WAITING)
+ notifyAll();
+ }
+
+ public synchronized void onSuccess(Collection<PackChunk.Members> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+ }
+
+ public synchronized void onFailure(DhtException asyncError) {
+ onError(asyncError);
+ }
+
+ private void onError(DhtException asyncError) {
+ if (error == null) {
+ error = asyncError;
+ notifyAll();
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java
new file mode 100644
index 0000000000..482caf8917
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.AsyncOperation;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.storage.dht.RefData.IdWithChunk;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.Database;
+
+class QueueObjectLookup<T extends ObjectId> implements AsyncOperation {
+ protected final RepositoryKey repo;
+
+ protected final Database db;
+
+ protected final DhtReader reader;
+
+ private final DhtReaderOptions options;
+
+ private final boolean reportMissing;
+
+ private final ArrayList<ObjectInfo> tmp;
+
+ private final int concurrentBatches;
+
+ private int runningBatches;
+
+ private Context context;
+
+ private Iterator<T> toFind;
+
+ private List<T> toRetry;
+
+ private ObjectWithInfo<T> nextResult;
+
+ private DhtException error;
+
+ private boolean needChunkOnly;
+
+ private boolean cacheLoadedInfo;
+
+ QueueObjectLookup(DhtReader reader, boolean reportMissing) {
+ this.repo = reader.getRepositoryKey();
+ this.db = reader.getDatabase();
+ this.reader = reader;
+ this.options = reader.getOptions();
+ this.reportMissing = reportMissing;
+ this.tmp = new ArrayList<ObjectInfo>(4);
+ this.context = Context.FAST_MISSING_OK;
+ this.toRetry = new ArrayList<T>();
+
+ this.concurrentBatches = options.getObjectIndexConcurrentBatches();
+ }
+
+ void setCacheLoadedInfo(boolean on) {
+ cacheLoadedInfo = on;
+ }
+
+ void setNeedChunkOnly(boolean on) {
+ needChunkOnly = on;
+ }
+
+ void init(Iterable<T> objectIds) {
+ toFind = lookInCache(objectIds).iterator();
+ }
+
+ private Iterable<T> lookInCache(Iterable<T> objects) {
+ RecentInfoCache infoCache = reader.getRecentInfoCache();
+ List<T> missing = null;
+ for (T obj : objects) {
+ if (needChunkOnly && obj instanceof IdWithChunk) {
+ push(obj, ((IdWithChunk) obj).getChunkKey());
+ continue;
+ }
+
+ List<ObjectInfo> info = infoCache.get(obj);
+ if (info != null && !info.isEmpty()) {
+ push(obj, info.get(0));
+ } else {
+ if (missing == null) {
+ if (objects instanceof List<?>)
+ missing = new ArrayList<T>(((List<?>) objects).size());
+ else
+ missing = new ArrayList<T>();
+ }
+ missing.add(obj);
+ }
+ }
+ if (missing != null)
+ return missing;
+ return Collections.emptyList();
+ }
+
+ synchronized ObjectWithInfo<T> nextObjectWithInfo()
+ throws MissingObjectException, IOException {
+ for (;;) {
+ if (error != null)
+ throw error;
+
+ // Consider starting another batch before popping a result.
+ // This ensures lookup is running while results are being
+ // consumed by the calling application.
+ //
+ while (runningBatches < concurrentBatches) {
+ if (!toFind.hasNext() // reached end of original input
+ && runningBatches == 0 // all batches finished
+ && toRetry != null // haven't yet retried
+ && !toRetry.isEmpty()) {
+ toFind = toRetry.iterator();
+ toRetry = null;
+ context = Context.READ_REPAIR;
+ }
+
+ if (toFind.hasNext())
+ startBatch(context);
+ else
+ break;
+ }
+
+ ObjectWithInfo<T> c = pop();
+ if (c != null) {
+ if (c.chunkKey != null)
+ return c;
+ else
+ throw missing(c.object);
+
+ } else if (!toFind.hasNext() && runningBatches == 0)
+ return null;
+
+ try {
+ wait();
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ }
+ }
+ }
+
+ private synchronized void startBatch(final Context ctx) {
+ final int batchSize = options.getObjectIndexBatchSize();
+ final Map<ObjectIndexKey, T> batch = new HashMap<ObjectIndexKey, T>();
+ while (toFind.hasNext() && batch.size() < batchSize) {
+ T obj = toFind.next();
+ batch.put(ObjectIndexKey.create(repo, obj), obj);
+ }
+
+ final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> cb;
+
+ cb = new AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>>() {
+ public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> r) {
+ processResults(ctx, batch, r);
+ }
+
+ public void onFailure(DhtException e) {
+ processFailure(e);
+ }
+ };
+ db.objectIndex().get(ctx, batch.keySet(), cb);
+ runningBatches++;
+ }
+
+ private synchronized void processResults(Context ctx,
+ Map<ObjectIndexKey, T> batch,
+ Map<ObjectIndexKey, Collection<ObjectInfo>> objects) {
+ for (T obj : batch.values()) {
+ Collection<ObjectInfo> matches = objects.get(obj);
+
+ if (matches == null || matches.isEmpty()) {
+ if (ctx == Context.FAST_MISSING_OK)
+ toRetry.add(obj);
+ else if (reportMissing)
+ push(obj, (ChunkKey) null);
+ continue;
+ }
+
+ tmp.clear();
+ tmp.addAll(matches);
+ ObjectInfo.sort(tmp);
+ if (cacheLoadedInfo)
+ reader.getRecentInfoCache().put(obj, tmp);
+
+ push(obj, tmp.get(0));
+ }
+
+ runningBatches--;
+ notify();
+ }
+
+ private synchronized void processFailure(DhtException e) {
+ runningBatches--;
+ error = e;
+ notify();
+ }
+
+ private void push(T obj, ChunkKey chunkKey) {
+ nextResult = new ObjectWithInfo<T>(obj, chunkKey, nextResult);
+ }
+
+ private void push(T obj, ObjectInfo info) {
+ nextResult = new ObjectWithInfo<T>(obj, info, nextResult);
+ }
+
+ private ObjectWithInfo<T> pop() {
+ ObjectWithInfo<T> r = nextResult;
+ if (r == null)
+ return null;
+ nextResult = r.next;
+ return r;
+ }
+
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return true;
+ }
+
+ public void release() {
+ // Do nothing, there is nothing to abort or discard.
+ }
+
+ private static <T extends ObjectId> MissingObjectException missing(T id) {
+ return new MissingObjectException(id, DhtText.get().objectTypeUnknown);
+ }
+
+ static class ObjectWithInfo<T extends ObjectId> {
+ final T object;
+
+ final ObjectInfo info;
+
+ final ChunkKey chunkKey;
+
+ final ObjectWithInfo<T> next;
+
+ ObjectWithInfo(T object, ObjectInfo info, ObjectWithInfo<T> next) {
+ this.object = object;
+ this.info = info;
+ this.chunkKey = info.getChunkKey();
+ this.next = next;
+ }
+
+ ObjectWithInfo(T object, ChunkKey chunkKey, ObjectWithInfo<T> next) {
+ this.object = object;
+ this.info = null;
+ this.chunkKey = chunkKey;
+ this.next = next;
+ }
+ }
+}
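
QueueObjectLookup overlaps index lookups with their consumption: it keeps up to the configured number of asynchronous batches outstanding, pops results as they complete, and makes one extra READ_REPAIR pass over anything the fast path reported missing. The same start-before-drain shape, sketched with plain java.util.concurrent types and illustrative names:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;

/** Illustrative sketch of bounded, overlapping lookup batches; names are not from this patch. */
class BatchedLookupSketch {
	interface BatchFn<T, R> {
		List<R> lookup(List<T> batch) throws Exception;
	}

	static <T, R> List<R> run(Iterator<T> input, int batchSize,
			int concurrentBatches, ExecutorService pool, final BatchFn<T, R> fn)
			throws Exception {
		CompletionService<List<R>> cs = new ExecutorCompletionService<List<R>>(pool);
		List<R> out = new ArrayList<R>();
		int running = 0;

		while (input.hasNext() || running > 0) {
			// Start more batches before draining, so lookups overlap consumption.
			while (running < concurrentBatches && input.hasNext()) {
				final List<T> batch = new ArrayList<T>(batchSize);
				while (input.hasNext() && batch.size() < batchSize)
					batch.add(input.next());
				cs.submit(new Callable<List<R>>() {
					public List<R> call() throws Exception {
						return fn.lookup(batch);
					}
				});
				running++;
			}

			out.addAll(cs.take().get()); // block until the next batch finishes
			running--;
		}
		return out;
	}
}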
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java
new file mode 100644
index 0000000000..f704c1daf5
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.storage.dht.DhtReader.ChunkAndOffset;
+import org.eclipse.jgit.storage.dht.RefData.IdWithChunk;
+
+final class RecentChunks {
+ private final DhtReader reader;
+
+ private final DhtReader.Statistics stats;
+
+ private final int maxSize;
+
+ private int curSize;
+
+ private Node lruHead;
+
+ private Node lruTail;
+
+ RecentChunks(DhtReader reader) {
+ this.reader = reader;
+ this.stats = reader.getStatistics();
+ this.maxSize = reader.getOptions().getRecentChunkCacheSize();
+ }
+
+ PackChunk get(ChunkKey key) {
+ for (Node n = lruHead; n != null; n = n.next) {
+ if (key.equals(n.chunk.getChunkKey())) {
+ hit(n);
+ stats.recentChunks_Hits++;
+ return n.chunk;
+ }
+ }
+ stats.recentChunks_Miss++;
+ return null;
+ }
+
+ void put(PackChunk chunk) {
+ for (Node n = lruHead; n != null; n = n.next) {
+ if (n.chunk == chunk) {
+ hit(n);
+ return;
+ }
+ }
+
+ Node n;
+ if (curSize < maxSize) {
+ n = new Node();
+ curSize++;
+ } else {
+ n = lruTail;
+ }
+ n.chunk = chunk;
+ hit(n);
+ }
+
+ ObjectLoader open(RepositoryKey repo, AnyObjectId objId, int typeHint)
+ throws IOException {
+ if (objId instanceof IdWithChunk) {
+ PackChunk chunk = get(((IdWithChunk) objId).getChunkKey());
+ if (chunk != null) {
+ int pos = chunk.findOffset(repo, objId);
+ if (0 <= pos)
+ return PackChunk.read(chunk, pos, reader, typeHint);
+ }
+
+			// IdWithChunk is only a hint, and can be wrong. Searching
+			// locally is faster than looking in the Database.
+ }
+
+ for (Node n = lruHead; n != null; n = n.next) {
+ int pos = n.chunk.findOffset(repo, objId);
+ if (0 <= pos) {
+ hit(n);
+ stats.recentChunks_Hits++;
+ return PackChunk.read(n.chunk, pos, reader, typeHint);
+ }
+ }
+
+ return null;
+ }
+
+ ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) {
+ if (objId instanceof IdWithChunk) {
+ PackChunk chunk = get(((IdWithChunk) objId).getChunkKey());
+ if (chunk != null) {
+ int pos = chunk.findOffset(repo, objId);
+ if (0 <= pos)
+ return new ChunkAndOffset(chunk, pos);
+ }
+
+			// IdWithChunk is only a hint, and can be wrong. Searching
+			// locally is faster than looking in the Database.
+ }
+
+ for (Node n = lruHead; n != null; n = n.next) {
+ int pos = n.chunk.findOffset(repo, objId);
+ if (0 <= pos) {
+ hit(n);
+ stats.recentChunks_Hits++;
+ return new ChunkAndOffset(n.chunk, pos);
+ }
+ }
+
+ return null;
+ }
+
+ boolean has(RepositoryKey repo, AnyObjectId objId) {
+ for (Node n = lruHead; n != null; n = n.next) {
+ int pos = n.chunk.findOffset(repo, objId);
+ if (0 <= pos) {
+ hit(n);
+ stats.recentChunks_Hits++;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void clear() {
+ curSize = 0;
+ lruHead = null;
+ lruTail = null;
+ }
+
+ private void hit(Node n) {
+ if (lruHead != n) {
+ remove(n);
+ first(n);
+ }
+ }
+
+ private void remove(Node node) {
+ Node p = node.prev;
+ Node n = node.next;
+
+ if (p != null)
+ p.next = n;
+ if (n != null)
+ n.prev = p;
+
+ if (lruHead == node)
+ lruHead = n;
+ if (lruTail == node)
+ lruTail = p;
+ }
+
+ private void first(Node node) {
+ Node h = lruHead;
+
+ node.prev = null;
+ node.next = h;
+
+ if (h != null)
+ h.prev = node;
+ else
+ lruTail = node;
+
+ lruHead = node;
+ }
+
+ private static class Node {
+ PackChunk chunk;
+
+ Node prev;
+
+ Node next;
+ }
+}
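
RecentChunks holds only a handful of chunks, so a doubly linked list with a linear scan is cheaper than a hash map: a hit moves the node to the front, and a full cache recycles the tail node in place rather than allocating. A generic version of that move-to-front list, for illustration only:

/** Illustrative sketch of the move-to-front list used above; not code from this patch. */
class TinyLru<K, V> {
	private static class Node<K, V> {
		K key;
		V value;
		Node<K, V> prev, next;
	}

	private final int maxSize;
	private int curSize;
	private Node<K, V> head, tail;

	TinyLru(int maxSize) {
		this.maxSize = maxSize;
	}

	V get(K key) {
		for (Node<K, V> n = head; n != null; n = n.next) {
			if (key.equals(n.key)) {
				moveToFront(n); // a hit makes the node most recently used
				return n.value;
			}
		}
		return null;
	}

	void put(K key, V value) {
		Node<K, V> n;
		if (curSize < maxSize) {
			n = new Node<K, V>();
			curSize++;
		} else {
			n = tail; // recycle the least recently used node
			unlink(n);
		}
		n.key = key;
		n.value = value;
		linkFirst(n);
	}

	private void moveToFront(Node<K, V> n) {
		if (head != n) {
			unlink(n);
			linkFirst(n);
		}
	}

	private void unlink(Node<K, V> n) {
		if (n.prev != null)
			n.prev.next = n.next;
		if (n.next != null)
			n.next.prev = n.prev;
		if (head == n)
			head = n.next;
		if (tail == n)
			tail = n.prev;
		n.prev = null;
		n.next = null;
	}

	private void linkFirst(Node<K, V> n) {
		n.next = head;
		if (head != null)
			head.prev = n;
		else
			tail = n;
		head = n;
	}
}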
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java
new file mode 100644
index 0000000000..cb5882af12
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+
+final class RecentInfoCache {
+ private final Map<ObjectId, List<ObjectInfo>> infoCache;
+
+ RecentInfoCache(DhtReaderOptions options) {
+ final int sz = options.getRecentInfoCacheSize();
+ infoCache = new LinkedHashMap<ObjectId, List<ObjectInfo>>(sz, 0.75f, true) {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ protected boolean removeEldestEntry(Entry<ObjectId, List<ObjectInfo>> e) {
+ return sz < size();
+ }
+ };
+ }
+
+ List<ObjectInfo> get(AnyObjectId id) {
+ return infoCache.get(id);
+ }
+
+ void put(AnyObjectId id, List<ObjectInfo> info) {
+ infoCache.put(id.copy(), copyList(info));
+ }
+
+ private static List<ObjectInfo> copyList(List<ObjectInfo> info) {
+ int cnt = info.size();
+ if (cnt == 1)
+ return Collections.singletonList(info.get(0));
+
+ ObjectInfo[] tmp = info.toArray(new ObjectInfo[cnt]);
+ return Collections.unmodifiableList(Arrays.asList(tmp));
+ }
+
+ void clear() {
+ infoCache.clear();
+ }
+}
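
RecentInfoCache gets LRU behaviour almost for free: a LinkedHashMap built with accessOrder=true reorders entries on get(), and overriding removeEldestEntry() evicts once the configured size is exceeded. The same idea as a small reusable class (illustrative, not part of the patch):

import java.util.LinkedHashMap;
import java.util.Map;

/** Illustrative sketch of a bounded LRU map; not code from this patch. */
class LruMap<K, V> extends LinkedHashMap<K, V> {
	private static final long serialVersionUID = 1L;

	private final int maxSize;

	LruMap(int maxSize) {
		// accessOrder=true makes get() reorder entries, so eviction is LRU.
		super(maxSize, 0.75f, true);
		this.maxSize = maxSize;
	}

	@Override
	protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
		return maxSize < size(); // evict once the cap is exceeded
	}
}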
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefData.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefData.java
new file mode 100644
index 0000000000..e34e9d1c34
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefData.java
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_STRING_LENGTH;
+import static org.eclipse.jgit.storage.dht.TinyProtobuf.encode;
+
+import java.util.Arrays;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.Ref;
+import org.eclipse.jgit.storage.dht.TinyProtobuf.Encoder;
+
+/**
+ * Describes the current state of a Git reference.
+ * <p>
+ * The reference state contains not just the SHA-1 object name that a reference
+ * points to, but the state also caches its peeled value if it is a tag, and the
+ * {@link ChunkKey} the object was observed in when the reference was last
+ * updated. This cached data reduces latency when initially starting to work
+ * with a repository.
+ */
+public class RefData {
+ /** Magic constant meaning does not exist. */
+ public static final RefData NONE = new RefData(new byte[0]);
+
+ static final int TAG_SYMREF = 1;
+
+ static final int TAG_TARGET = 2;
+
+ static final int TAG_IS_PEELED = 3;
+
+ static final int TAG_PEELED = 4;
+
+	/**
+	 * @param data
+	 *            the encoded reference data, as stored in the DHT.
+	 * @return the reference state represented by {@code data}.
+	 */
+ public static RefData fromBytes(byte[] data) {
+ return new RefData(data);
+ }
+
+ static RefData symbolic(String target) {
+ Encoder e = encode(2 + target.length());
+ e.string(TAG_SYMREF, target);
+ return new RefData(e.asByteArray());
+ }
+
+ static RefData id(AnyObjectId id) {
+ Encoder e = encode(4 + OBJECT_ID_STRING_LENGTH + ChunkKey.KEYLEN);
+ e.message(TAG_TARGET, IdWithChunk.encode(id));
+ return new RefData(e.asByteArray());
+ }
+
+ static RefData fromRef(Ref ref) {
+ if (ref.isSymbolic())
+ return symbolic(ref.getTarget().getName());
+
+ if (ref.getObjectId() == null)
+ return RefData.NONE;
+
+ int max = 8 + 2 * OBJECT_ID_STRING_LENGTH + 2 * ChunkKey.KEYLEN;
+ Encoder e = encode(max);
+ e.message(TAG_TARGET, IdWithChunk.encode(ref.getObjectId()));
+ if (ref.isPeeled()) {
+ e.bool(TAG_IS_PEELED, true);
+ if (ref.getPeeledObjectId() != null)
+ e.message(TAG_PEELED,
+ IdWithChunk.encode(ref.getPeeledObjectId()));
+ }
+ return new RefData(e.asByteArray());
+ }
+
+ static RefData peeled(ObjectId targetId, ObjectId peeledId) {
+ int max = 8 + 2 * OBJECT_ID_STRING_LENGTH + 2 * ChunkKey.KEYLEN;
+ Encoder e = encode(max);
+ e.message(TAG_TARGET, IdWithChunk.encode(targetId));
+ e.bool(TAG_IS_PEELED, true);
+ if (peeledId != null)
+ e.message(TAG_PEELED, IdWithChunk.encode(peeledId));
+ return new RefData(e.asByteArray());
+ }
+
+ private final byte[] data;
+
+ RefData(byte[] data) {
+ this.data = data;
+ }
+
+ TinyProtobuf.Decoder decode() {
+ return TinyProtobuf.decode(data);
+ }
+
+ /** @return the contents, encoded as a byte array for storage. */
+ public byte[] asBytes() {
+ return data;
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 5381;
+ for (int ptr = 0; ptr < data.length; ptr++)
+ hash = ((hash << 5) + hash) + (data[ptr] & 0xff);
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof RefData)
+ return Arrays.equals(data, ((RefData) other).data);
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ TinyProtobuf.Decoder d = decode();
+ for (;;) {
+ switch (d.next()) {
+ case 0:
+ return b.toString().substring(1);
+ case TAG_SYMREF:
+ b.append("\nsymref: ").append(d.string());
+ continue;
+ case TAG_TARGET:
+ b.append("\ntarget: ").append(IdWithChunk.decode(d.message()));
+ continue;
+ case TAG_IS_PEELED:
+ b.append("\nis_peeled: ").append(d.bool());
+ continue;
+ case TAG_PEELED:
+ b.append("\npeeled: ").append(IdWithChunk.decode(d.message()));
+ continue;
+ default:
+ d.skip();
+ continue;
+ }
+ }
+ }
+
+ static class IdWithChunk extends ObjectId {
+ static ObjectId decode(TinyProtobuf.Decoder d) {
+ ObjectId id = null;
+ ChunkKey key = null;
+ DECODE: for (;;) {
+ switch (d.next()) {
+ case 0:
+ break DECODE;
+ case 1:
+ id = d.stringObjectId();
+ continue;
+ case 2:
+ key = ChunkKey.fromBytes(d);
+ continue;
+ default:
+ d.skip();
+ }
+ }
+ return key != null ? new IdWithChunk(id, key) : id;
+ }
+
+ static TinyProtobuf.Encoder encode(AnyObjectId id) {
+ if (id instanceof IdWithChunk) {
+ int max = 4 + OBJECT_ID_STRING_LENGTH + ChunkKey.KEYLEN;
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(max);
+ e.string(1, id);
+ e.string(2, ((IdWithChunk) id).chunkKey);
+ return e;
+ } else {
+ int max = 2 + OBJECT_ID_STRING_LENGTH;
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(max);
+ e.string(1, id);
+ return e;
+ }
+ }
+
+ private final ChunkKey chunkKey;
+
+ IdWithChunk(AnyObjectId id, ChunkKey key) {
+ super(id);
+ this.chunkKey = key;
+ }
+
+ ChunkKey getChunkKey() {
+ return chunkKey;
+ }
+
+ @Override
+ public String toString() {
+ return name() + "->" + chunkKey;
+ }
+ }
+}
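
RefData stores the reference state as a few tagged fields (symbolic target, object id with chunk hint, peeled flag, peeled id), so a reader can skip tags it does not recognise. The sketch below shows only the general tag/value idea using DataOutputStream; it is not the TinyProtobuf wire format the class actually uses:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/** Tag/value layout sketch only; RefData really uses the TinyProtobuf format, not this one. */
class RefStateSketch {
	static final int TAG_SYMREF = 1;

	static final int TAG_TARGET = 2;

	static final int TAG_IS_PEELED = 3;

	static final int TAG_PEELED = 4;

	static byte[] encodeSymbolic(String target) throws IOException {
		ByteArrayOutputStream buf = new ByteArrayOutputStream();
		DataOutputStream out = new DataOutputStream(buf);
		out.writeByte(TAG_SYMREF); // field tag
		out.writeUTF(target); // e.g. "refs/heads/master"
		return buf.toByteArray();
	}

	static byte[] encodePeeledTag(String targetHex, String peeledHex)
			throws IOException {
		ByteArrayOutputStream buf = new ByteArrayOutputStream();
		DataOutputStream out = new DataOutputStream(buf);
		out.writeByte(TAG_TARGET);
		out.writeUTF(targetHex); // SHA-1 the reference points at
		out.writeByte(TAG_IS_PEELED);
		out.writeBoolean(true);
		out.writeByte(TAG_PEELED);
		out.writeUTF(peeledHex); // SHA-1 of the peeled tag target
		return buf.toByteArray();
	}
}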
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java
new file mode 100644
index 0000000000..b4d378f81a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.encode;
+import static org.eclipse.jgit.storage.dht.KeyUtils.format32;
+import static org.eclipse.jgit.storage.dht.KeyUtils.parse32;
+import static org.eclipse.jgit.util.RawParseUtils.decode;
+
+import org.eclipse.jgit.lib.Constants;
+
+/** Unique identifier of a reference in the DHT. */
+public final class RefKey implements RowKey {
+ /**
+ * @param repo
+ * @param name
+ * @return the key
+ */
+ public static RefKey create(RepositoryKey repo, String name) {
+ return new RefKey(repo.asInt(), name);
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static RefKey fromBytes(byte[] key) {
+ int repo = parse32(key, 0);
+ String name = decode(key, 9, key.length);
+ return new RefKey(repo, name);
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static RefKey fromString(String key) {
+ int c = key.indexOf(':');
+ int repo = parse32(Constants.encodeASCII(key.substring(0, c)), 0);
+ String name = key.substring(c + 1);
+ return new RefKey(repo, name);
+ }
+
+ private final int repo;
+
+ private final String name;
+
+ RefKey(int repo, String name) {
+ this.repo = repo;
+ this.name = name;
+ }
+
+ /** @return the repository this reference lives within. */
+ public RepositoryKey getRepositoryKey() {
+ return RepositoryKey.fromInt(repo);
+ }
+
+ /** @return the name of the reference. */
+ public String getName() {
+ return name;
+ }
+
+ public byte[] asBytes() {
+ byte[] nameRaw = encode(name);
+ byte[] r = new byte[9 + nameRaw.length];
+ format32(r, 0, repo);
+ r[8] = ':';
+ System.arraycopy(nameRaw, 0, r, 9, nameRaw.length);
+ return r;
+ }
+
+ public String asString() {
+ return getRepositoryKey().asString() + ":" + name;
+ }
+
+ @Override
+ public int hashCode() {
+ return name.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof RefKey) {
+ RefKey thisRef = this;
+ RefKey otherRef = (RefKey) other;
+ return thisRef.repo == otherRef.repo
+ && thisRef.name.equals(otherRef.name);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "ref:" + asString();
+ }
+}
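
RefKey lays its row key out as the repository id in the first eight bytes, a ':' at index 8, and then the reference name, so all references of one repository share a common prefix and sort together. Assuming the 32-bit id is rendered as eight hex characters (an assumption; only the width and separator are visible above), the layout can be sketched as:

import java.io.UnsupportedEncodingException;

/** Layout sketch; assumes the id renders as eight hex characters, which is not verified here. */
class RefRowKeySketch {
	static byte[] key(int repoId, String refName)
			throws UnsupportedEncodingException {
		// Eight hex digits for the repository, a ':' at index 8, then the name.
		return (String.format("%08x", repoId) + ":" + refName).getBytes("UTF-8");
	}

	static String refName(byte[] key) throws UnsupportedEncodingException {
		return new String(key, "UTF-8").substring(9); // skip the prefix and ':'
	}
}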
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java
new file mode 100644
index 0000000000..2835d62507
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.storage.dht.KeyUtils.format32;
+import static org.eclipse.jgit.storage.dht.KeyUtils.parse32;
+import static org.eclipse.jgit.util.RawParseUtils.decode;
+
+import org.eclipse.jgit.lib.Constants;
+
+/** Unique identifier of a repository in the DHT. */
+public final class RepositoryKey implements RowKey {
+ /**
+ * @param sequentialId
+ * @return the key
+ */
+ public static RepositoryKey create(int sequentialId) {
+ return new RepositoryKey(Integer.reverse(sequentialId));
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static RepositoryKey fromBytes(byte[] key) {
+ return new RepositoryKey(parse32(key, 0));
+ }
+
+ /**
+ * @param key
+ * @return the key
+ */
+ public static RepositoryKey fromString(String key) {
+ return new RepositoryKey(parse32(Constants.encodeASCII(key), 0));
+ }
+
+ /**
+ * @param reverseId
+ * @return the key
+ */
+ public static RepositoryKey fromInt(int reverseId) {
+ return new RepositoryKey(reverseId);
+ }
+
+ private final int id;
+
+ RepositoryKey(int id) {
+ this.id = id;
+ }
+
+ /** @return 32 bit value describing the repository. */
+ public int asInt() {
+ return id;
+ }
+
+ public byte[] asBytes() {
+ byte[] r = new byte[8];
+ format32(r, 0, asInt());
+ return r;
+ }
+
+ public String asString() {
+ return decode(asBytes());
+ }
+
+ @Override
+ public int hashCode() {
+ return id;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof RepositoryKey)
+ return id == ((RepositoryKey) other).id;
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "repository:" + asString();
+ }
+}
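
RepositoryKey.create() passes the sequential id through Integer.reverse() before storing it, so consecutive ids land far apart in the key space, a common trick for keeping new repositories from piling onto one region of an ordered DHT. A tiny demo of the effect:

/** Demo of how Integer.reverse() spreads sequential ids; illustrative only. */
class ReverseIdDemo {
	public static void main(String[] args) {
		for (int seq = 1; seq <= 4; seq++)
			System.out.println(String.format("%d -> %08x", seq, Integer.reverse(seq)));
		// Prints 1 -> 80000000, 2 -> 40000000, 3 -> c0000000, 4 -> 20000000:
		// neighbouring ids land far apart instead of forming one hot key range.
	}
}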
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java
new file mode 100644
index 0000000000..18443fa8ea
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.encode;
+import static org.eclipse.jgit.util.RawParseUtils.decode;
+
+/** Unique name of a repository, as specified by the URL. */
+public class RepositoryName implements RowKey {
+ /**
+ * @param name
+ * @return the key
+ */
+ public static RepositoryName create(String name) {
+ return new RepositoryName(name);
+ }
+
+ /**
+ * @param name
+ * @return the key
+ */
+ public static RepositoryName fromBytes(byte[] name) {
+ return new RepositoryName(decode(name));
+ }
+
+ /**
+ * @param name
+ * @return the key
+ */
+ public static RepositoryName fromString(String name) {
+ return new RepositoryName(name);
+ }
+
+ private final String name;
+
+ RepositoryName(String name) {
+ this.name = name;
+ }
+
+ public byte[] asBytes() {
+ return encode(name);
+ }
+
+ public String asString() {
+ return name;
+ }
+
+ @Override
+ public int hashCode() {
+ return name.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof RepositoryName)
+ return name.equals(((RepositoryName) other).name);
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "repository:" + asString();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java
new file mode 100644
index 0000000000..8c14d30452
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.util.List;
+
+import org.eclipse.jgit.lib.ProgressMonitor;
+import org.eclipse.jgit.storage.pack.PackWriter;
+
+final class RepresentationSelector extends BatchObjectLookup<DhtObjectToPack> {
+ private final PackWriter packer;
+
+ private final DhtObjectRepresentation rep;
+
+ RepresentationSelector(PackWriter packer, DhtReader reader,
+ ProgressMonitor monitor) {
+ super(reader, monitor);
+ setRetryMissingObjects(true);
+
+ this.packer = packer;
+ this.rep = new DhtObjectRepresentation();
+ }
+
+ protected void onResult(DhtObjectToPack obj, List<ObjectInfo> info) {
+ // Go through the objects backwards. This is necessary because
+ // info is sorted oldest->newest but PackWriter wants the reverse
+		// order to try to prevent delta chain cycles.
+ //
+ for (int i = info.size() - 1; 0 <= i; i--) {
+ rep.set(info.get(i));
+ packer.select(obj, rep);
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java
new file mode 100644
index 0000000000..e088b361c4
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+/**
+ * Key for any row that the DHT will be asked to store.
+ * <p>
+ * Implementations of this interface know how to encode and decode themselves
+ * from a byte array format, expecting the DHT to use the byte array as the row
+ * key within the database.
+ * <p>
+ * It is strongly encouraged to use only row keys that are valid UTF-8 strings,
+ * as most DHT systems have client tools that can interact with rows using the
+ * UTF-8 encoding.
+ */
+public interface RowKey {
+ /** @return key formatted as byte array for storage in the DHT. */
+ public byte[] asBytes();
+
+ /** @return key formatted as a String for storage in the DHT. */
+ public String asString();
+
+ /** @return relatively unique hash code value for in-memory compares. */
+ public int hashCode();
+
+ /**
+ * Compare this key to another key for equality.
+ *
+ * @param other
+ * the other key instance, may be null.
+ * @return true if these keys reference the same row.
+ */
+ public boolean equals(Object other);
+
+ /** @return pretty printable string for debugging/reporting only. */
+ public String toString();
+}
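
A RowKey implementation is expected to round-trip through asBytes()/asString(), behave well as a hash key, and keep its byte form valid UTF-8 so DHT client tools can display it. A hypothetical key type showing the shape of that contract (the type and its prefix are invented for illustration):

import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.storage.dht.RowKey;
import org.eclipse.jgit.util.RawParseUtils;

/** Hypothetical key type showing the RowKey contract; it is not part of this patch. */
final class NoteKeySketch implements RowKey {
	static NoteKeySketch fromBytes(byte[] key) {
		return new NoteKeySketch(RawParseUtils.decode(key));
	}

	private final String name;

	NoteKeySketch(String name) {
		this.name = name;
	}

	public byte[] asBytes() {
		return Constants.encode(name); // UTF-8 keeps the row readable in DHT client tools
	}

	public String asString() {
		return name;
	}

	@Override
	public int hashCode() {
		return name.hashCode();
	}

	@Override
	public boolean equals(Object other) {
		return other instanceof NoteKeySketch
				&& name.equals(((NoteKeySketch) other).name);
	}

	@Override
	public String toString() {
		return "note:" + asString();
	}
}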
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java
new file mode 100644
index 0000000000..3069886283
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.errors.MissingObjectException;
+import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
+import org.eclipse.jgit.lib.ObjectId;
+
+final class SizeQueue<T extends ObjectId> extends QueueObjectLookup<T>
+ implements AsyncObjectSizeQueue<T> {
+ private ObjectWithInfo<T> currResult;
+
+ SizeQueue(DhtReader reader, Iterable<T> objectIds, boolean reportMissing) {
+ super(reader, reportMissing);
+ init(objectIds);
+ }
+
+ public boolean next() throws MissingObjectException, IOException {
+ currResult = nextObjectWithInfo();
+ return currResult != null;
+ }
+
+ public T getCurrent() {
+ return currResult.object;
+ }
+
+ public long getSize() {
+ return currResult.info.getSize();
+ }
+
+ public ObjectId getObjectId() {
+ return getCurrent();
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java
new file mode 100644
index 0000000000..9ec379f0ec
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+/**
+ * Extension of {@link AsyncCallback} supporting partial results.
+ * <p>
+ * Instead of buffering all results for {@link #onSuccess(Object)}, the storage
+ * provider may choose to offer some results earlier by invoking the
+ * {@link #onPartialResult(Object)} method declared in this interface.
+ * <p>
+ * If any results were delivered early to {@link #onPartialResult(Object)} then
+ * {@link #onSuccess(Object)} is invoked with {@code null} when all results have
+ * been supplied and no more remain to be delivered.
+ * <p>
+ * If an error occurs, {@link #onFailure(DhtException)} will be invoked,
+ * potentially after one or more {@link #onPartialResult(Object)} notifications
+ * were already made. In an error condition, {@link #onSuccess(Object)} will not
+ * be invoked.
+ *
+ * @param <T>
+ * type of object returned from the operation on success.
+ */
+public interface StreamingCallback<T> extends AsyncCallback<T> {
+ /**
+ * Receives partial results from the operation.
+ *
+ * @param result
+ * the result value from the operation.
+ */
+ public void onPartialResult(T result);
+}
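
A StreamingCallback consumer has to cope with rows arriving across several onPartialResult() calls, a final onSuccess() that may be null, and an onFailure() that can follow partial data. A hypothetical consumer honouring that contract:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.StreamingCallback;

/** Hypothetical consumer following the partial-result contract; not part of this patch. */
class CollectingCallback implements StreamingCallback<Collection<String>> {
	private final List<String> rows = new ArrayList<String>();

	private DhtException error;

	private boolean done;

	public synchronized void onPartialResult(Collection<String> result) {
		rows.addAll(result); // early rows may arrive here several times
	}

	public synchronized void onSuccess(Collection<String> result) {
		if (result != null)
			rows.addAll(result); // a provider may also deliver everything at once
		done = true; // null simply means "no more rows"
		notifyAll();
	}

	public synchronized void onFailure(DhtException asyncError) {
		error = asyncError; // may follow one or more partial results
		done = true;
		notifyAll();
	}

	synchronized List<String> awaitRows() throws DhtException, InterruptedException {
		while (!done)
			wait();
		if (error != null)
			throw error;
		return rows;
	}
}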
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java
new file mode 100644
index 0000000000..4833375e46
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Helper to implement a synchronous method in terms of an asynchronous one.
+ * <p>
+ * Implementors can use this type to wait for an asynchronous computation to
+ * complete on a background thread by passing the Sync instance as though it
+ * were the AsyncCallback:
+ *
+ * <pre>
+ * Sync&lt;T&gt; sync = Sync.create();
+ * async(..., sync);
+ * return sync.get(timeout, TimeUnit.MILLISECONDS);
+ * </pre>
+ *
+ * @param <T>
+ * type of value object.
+ */
+public abstract class Sync<T> implements AsyncCallback<T> {
+ private static final Sync<?> NONE = new Sync<Object>() {
+ public void onSuccess(Object result) {
+ // Discard
+ }
+
+ public void onFailure(DhtException error) {
+ // Discard
+ }
+
+ @Override
+ public Object get(long timeout, TimeUnit unit) throws DhtException,
+ InterruptedException, TimeoutException {
+ return null;
+ }
+ };
+
+ /**
+ * Helper method to create a new sync object.
+ *
+ * @param <T>
+ * type of value object.
+ * @return a new instance.
+ */
+ public static <T> Sync<T> create() {
+ return new Value<T>();
+ }
+
+ /**
+ * Singleton callback that ignores onSuccess, onFailure.
+ *
+ * @param <T>
+ * type of value object.
+ * @return callback that discards all results.
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> Sync<T> none() {
+ return (Sync<T>) NONE;
+ }
+
+ /**
+ * Wait for the asynchronous operation to complete.
+ * <p>
+ * To prevent application deadlock, waiting can only be performed with the
+ * supplied timeout.
+ *
+ * @param timeout
+ * amount of time to wait before failing.
+ * @return the returned value.
+ * @throws DhtException
+ * the asynchronous operation failed.
+ * @throws InterruptedException
+ * the current thread was interrupted before the operation
+ * completed.
+ * @throws TimeoutException
+ * the timeout elapsed before the operation completed.
+ */
+ public T get(Timeout timeout) throws DhtException, InterruptedException,
+ TimeoutException {
+ return get(timeout.getTime(), timeout.getUnit());
+ }
+
+ /**
+ * Wait for the asynchronous operation to complete.
+ * <p>
+ * To prevent application deadlock, waiting can only be performed with the
+ * supplied timeout.
+ *
+ * @param timeout
+ * amount of time to wait before failing.
+ * @param unit
+ * units of {@code timeout}. For example
+ * {@link TimeUnit#MILLISECONDS}.
+ * @return the returned value.
+ * @throws DhtException
+ * the asynchronous operation failed.
+ * @throws InterruptedException
+ * the current thread was interrupted before the operation
+ * completed.
+ * @throws TimeoutException
+ * the timeout elapsed before the operation completed.
+ */
+ public abstract T get(long timeout, TimeUnit unit) throws DhtException,
+ InterruptedException, TimeoutException;
+
+ private static class Value<T> extends Sync<T> {
+
+ private final CountDownLatch wait = new CountDownLatch(1);
+
+ private T data;
+
+ private DhtException error;
+
+ /**
+ * Wait for the asynchronous operation to complete.
+ * <p>
+ * To prevent application deadlock, waiting can only be performed with
+ * the supplied timeout.
+ *
+ * @param timeout
+ * amount of time to wait before failing.
+ * @param unit
+ * units of {@code timeout}. For example
+ * {@link TimeUnit#MILLISECONDS}.
+ * @return the returned value.
+ * @throws DhtException
+ * the asynchronous operation failed.
+ * @throws InterruptedException
+ * the current thread was interrupted before the operation
+ * completed.
+ * @throws TimeoutException
+ * the timeout elapsed before the operation completed.
+ */
+ public T get(long timeout, TimeUnit unit) throws DhtException,
+ InterruptedException, TimeoutException {
+ if (wait.await(timeout, unit)) {
+ if (error != null)
+ throw error;
+ return data;
+ }
+ throw new TimeoutException();
+ }
+
+ public void onSuccess(T obj) {
+ data = obj;
+ wait.countDown();
+ }
+
+ public void onFailure(DhtException err) {
+ error = err;
+ wait.countDown();
+ }
+ }
+}
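
The Sync Javadoc shows the intended calling pattern; the runnable demo below fills in the asynchronous side with a plain Thread standing in for a storage provider:

import java.util.concurrent.TimeUnit;

import org.eclipse.jgit.storage.dht.Sync;

/** Runnable demo of the Sync pattern; the Thread stands in for a real storage provider. */
class SyncDemo {
	public static void main(String[] args) throws Exception {
		final Sync<String> sync = Sync.create();

		// Pretend to be an asynchronous storage call answering on another thread.
		new Thread(new Runnable() {
			public void run() {
				sync.onSuccess("row-value");
			}
		}).start();

		// The caller blocks here, but never longer than the timeout.
		System.out.println(sync.get(500, TimeUnit.MILLISECONDS));
	}
}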
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java
new file mode 100644
index 0000000000..2e4f3a4cc9
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import java.text.MessageFormat;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.util.StringUtils;
+
+/** Length of time to wait for an operation before giving up. */
+public class Timeout {
+ /**
+ * Construct a new timeout, expressed in milliseconds.
+ *
+ * @param millis
+ * number of milliseconds to wait.
+ * @return the timeout.
+ */
+ public static Timeout milliseconds(int millis) {
+ return new Timeout(millis, TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Construct a new timeout, expressed in seconds.
+ *
+ * @param sec
+ * number of seconds to wait.
+ * @return the timeout.
+ */
+ public static Timeout seconds(int sec) {
+ return new Timeout(sec, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Construct a new timeout, expressed in (possibly fractional) seconds.
+ *
+ * @param sec
+ * number of seconds to wait.
+ * @return the timeout.
+ */
+ public static Timeout seconds(double sec) {
+ return new Timeout((long) (sec * 1000), TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Obtain a timeout from the configuration.
+ *
+ * @param cfg
+ * configuration to read.
+ * @param section
+ * section key to read.
+ * @param subsection
+ * subsection to read, may be null.
+ * @param name
+ * variable to read.
+ * @param defaultValue
+ * default to return if no timeout is specified in the
+ * configuration.
+ * @return the configured timeout.
+ */
+ public static Timeout getTimeout(Config cfg, String section,
+ String subsection, String name, Timeout defaultValue) {
+ String valStr = cfg.getString(section, subsection, name);
+ if (valStr == null)
+ return defaultValue;
+
+ valStr = valStr.trim();
+ if (valStr.length() == 0)
+ return defaultValue;
+
+ Matcher m = matcher("^([1-9][0-9]*(?:\\.[0-9]*)?)\\s*(.*)$", valStr);
+ if (!m.matches())
+ throw notTimeUnit(section, subsection, name, valStr);
+
+ String digits = m.group(1);
+ String unitName = m.group(2).trim();
+
+ long multiplier;
+ TimeUnit unit;
+ if ("".equals(unitName)) {
+ multiplier = 1;
+ unit = TimeUnit.MILLISECONDS;
+
+ } else if (anyOf(unitName, "ms", "millisecond", "milliseconds")) {
+ multiplier = 1;
+ unit = TimeUnit.MILLISECONDS;
+
+ } else if (anyOf(unitName, "s", "sec", "second", "seconds")) {
+ multiplier = 1;
+ unit = TimeUnit.SECONDS;
+
+ } else if (anyOf(unitName, "m", "min", "minute", "minutes")) {
+ multiplier = 60;
+ unit = TimeUnit.SECONDS;
+
+ } else if (anyOf(unitName, "h", "hr", "hour", "hours")) {
+ multiplier = 3600;
+ unit = TimeUnit.SECONDS;
+
+ } else
+ throw notTimeUnit(section, subsection, name, valStr);
+
+ if (digits.indexOf('.') == -1) {
+ try {
+ return new Timeout(multiplier * Long.parseLong(digits), unit);
+ } catch (NumberFormatException nfe) {
+ throw notTimeUnit(section, subsection, name, valStr);
+ }
+ } else {
+ double inputTime;
+ try {
+ inputTime = multiplier * Double.parseDouble(digits);
+ } catch (NumberFormatException nfe) {
+ throw notTimeUnit(section, subsection, name, valStr);
+ }
+
+ if (unit == TimeUnit.MILLISECONDS) {
+ TimeUnit newUnit = TimeUnit.NANOSECONDS;
+ long t = (long) (inputTime * newUnit.convert(1, unit));
+ return new Timeout(t, newUnit);
+
+ } else if (unit == TimeUnit.SECONDS && multiplier == 1) {
+ TimeUnit newUnit = TimeUnit.MILLISECONDS;
+ long t = (long) (inputTime * newUnit.convert(1, unit));
+ return new Timeout(t, newUnit);
+
+ } else {
+ return new Timeout((long) inputTime, unit);
+ }
+ }
+ }
+
+ private static Matcher matcher(String pattern, String valStr) {
+ return Pattern.compile(pattern).matcher(valStr);
+ }
+
+ private static boolean anyOf(String a, String... cases) {
+ for (String b : cases) {
+ if (StringUtils.equalsIgnoreCase(a, b))
+ return true;
+ }
+ return false;
+ }
+
+ private static IllegalArgumentException notTimeUnit(String section,
+ String subsection, String name, String valueString) {
+ String key = section
+ + (subsection != null ? "." + subsection : "")
+ + "." + name;
+ return new IllegalArgumentException(MessageFormat.format(
+ DhtText.get().notTimeUnit, key, valueString));
+ }
+
+ private final long time;
+
+ private final TimeUnit unit;
+
+ /**
+ * Construct a new timeout.
+ *
+ * @param time
+ * how long to wait.
+ * @param unit
+ * the unit that {@code time} was expressed in.
+ */
+ public Timeout(long time, TimeUnit unit) {
+ this.time = time;
+ this.unit = unit;
+ }
+
+	/** @return how long to wait, in units of {@link #getUnit()}. */
+ public long getTime() {
+ return time;
+ }
+
+ /** @return the unit of measure for {@link #getTime()}. */
+ public TimeUnit getUnit() {
+ return unit;
+ }
+
+ @Override
+ public int hashCode() {
+ return (int) time;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof Timeout)
+ return getTime() == ((Timeout) other).getTime()
+ && getUnit().equals(((Timeout) other).getUnit());
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return getTime() + " " + getUnit();
+ }
+}
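
Editorial note: a short usage sketch of the parser above (not part of this patch; the configuration section and variable names are invented for illustration):

    import org.eclipse.jgit.lib.Config;

    Config cfg = new Config();
    cfg.setString("core", null, "dhtTimeout", "2.5 s");

    // Falls back to 800 milliseconds if the variable is absent or empty.
    Timeout t = Timeout.getTimeout(cfg, "core", null, "dhtTimeout",
            Timeout.milliseconds(800));

    // "2.5 s" is parsed as 2500 milliseconds:
    //   t.getTime() == 2500, t.getUnit() == TimeUnit.MILLISECONDS
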
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/TinyProtobuf.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/TinyProtobuf.java
new file mode 100644
index 0000000000..dcf3dfb172
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/TinyProtobuf.java
@@ -0,0 +1,755 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht;
+
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
+import static org.eclipse.jgit.lib.Constants.OBJECT_ID_STRING_LENGTH;
+
+import java.nio.ByteBuffer;
+import java.text.MessageFormat;
+
+import org.eclipse.jgit.lib.AnyObjectId;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.util.RawParseUtils;
+
+/**
+ * A tiny implementation of a subset of the Google Protocol Buffers format.
+ * <p>
+ * For more information on the network format, see the canonical documentation
+ * at <a href="http://code.google.com/p/protobuf/">Google Protocol Buffers</a>.
+ */
+public class TinyProtobuf {
+ private static final int WIRE_VARINT = 0;
+
+ private static final int WIRE_FIXED_64 = 1;
+
+ private static final int WIRE_LEN_DELIM = 2;
+
+ private static final int WIRE_FIXED_32 = 5;
+
+ /**
+ * Create a new encoder.
+ *
+ * @param estimatedSize
+ * estimated size of the message. If the size is accurate,
+ * copying of the result can be avoided during
+ * {@link Encoder#asByteArray()}. If the size is too small, the
+ * buffer will grow dynamically.
+ * @return a new encoder.
+ */
+ public static Encoder encode(int estimatedSize) {
+ return new Encoder(new byte[estimatedSize]);
+ }
+
+ /**
+	 * Create an encoder that only computes the size of the encoded message.
+ *
+ * @return a new encoder.
+ */
+ public static Encoder size() {
+ return new Encoder(null);
+ }
+
+ /**
+ * Decode a buffer.
+ *
+ * @param buf
+ * the buffer to read.
+ * @return a new decoder.
+ */
+ public static Decoder decode(byte[] buf) {
+ return decode(buf, 0, buf.length);
+ }
+
+ /**
+ * Decode a buffer.
+ *
+ * @param buf
+ * the buffer to read.
+ * @param off
+ * offset to begin reading from {@code buf}.
+ * @param len
+ * total number of bytes to read from {@code buf}.
+ * @return a new decoder.
+ */
+ public static Decoder decode(byte[] buf, int off, int len) {
+ return new Decoder(buf, off, len);
+ }
+
+ /** An enumerated value that encodes/decodes as int32. */
+ public static interface Enum {
+ /** @return the wire value. */
+ public int value();
+ }
+
+ /** Decode fields from a binary protocol buffer. */
+ public static class Decoder {
+ private final byte[] buf;
+
+ private final int end;
+
+ private int ptr;
+
+ private int field;
+
+ private int type;
+
+ private int length;
+
+ private Decoder(byte[] buf, int off, int len) {
+ this.buf = buf;
+ this.ptr = off;
+ this.end = off + len;
+ }
+
+		/** @return the next field's tag number, or 0 at the end of the buffer. */
+ public int next() {
+ if (ptr == end)
+ return 0;
+
+ int fieldAndType = varint32();
+ field = fieldAndType >>> 3;
+ type = fieldAndType & 7;
+ return field;
+ }
+
+ /** Skip the current field's value. */
+ public void skip() {
+ switch (type) {
+ case WIRE_VARINT:
+ varint64();
+ break;
+ case WIRE_FIXED_64:
+ ptr += 8;
+ break;
+ case WIRE_LEN_DELIM:
+ ptr += varint32();
+ break;
+ case WIRE_FIXED_32:
+ ptr += 4;
+ break;
+ default:
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufUnsupportedFieldType, Integer
+ .valueOf(type)));
+ }
+ }
+
+ /** @return decode the current field as an int32. */
+ public int int32() {
+ checkFieldType(WIRE_VARINT);
+ return varint32();
+ }
+
+ /** @return decode the current field as an int64. */
+ public long int64() {
+ checkFieldType(WIRE_VARINT);
+ return varint64();
+ }
+
+ /**
+ * @param <T>
+ * the type of enumeration.
+ * @param all
+ * all of the supported values.
+ * @return decode the current field as an enumerated value.
+ */
+ public <T extends Enum> T intEnum(T[] all) {
+ checkFieldType(WIRE_VARINT);
+ int value = varint32();
+ for (T t : all) {
+ if (t.value() == value)
+ return t;
+ }
+ throw new IllegalStateException(MessageFormat.format(
+ DhtText.get().protobufWrongFieldType, Integer
+ .valueOf(field), Integer.valueOf(type), all[0]
+ .getClass().getSimpleName()));
+ }
+
+ /** @return decode the current field as a bool. */
+ public boolean bool() {
+ checkFieldType(WIRE_VARINT);
+ int val = varint32();
+ switch (val) {
+ case 0:
+ return false;
+ case 1:
+ return true;
+ default:
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufNotBooleanValue, Integer.valueOf(field),
+ Integer.valueOf(val)));
+ }
+ }
+
+ /** @return decode a fixed 64 bit value. */
+ public long fixed64() {
+ checkFieldType(WIRE_FIXED_64);
+ long val = buf[ptr + 0] & 0xff;
+ val |= ((long) (buf[ptr + 1] & 0xff)) << (1 * 8);
+ val |= ((long) (buf[ptr + 2] & 0xff)) << (2 * 8);
+ val |= ((long) (buf[ptr + 3] & 0xff)) << (3 * 8);
+ val |= ((long) (buf[ptr + 4] & 0xff)) << (4 * 8);
+ val |= ((long) (buf[ptr + 5] & 0xff)) << (5 * 8);
+ val |= ((long) (buf[ptr + 6] & 0xff)) << (6 * 8);
+ val |= ((long) (buf[ptr + 7] & 0xff)) << (7 * 8);
+ ptr += 8;
+ return val;
+ }
+
+ /** @return decode the current field as a string. */
+ public String string() {
+ checkFieldType(WIRE_LEN_DELIM);
+ int len = varint32();
+ String s = RawParseUtils.decode(buf, ptr, ptr + len);
+ ptr += len;
+ return s;
+ }
+
+ /** @return decode the current hex string to an ObjectId. */
+ public ObjectId stringObjectId() {
+ checkFieldType(WIRE_LEN_DELIM);
+ int len = varint32();
+ if (len != OBJECT_ID_STRING_LENGTH)
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufWrongFieldLength,
+ Integer.valueOf(field), Integer
+ .valueOf(OBJECT_ID_STRING_LENGTH), Integer
+ .valueOf(len)));
+
+ ObjectId id = ObjectId.fromString(buf, ptr);
+ ptr += OBJECT_ID_STRING_LENGTH;
+ return id;
+ }
+
+ /** @return decode a string from 8 hex digits. */
+ public int stringHex32() {
+ checkFieldType(WIRE_LEN_DELIM);
+ int len = varint32();
+ if (len != 8)
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufWrongFieldLength,
+ Integer.valueOf(field), Integer.valueOf(8), Integer
+ .valueOf(len)));
+ int val = KeyUtils.parse32(buf, ptr);
+ ptr += 8;
+ return val;
+ }
+
+ /** @return decode the current field as an array of bytes. */
+ public byte[] bytes() {
+ checkFieldType(WIRE_LEN_DELIM);
+ byte[] r = new byte[varint32()];
+ System.arraycopy(buf, ptr, r, 0, r.length);
+ ptr += r.length;
+ return r;
+ }
+
+ /** @return backing array of the current field. */
+ public byte[] bytesArray() {
+ return buf;
+ }
+
+ /** @return length of field, call before {@link #bytesOffset}. */
+ public int bytesLength() {
+ checkFieldType(WIRE_LEN_DELIM);
+ length = varint32();
+ return length;
+ }
+
+ /** @return starting offset of the field, after {@link #bytesLength()}. */
+ public int bytesOffset() {
+ int start = ptr;
+ ptr += length;
+ return start;
+ }
+
+ /** @return decode the current raw bytes to an ObjectId. */
+ public ObjectId bytesObjectId() {
+ checkFieldType(WIRE_LEN_DELIM);
+ int len = varint32();
+ if (len != OBJECT_ID_LENGTH)
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufWrongFieldLength,
+ Integer.valueOf(field), Integer
+ .valueOf(OBJECT_ID_LENGTH), Integer
+ .valueOf(len)));
+
+ ObjectId id = ObjectId.fromRaw(buf, ptr);
+ ptr += OBJECT_ID_LENGTH;
+ return id;
+ }
+
+ /** @return decode the current field as a nested message. */
+ public Decoder message() {
+ checkFieldType(WIRE_LEN_DELIM);
+ int len = varint32();
+ Decoder msg = decode(buf, ptr, len);
+ ptr += len;
+ return msg;
+ }
+
+ private int varint32() {
+ long v = varint64();
+ if (Integer.MAX_VALUE < v)
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufWrongFieldType, Integer.valueOf(field),
+ "int64", "int32"));
+ return (int) v;
+ }
+
+ private long varint64() {
+ int c = buf[ptr++];
+ long r = c & 0x7f;
+ int shift = 7;
+ while ((c & 0x80) != 0) {
+ c = buf[ptr++];
+ r |= ((long) (c & 0x7f)) << shift;
+ shift += 7;
+ }
+ return r;
+ }
+
+ private void checkFieldType(int expected) {
+ if (type != expected)
+ throw new IllegalStateException(MessageFormat.format(DhtText
+ .get().protobufWrongFieldType, Integer.valueOf(field),
+ Integer.valueOf(type), Integer.valueOf(expected)));
+ }
+ }
+
+ /** Encode values into a binary protocol buffer. */
+ public static class Encoder {
+ private byte[] buf;
+
+ private int ptr;
+
+ private Encoder(byte[] buf) {
+ this.buf = buf;
+ }
+
+ /**
+ * Encode a variable length positive integer.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store. Must be >= 0.
+ */
+ public void int32(int field, int value) {
+ int64(field, value);
+ }
+
+ /**
+ * Encode a variable length positive integer.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; omitted if 0.
+ */
+ public void int32IfNotZero(int field, int value) {
+ int64IfNotZero(field, value);
+ }
+
+ /**
+ * Encode a variable length positive integer.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; omitted if negative.
+ */
+ public void int32IfNotNegative(int field, int value) {
+ int64IfNotNegative(field, value);
+ }
+
+ /**
+ * Encode a variable length positive integer.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store. Must be >= 0.
+ */
+ public void int64(int field, long value) {
+ if (value < 0)
+ throw new IllegalArgumentException(
+ DhtText.get().protobufNegativeValuesNotSupported);
+
+ field(field, WIRE_VARINT);
+ varint(value);
+ }
+
+ /**
+ * Encode a variable length positive integer.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; omitted if 0.
+ */
+ public void int64IfNotZero(int field, long value) {
+ if (0 != value)
+ int64(field, value);
+ }
+
+ /**
+ * Encode a variable length positive integer.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; omitted if negative.
+ */
+ public void int64IfNotNegative(int field, long value) {
+ if (0 <= value)
+ int64(field, value);
+ }
+
+ /**
+ * Encode an enumerated value.
+ *
+ * @param <T>
+ * type of the enumerated values.
+ * @param field
+ * field tag number.
+ * @param value
+ * value to store; if null the field is omitted.
+ */
+ public <T extends Enum> void intEnum(int field, T value) {
+ if (value != null) {
+ field(field, WIRE_VARINT);
+ varint(value.value());
+ }
+ }
+
+ /**
+ * Encode a boolean value.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store.
+ */
+ public void bool(int field, boolean value) {
+ field(field, WIRE_VARINT);
+ varint(value ? 1 : 0);
+ }
+
+ /**
+ * Encode a boolean value, only if true.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store.
+ */
+ public void boolIfTrue(int field, boolean value) {
+ if (value)
+ bool(field, value);
+ }
+
+ /**
+ * Encode a fixed 64 value.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store.
+ */
+ public void fixed64(int field, long value) {
+ field(field, WIRE_FIXED_64);
+ if (buf != null) {
+ ensureSpace(8);
+
+ buf[ptr + 0] = (byte) value;
+ value >>>= 8;
+
+ buf[ptr + 1] = (byte) value;
+ value >>>= 8;
+
+				buf[ptr + 2] = (byte) value;
+ value >>>= 8;
+
+ buf[ptr + 3] = (byte) value;
+ value >>>= 8;
+
+ buf[ptr + 4] = (byte) value;
+ value >>>= 8;
+
+ buf[ptr + 5] = (byte) value;
+ value >>>= 8;
+
+ buf[ptr + 6] = (byte) value;
+ value >>>= 8;
+
+ buf[ptr + 7] = (byte) value;
+ }
+ ptr += 8;
+ }
+
+ /**
+ * Encode a length delimited bytes field.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; if null the field is omitted.
+ */
+ public void bytes(int field, byte[] value) {
+ if (value != null)
+ bytes(field, value, 0, value.length);
+ }
+
+ /**
+ * Encode a length delimited bytes field.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; if null the field is omitted.
+ */
+ public void bytes(int field, ByteBuffer value) {
+ if (value != null) {
+ if (!value.hasArray())
+ throw new IllegalArgumentException(DhtText.get().protobufNoArray);
+ byte[] valBuf = value.array();
+ int valPtr = value.arrayOffset() + value.position();
+ int valLen = value.limit() - value.position();
+ bytes(field, valBuf, valPtr, valLen);
+ }
+ }
+
+ /**
+ * Encode a length delimited bytes field.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; if null the field is omitted.
+ * @param off
+ * position to copy from.
+ * @param len
+ * number of bytes to copy.
+ */
+ public void bytes(int field, byte[] value, int off, int len) {
+ if (value != null) {
+ field(field, WIRE_LEN_DELIM);
+ varint(len);
+ copy(value, off, len);
+ }
+ }
+
+ /**
+		 * Encode an ObjectId as a bytes field (in raw binary format).
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store, as a raw binary; if null the field is
+ * omitted.
+ */
+ public void bytes(int field, AnyObjectId value) {
+ if (value != null) {
+ field(field, WIRE_LEN_DELIM);
+ varint(OBJECT_ID_LENGTH);
+ if (buf != null) {
+ ensureSpace(OBJECT_ID_LENGTH);
+ value.copyRawTo(buf, ptr);
+ }
+ ptr += OBJECT_ID_LENGTH;
+ }
+ }
+
+ /**
+ * Encode an ObjectId as a string (in hex format).
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store, as a hex string; if null the field is
+ * omitted.
+ */
+ public void string(int field, AnyObjectId value) {
+ if (value != null) {
+ field(field, WIRE_LEN_DELIM);
+ varint(OBJECT_ID_STRING_LENGTH);
+ if (buf != null) {
+ ensureSpace(OBJECT_ID_STRING_LENGTH);
+ value.copyTo(buf, ptr);
+ }
+ ptr += OBJECT_ID_STRING_LENGTH;
+ }
+ }
+
+ /**
+ * Encode a plain Java string.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * the value to store; if null the field is omitted.
+ */
+ public void string(int field, String value) {
+ if (value != null)
+ bytes(field, Constants.encode(value));
+ }
+
+ /**
+ * Encode a row key as a string.
+ *
+ * @param field
+ * field tag number.
+ * @param key
+ * the row key to store as a string; if null the field is
+ * omitted.
+ */
+ public void string(int field, RowKey key) {
+ if (key != null)
+ bytes(field, key.asBytes());
+ }
+
+ /**
+ * Encode an integer as an 8 byte hex string.
+ *
+ * @param field
+ * field tag number.
+ * @param value
+ * value to encode.
+ */
+ public void stringHex32(int field, int value) {
+ field(field, WIRE_LEN_DELIM);
+ varint(8);
+ if (buf != null) {
+ ensureSpace(8);
+ KeyUtils.format32(buf, ptr, value);
+ }
+ ptr += 8;
+ }
+
+ /**
+ * Encode a nested message.
+ *
+ * @param field
+ * field tag number.
+ * @param msg
+ * message to store; if null or empty the field is omitted.
+ */
+ public void message(int field, Encoder msg) {
+ if (msg != null && msg.ptr > 0)
+ bytes(field, msg.buf, 0, msg.ptr);
+ }
+
+ private void field(int field, int type) {
+ varint((field << 3) | type);
+ }
+
+ private void varint(long value) {
+ if (buf != null) {
+ if (buf.length - ptr < 10)
+ ensureSpace(varintSize(value));
+
+ do {
+ byte b = (byte) (value & 0x7f);
+ value >>>= 7;
+ if (value != 0)
+ b |= 0x80;
+ buf[ptr++] = b;
+ } while (value != 0);
+ } else {
+ ptr += varintSize(value);
+ }
+ }
+
+ private static int varintSize(long value) {
+ value >>>= 7;
+ int need = 1;
+ for (; value != 0; value >>>= 7)
+ need++;
+ return need;
+ }
+
+ private void copy(byte[] src, int off, int cnt) {
+ if (buf != null) {
+ ensureSpace(cnt);
+ System.arraycopy(src, off, buf, ptr, cnt);
+ }
+ ptr += cnt;
+ }
+
+ private void ensureSpace(int need) {
+ if (buf.length - ptr < need) {
+ byte[] n = new byte[Math.max(ptr + need, buf.length * 2)];
+ System.arraycopy(buf, 0, n, 0, ptr);
+ buf = n;
+ }
+ }
+
+ /** @return size of the protocol buffer message, in bytes. */
+ public int size() {
+ return ptr;
+ }
+
+ /** @return the current buffer, as a byte array. */
+ public byte[] asByteArray() {
+ if (ptr == buf.length)
+ return buf;
+ byte[] r = new byte[ptr];
+ System.arraycopy(buf, 0, r, 0, ptr);
+ return r;
+ }
+
+ /** @return the current buffer. */
+ public ByteBuffer asByteBuffer() {
+ return ByteBuffer.wrap(buf, 0, ptr);
+ }
+ }
+
+ private TinyProtobuf() {
+ // Don't make instances.
+ }
+}
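
Editorial note: a hypothetical encode/decode round trip using the classes above (field numbers 1 and 2 are arbitrary; nothing here is part of the patch itself):

    TinyProtobuf.Encoder e = TinyProtobuf.encode(32);
    e.int32(1, 300);                      // field 1: a varint
    e.string(2, "refs/heads/master");     // field 2: a length-delimited string
    byte[] bin = e.asByteArray();

    TinyProtobuf.Decoder d = TinyProtobuf.decode(bin);
    for (int field = d.next(); field != 0; field = d.next()) {
        switch (field) {
        case 1:
            int count = d.int32();        // 300
            break;
        case 2:
            String name = d.string();     // "refs/heads/master"
            break;
        default:
            d.skip();                     // tolerate unknown fields
        }
    }
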
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java
new file mode 100644
index 0000000000..d5c5cc9ff7
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+import java.util.Collection;
+import java.util.Set;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.ChunkMeta;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.PackChunk;
+import org.eclipse.jgit.storage.dht.StreamingCallback;
+
+/**
+ * Stores object data in compressed pack format.
+ * <p>
+ * Each chunk stores multiple objects, using the highly compressed and Git
+ * native pack file format. Chunks are sized during insertion, but average
+ * around 1 MB for historical chunks, and may be as small as a few KB for very
+ * recent chunks that were written in small bursts.
+ * <p>
+ * Objects whose compressed form is too large to fit into a single chunk are
+ * fragmented across multiple chunks, and the fragment information is used to
+ * put them back together in the correct order. Since the fragmenting occurs
+ * after data compression, random access to bytes of the large object is not
+ * currently possible.
+ * <p>
+ * Chunk keys are very well distributed, by embedding a uniformly random number
+ * at the start of the key, and also including a small time component. This
+ * layout permits chunks to be evenly spread across a cluster of disks or
+ * servers in a round-robin fashion (based on a hash of the leading bytes), but
+ * also offers some chance for older chunks to be located near each other and
+ * have that part of the storage system see less activity over time.
+ */
+public interface ChunkTable {
+ /**
+	 * Asynchronously load one or more chunks.
+ * <p>
+ * Callers are responsible for breaking up very large collections of chunk
+ * keys into smaller units, based on the reader's batch size option. Since
+	 * chunks are typically 1 MB each, 10-20 keys is a reasonable batch size, but
+	 * the actual value depends on available JVM memory and on how quickly this
+	 * method can obtain chunks from the database.
+ *
+ * @param options
+ * options to control reading.
+ * @param keys
+ * the chunk keys to obtain.
+ * @param callback
+ * receives the results when ready. If this is an instance of
+ * {@link StreamingCallback}, implementors should try to deliver
+ * results early.
+ */
+ public void get(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<PackChunk.Members>> callback);
+
+ /**
+ * Asynchronously load one or more chunk meta fields.
+ * <p>
+ * Usually meta is loaded by {@link #get(Context, Set, AsyncCallback)}, but
+ * some uses may require looking up the fragment data without having the
+ * entire chunk.
+ *
+ * @param options
+ * options to control reading.
+ * @param keys
+ * the chunk keys to obtain.
+ * @param callback
+ * receives the results when ready. If this is an instance of
+ * {@link StreamingCallback}, implementors should try to deliver
+ * results early.
+ */
+ public void getMeta(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<ChunkMeta>> callback);
+
+ /**
+ * Put some (or all) of a single chunk.
+ * <p>
+	 * The higher-level storage layer typically stores chunks in pieces. It is
+	 * common to first store the data, then much later store the fragments and
+ * index. Sometimes all of the members are ready at once, and can be put
+ * together as a single unit. This method handles both approaches to storing
+ * a chunk.
+ * <p>
+	 * Implementors must therefore write only the members that are present, for example:
+ *
+ * <pre>
+ * ColumnUpdateList list = ...;
+ * if (chunk.getChunkData() != null)
+ * list.addColumn(&quot;chunk_data&quot;, chunk.getChunkData());
+ * if (chunk.getChunkIndex() != null)
+ * list.addColumn(&quot;chunk_index&quot;, chunk.getChunkIndex());
+ * if (chunk.getFragments() != null)
+ * list.addColumn(&quot;fragments&quot;, chunk.getFragments());
+ * createOrUpdateRow(chunk.getChunkKey(), list);
+ * </pre>
+ *
+ * @param chunk
+ * description of the chunk to be stored.
+ * @param buffer
+ * buffer to enqueue the put onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void put(PackChunk.Members chunk, WriteBuffer buffer)
+ throws DhtException;
+
+ /**
+ * Completely remove a chunk and all of its data elements.
+ * <p>
+ * Chunk removal should occur as quickly as possible after the flush has
+ * completed, as the caller has already ensured the chunk is not in use.
+ *
+ * @param key
+ * key of the chunk to remove.
+ * @param buffer
+ * buffer to enqueue the remove onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException;
+}
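
Editorial note: the batching that the get() Javadoc asks callers to perform might look like the sketch below; allKeys, chunkTable and callback are placeholders supplied by the caller, not APIs defined in this patch:

    import java.util.HashSet;
    import java.util.Set;

    int batchSize = 20; // roughly 10-20 MB of chunk data in flight per batch
    Set<ChunkKey> batch = new HashSet<ChunkKey>();
    for (ChunkKey key : allKeys) {
        batch.add(key);
        if (batch.size() == batchSize) {
            chunkTable.get(Context.LOCAL, batch, callback);
            batch = new HashSet<ChunkKey>();
        }
    }
    if (!batch.isEmpty())
        chunkTable.get(Context.LOCAL, batch, callback);
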
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java
new file mode 100644
index 0000000000..b0e7ff4874
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+/**
+ * Options used when accessing a {@link Database}.
+ * <p>
+ * <i>Warning:</i> This type may change from enumeration to class in the future.
+ */
+public enum Context {
+ /** Perform a fast read, but may miss results. */
+ FAST_MISSING_OK,
+
+ /** Read from a local replica. */
+ LOCAL,
+
+ /** Repair the local replica if a read failed. */
+ READ_REPAIR;
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java
new file mode 100644
index 0000000000..fbad5d80e8
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+/**
+ * A distributed database implementation.
+ * <p>
+ * A DHT provider must implement this interface to return table references for
+ * each of the named tables. The database and the tables it returns are held as
+ * singletons, and thus must be thread-safe. If the underlying implementation
+ * needs to use individual "connections" for each operation, it is responsible
+ * for setting up a connection pool, borrowing and returning resources within
+ * each of the table APIs.
+ * <p>
+ * Most APIs on the tables are asynchronous and must perform their computation
+ * in the background using a different thread than the caller. Implementations
+ * that have only an underlying synchronous API should configure and use an
+ * {@link java.util.concurrent.ExecutorService} to perform computation in the
+ * background on a thread pool.
+ * <p>
+ * Tables returned by these methods should be singletons, as the higher level
+ * DHT implementation usually invokes these methods each time it needs to use a
+ * given table. The suggested implementation approach is:
+ *
+ * <pre>
+ * class MyDatabase implements Database {
+ * private final RepositoryIndexTable rep = new MyRepositoryIndex();
+ *
+ * private final RefTable ref = new MyRefTable();
+ *
+ * public RepositoryIndexTable repositoryIndex() {
+ * return rep;
+ * }
+ *
+ * public RefTable ref() {
+ * return ref;
+ * }
+ * }
+ * </pre>
+ */
+public interface Database {
+ /** @return a handle to the table listing known repositories. */
+ public RepositoryIndexTable repositoryIndex();
+
+ /** @return a handle to the table storing repository metadata. */
+ public RepositoryTable repository();
+
+ /** @return a handle to the table listing references in a repository. */
+ public RefTable ref();
+
+ /** @return a handle to the table listing known objects. */
+ public ObjectIndexTable objectIndex();
+
+ /** @return a handle to the table listing pack data chunks. */
+ public ChunkTable chunk();
+
+ /**
+ * Create a new WriteBuffer for the current thread.
+ * <p>
+ * Unlike other methods on this interface, the returned buffer <b>must</b>
+ * be a new object on every invocation. Buffers do not need to be
+ * thread-safe.
+ *
+ * @return a new buffer to handle pending writes.
+ */
+ public WriteBuffer newWriteBuffer();
+}
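
Editorial note: for a backend whose native API is synchronous, the ExecutorService approach described above could be sketched roughly as follows, inside a hypothetical ChunkTable implementation. readChunksSynchronously() is a placeholder for the backend's own read call, and the callback methods mirror the onSuccess/onFailure shape used by Sync elsewhere in this patch:

    import java.util.Collection;
    import java.util.Set;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    private final ExecutorService executor = Executors.newFixedThreadPool(4);

    public void get(final Context options, final Set<ChunkKey> keys,
            final AsyncCallback<Collection<PackChunk.Members>> callback) {
        executor.submit(new Runnable() {
            public void run() {
                try {
                    // The blocking read happens on a pool thread, never on the caller.
                    callback.onSuccess(readChunksSynchronously(options, keys));
                } catch (DhtException err) {
                    callback.onFailure(err);
                }
            }
        });
    }
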
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java
new file mode 100644
index 0000000000..9245815f69
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.ObjectIndexKey;
+import org.eclipse.jgit.storage.dht.ObjectInfo;
+
+/**
+ * Associates an {@link ObjectId} with the {@link ChunkKey} it is stored in.
+ * <p>
+ * This table provides a global index listing every single object within the
+ * repository, and which chunks the object can be found in. Readers use this
+ * table to find an object when they are forced to start from a bare SHA-1 that
+ * was input by a user, or supplied over the network from a client.
+ */
+public interface ObjectIndexTable {
+ /**
+ * Asynchronously locate one or more objects in the repository.
+ * <p>
+ * Callers are responsible for breaking up very large collections of objects
+	 * into smaller units, based on the reader's batch size option. Batches of
+	 * 1,000 to 10,000 objects are a reasonable size.
+ *
+ * @param options
+ * options to control reading.
+ * @param objects
+ * set of object names to locate the chunks of.
+ * @param callback
+ * receives the results when ready.
+ */
+ public void get(Context options, Set<ObjectIndexKey> objects,
+ AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback);
+
+ /**
+ * Record the fact that {@code objId} can be found by {@code info}.
+ * <p>
+ * If there is already data for {@code objId} in the table, this method
+ * should add the new chunk onto the existing data list.
+ * <p>
+ * This method should use batched asynchronous puts as much as possible.
+ * Initial imports of an existing repository may require millions of add
+ * operations to this table, one for each object being imported.
+ *
+ * @param objId
+ * the unique ObjectId.
+ * @param info
+ * a chunk that is known to store {@code objId}.
+ * @param buffer
+ * buffer to enqueue the put onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer)
+ throws DhtException;
+
+ /**
+ * Remove a single chunk from an object.
+ * <p>
+ * If this is the last remaining chunk for the object, the object should
+ * also be removed from the table. Removal can be deferred, or can occur
+ * immediately. That is, {@code get()} may return the object with an empty
+ * collection, but to prevent unlimited disk usage the database should
+ * eventually remove the object.
+ *
+ * @param objId
+ * the unique ObjectId.
+ * @param chunk
+ * the chunk that needs to be removed from this object.
+ * @param buffer
+ * buffer to enqueue the remove onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer)
+ throws DhtException;
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java
new file mode 100644
index 0000000000..48171265c1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RefData;
+import org.eclipse.jgit.storage.dht.RefKey;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+
+/**
+ * Tracks all branches and tags for a repository.
+ * <p>
+ * Each repository has one or more references, pointing to the most recent
+ * revision on that branch, or to the tagged revision if it is a tag.
+ */
+public interface RefTable {
+ /**
+ * Read all known references in the repository.
+ *
+ * @param options
+ * options to control reading.
+ * @param repository
+ * the repository to load the references from.
+ * @return map of all references. Empty map if there are no references.
+ * @throws DhtException
+ * the database cannot be read.
+ * @throws TimeoutException
+ * the operation to read the database timed out.
+ */
+ public Map<RefKey, RefData> getAll(Context options, RepositoryKey repository)
+ throws DhtException, TimeoutException;
+
+ /**
+ * Compare a reference, and delete if it matches.
+ *
+ * @param refKey
+ * reference to delete.
+ * @param oldData
+ * the old data for the reference. The delete only occurs if the
+ * value is still equal to {@code oldData}.
+ * @return true if the delete was successful; false if the current value
+ * does not match {@code oldData}.
+ * @throws DhtException
+ * the database cannot be updated.
+ * @throws TimeoutException
+ * the operation to modify the database timed out.
+ */
+ public boolean compareAndRemove(RefKey refKey, RefData oldData)
+ throws DhtException, TimeoutException;
+
+ /**
+ * Compare a reference, and put if it matches.
+ *
+ * @param refKey
+ * reference to create or replace.
+ * @param oldData
+ * the old data for the reference. The put only occurs if the
+ * value is still equal to {@code oldData}. Use
+ * {@link RefData#NONE} if the reference should not exist and is
+ * being created.
+ * @param newData
+ * new value to store.
+ * @return true if the put was successful; false if the current value does
+	 *         not match {@code oldData}.
+ * @throws DhtException
+ * the database cannot be updated.
+ * @throws TimeoutException
+ * the operation to modify the database timed out.
+ */
+ public boolean compareAndPut(RefKey refKey, RefData oldData, RefData newData)
+ throws DhtException, TimeoutException;
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java
new file mode 100644
index 0000000000..794db6e5e2
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.RepositoryName;
+
+/**
+ * Maps a repository name from a URL, to the internal {@link RepositoryKey}.
+ * <p>
+ * The internal identifier is used for all data storage, as it is part of the row
+ * keys for each data row that makes up the repository. By using an internal
+ * key, repositories can be efficiently renamed in O(1) time, without changing
+ * existing data rows.
+ */
+public interface RepositoryIndexTable {
+ /**
+ * Find a repository by name.
+ *
+ * @param name
+ * name of the repository, from the URL.
+ * @return the internal key; null if not found.
+ * @throws DhtException
+ * @throws TimeoutException
+ */
+ public RepositoryKey get(RepositoryName name) throws DhtException,
+ TimeoutException;
+
+ /**
+ * Atomically record the association of name to identifier.
+ * <p>
+ * This method must use some sort of transaction system to ensure the name
+ * either points at {@code key} when complete, or fails fast with an
+ * exception if the name is used by a different key. This may require
+ * running some sort of lock management service in parallel to the database.
+ *
+ * @param name
+ * name of the repository.
+ * @param key
+ * internal key used to find the repository's data.
+ * @throws DhtException
+ * @throws TimeoutException
+ */
+ public void putUnique(RepositoryName name, RepositoryKey key)
+ throws DhtException, TimeoutException;
+}
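
Editorial note: a sketch of how the index and the repository table might cooperate to open or create a repository by name. The helper below is hypothetical and not part of this patch; error handling is reduced to the essentials:

    RepositoryKey openOrCreate(Database db, RepositoryName name)
            throws DhtException, TimeoutException {
        RepositoryKey key = db.repositoryIndex().get(name);
        if (key != null)
            return key;

        key = db.repository().nextKey();
        // Throws if another writer associated the name with a different key.
        db.repositoryIndex().putUnique(name, key);
        return key;
    }
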
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java
new file mode 100644
index 0000000000..5921ca95c1
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+import java.util.Collection;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.CachedPackInfo;
+import org.eclipse.jgit.storage.dht.CachedPackKey;
+import org.eclipse.jgit.storage.dht.ChunkInfo;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+
+/**
+ * Tracks high-level information about all known repositories.
+ */
+public interface RepositoryTable {
+ /**
+ * Generate a new unique RepositoryKey.
+ *
+ * @return a new unique key.
+ * @throws DhtException
+ * keys cannot be generated at this time.
+ */
+ public RepositoryKey nextKey() throws DhtException;
+
+ /**
+ * Record the existence of a chunk.
+ *
+ * @param repo
+ * repository owning the chunk.
+ * @param info
+ * information about the chunk.
+ * @param buffer
+ * buffer to enqueue the put onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer)
+ throws DhtException;
+
+ /**
+ * Remove the information about a chunk.
+ *
+ * @param repo
+ * repository owning the chunk.
+ * @param chunk
+ * the chunk that needs to be deleted.
+ * @param buffer
+ * buffer to enqueue the remove onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer)
+ throws DhtException;
+
+ /**
+ * Get the cached packs, if any.
+ *
+ * @param repo
+ * repository owning the packs.
+ * @return cached pack descriptions.
+ * @throws DhtException the pack list cannot be read from the database.
+ * @throws TimeoutException the operation timed out waiting on the database.
+ */
+ public Collection<CachedPackInfo> getCachedPacks(RepositoryKey repo)
+ throws DhtException, TimeoutException;
+
+ /**
+ * Record the existence of a cached pack.
+ *
+ * @param repo
+ * repository owning the pack.
+ * @param info
+ * information about the pack.
+ * @param buffer
+ * buffer to enqueue the put onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer)
+ throws DhtException;
+
+ /**
+ * Remove the record of a cached pack.
+ *
+ * @param repo
+ * repository owning the pack.
+ * @param key
+ * key of the pack to remove.
+ * @param buffer
+ * buffer to enqueue the remove onto.
+ * @throws DhtException
+ * if the buffer flushed and an enqueued operation failed.
+ */
+ public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer)
+ throws DhtException;
+}
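Usage sketch (illustrative only, not part of the patch): allocating a repository key and recording a chunk through this interface. It assumes a concrete Database implementation supplied by a backend, exposing repository() and newWriteBuffer() the same way the cache wrapper later in this patch uses them.

import org.eclipse.jgit.storage.dht.ChunkInfo;
import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.RepositoryKey;
import org.eclipse.jgit.storage.dht.spi.Database;
import org.eclipse.jgit.storage.dht.spi.WriteBuffer;

class RepositoryTableSketch {
	static RepositoryKey recordChunk(Database db, ChunkInfo info) throws DhtException {
		WriteBuffer buf = db.newWriteBuffer();
		RepositoryKey repo = db.repository().nextKey(); // allocate a unique id
		db.repository().put(repo, info, buf); // enqueue the chunk record
		buf.flush(); // wait for the enqueued write to complete
		return repo;
	}
}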
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java
new file mode 100644
index 0000000000..5521ec2fb8
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+
+/** Potentially buffers writes until full, or until flush. */
+public interface WriteBuffer {
+ /**
+ * Flush any pending writes, and wait for them to complete.
+ *
+ * @throws DhtException
+ * one or more writes failed. As writes may occur in any order,
+ * the exact state of the database is unspecified.
+ */
+ public void flush() throws DhtException;
+
+ /**
+ * Abort pending writes, and wait for acknowledgment.
+ * <p>
+ * Once a buffer has been aborted, it cannot be reused. Application code
+ * must discard the buffer instance and use a different buffer to issue
+ * subsequent operations.
+ * <p>
+ * If writes have not been started yet, they should be discarded and not
+ * submitted to the storage system.
+ * <p>
+ * If writes have already been started asynchronously in the background,
+ * this method may try to cancel them, but must wait for the operation to
+ * either complete or abort before returning. This allows callers to recover
+ * by scanning the storage system and correcting any partial writes that were
+ * left behind.
+ *
+ * @throws DhtException
+ * one or more already started writes failed.
+ */
+ public void abort() throws DhtException;
+}
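Usage sketch (illustrative only, not part of the patch): the flush/abort discipline described above. After an abort the buffer must be discarded, so the sketch simply rethrows.

import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.spi.WriteBuffer;

class WriteBufferSketch {
	static void writeAll(WriteBuffer buffer) throws DhtException {
		try {
			// ... enqueue puts and removes on the SPI tables, passing this buffer ...
			buffer.flush(); // block until every enqueued write has completed
		} catch (DhtException err) {
			buffer.abort(); // cancel what has not started; the buffer is now unusable
			throw err;
		}
	}
}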
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java
new file mode 100644
index 0000000000..4eb26bd0d8
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import static java.util.Collections.singleton;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.Sync;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change;
+import org.eclipse.jgit.storage.dht.spi.util.AbstractWriteBuffer;
+
+/** WriteBuffer implementation for a {@link CacheDatabase}. */
+public class CacheBuffer extends AbstractWriteBuffer {
+ private final WriteBuffer dbBuffer;
+
+ private final CacheService client;
+
+ private final Sync<Void> none;
+
+ private List<CacheService.Change> pending;
+
+ private List<CacheService.Change> afterFlush;
+
+ /**
+ * Initialize a new buffer.
+ *
+ * @param dbBuffer
+ * the underlying database's own buffer.
+ * @param client
+ * connection to the cache service.
+ * @param options
+ * options controlling cache operations.
+ */
+ public CacheBuffer(WriteBuffer dbBuffer, CacheService client,
+ CacheOptions options) {
+ super(null, options.getWriteBufferSize());
+ this.dbBuffer = dbBuffer;
+ this.client = client;
+ this.none = Sync.none();
+ }
+
+ /**
+ * Schedule removal of a key from the cache.
+ * <p>
+ * Unlike {@link #removeAfterFlush(CacheKey)}, these removals can be flushed
+ * when the cache buffer is full, potentially before any corresponding
+ * removal is written to the underlying database.
+ *
+ * @param key
+ * key to remove.
+ * @throws DhtException
+ * a prior flush failed.
+ */
+ public void remove(CacheKey key) throws DhtException {
+ modify(CacheService.Change.remove(key));
+ }
+
+ /**
+ * Schedule a removal only after the underlying database flushes.
+ * <p>
+ * Unlike {@link #remove(CacheKey)}, these removals are buffered until the
+ * application calls {@link #flush()}, and are not sent to the cache service
+ * until the underlying database's flush() operation has completed
+ * successfully.
+ *
+ * @param key
+ * key to remove.
+ */
+ public void removeAfterFlush(CacheKey key) {
+ if (afterFlush == null)
+ afterFlush = newList();
+ afterFlush.add(CacheService.Change.remove(key));
+ }
+
+ /**
+ * Schedule storing (or replacing) a key in the cache.
+ *
+ * @param key
+ * key to store.
+ * @param value
+ * new value to store.
+ * @throws DhtException
+ * a prior flush failed.
+ */
+ public void put(CacheKey key, byte[] value) throws DhtException {
+ modify(CacheService.Change.put(key, value));
+ }
+
+ /**
+ * Schedule any cache change.
+ *
+ * @param op
+ * the cache operation.
+ * @throws DhtException
+ * a prior flush failed.
+ */
+ public void modify(CacheService.Change op) throws DhtException {
+ int sz = op.getKey().getBytes().length;
+ if (op.getData() != null)
+ sz += op.getData().length;
+ if (add(sz)) {
+ if (pending == null)
+ pending = newList();
+ pending.add(op);
+ queued(sz);
+ } else {
+ client.modify(singleton(op), wrap(none, sz));
+ }
+ }
+
+ /** @return the underlying database's own write buffer. */
+ public WriteBuffer getWriteBuffer() {
+ return dbBuffer;
+ }
+
+ @Override
+ protected void startQueuedOperations(int bytes) throws DhtException {
+ client.modify(pending, wrap(none, bytes));
+ pending = null;
+ }
+
+ public void flush() throws DhtException {
+ dbBuffer.flush();
+
+ if (afterFlush != null) {
+ for (CacheService.Change op : afterFlush)
+ modify(op);
+ afterFlush = null;
+ }
+
+ super.flush();
+ }
+
+ @Override
+ public void abort() throws DhtException {
+ pending = null;
+ afterFlush = null;
+
+ dbBuffer.abort();
+ super.abort();
+ }
+
+ private static List<Change> newList() {
+ return new ArrayList<CacheService.Change>();
+ }
+}
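Usage sketch (illustrative only, not part of the patch): invalidating a cache entry only after the underlying database write has flushed, which is what removeAfterFlush() provides. flush() drains the database buffer first, then sends the deferred cache removals.

import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.spi.cache.CacheBuffer;
import org.eclipse.jgit.storage.dht.spi.cache.CacheKey;

class CacheBufferSketch {
	static void updateThenInvalidate(CacheBuffer buf, CacheKey key) throws DhtException {
		// ... enqueue the database update on buf.getWriteBuffer() ...
		buf.removeAfterFlush(key); // held back until the database flush succeeds
		buf.flush(); // database first, then the deferred cache removal
	}
}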
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java
new file mode 100644
index 0000000000..22989cb93f
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java
@@ -0,0 +1,454 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import static java.util.Collections.singleton;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.ChunkMeta;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.PackChunk;
+import org.eclipse.jgit.storage.dht.StreamingCallback;
+import org.eclipse.jgit.storage.dht.Sync;
+import org.eclipse.jgit.storage.dht.TinyProtobuf;
+import org.eclipse.jgit.storage.dht.spi.ChunkTable;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change;
+
+/** Cache wrapper around ChunkTable. */
+public class CacheChunkTable implements ChunkTable {
+ private final ChunkTable db;
+
+ private final ExecutorService executor;
+
+ private final CacheService client;
+
+ private final Sync<Void> none;
+
+ private final Namespace nsChunk = Namespace.CHUNK;
+
+ private final Namespace nsMeta = Namespace.CHUNK_META;
+
+ /**
+ * Initialize a new wrapper.
+ *
+ * @param dbTable
+ * the underlying database's corresponding table.
+ * @param cacheDatabase
+ * the cache database.
+ */
+ public CacheChunkTable(ChunkTable dbTable, CacheDatabase cacheDatabase) {
+ this.db = dbTable;
+ this.executor = cacheDatabase.getExecutorService();
+ this.client = cacheDatabase.getClient();
+ this.none = Sync.none();
+ }
+
+ public void get(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<PackChunk.Members>> callback) {
+ List<CacheKey> toFind = new ArrayList<CacheKey>(keys.size());
+ for (ChunkKey k : keys)
+ toFind.add(nsChunk.key(k));
+ client.get(toFind, new ChunkFromCache(options, keys, callback));
+ }
+
+ public void getMeta(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<ChunkMeta>> callback) {
+ List<CacheKey> toFind = new ArrayList<CacheKey>(keys.size());
+ for (ChunkKey k : keys)
+ toFind.add(nsMeta.key(k));
+ client.get(toFind, new MetaFromCache(options, keys, callback));
+ }
+
+ public void put(PackChunk.Members chunk, WriteBuffer buffer)
+ throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ db.put(chunk, buf.getWriteBuffer());
+
+ // Only store fragmented meta. This is all callers should ask for.
+ if (chunk.hasMeta() && chunk.getMeta().getFragmentCount() != 0)
+ buf.put(nsMeta.key(chunk.getChunkKey()), chunk.getMeta().asBytes());
+
+ if (chunk.hasChunkData())
+ buf.put(nsChunk.key(chunk.getChunkKey()), encode(chunk));
+ else
+ buf.removeAfterFlush(nsChunk.key(chunk.getChunkKey()));
+ }
+
+ public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ buf.remove(nsChunk.key(key));
+ buf.remove(nsMeta.key(key));
+ db.remove(key, buf.getWriteBuffer());
+ }
+
+ private static byte[] encode(PackChunk.Members members) {
+ final byte[] meta;
+ if (members.hasMeta())
+ meta = members.getMeta().asBytes();
+ else
+ meta = null;
+
+ ByteBuffer chunkData = members.getChunkDataAsByteBuffer();
+ ByteBuffer chunkIndex = members.getChunkIndexAsByteBuffer();
+
+ // First pass measures the encoded size; second pass writes the bytes.
+ TinyProtobuf.Encoder sizer = TinyProtobuf.size();
+ TinyProtobuf.Encoder e = sizer;
+ do {
+ e.bytes(1, chunkData);
+ e.bytes(2, chunkIndex);
+ e.bytes(3, meta);
+ if (e == sizer)
+ e = TinyProtobuf.encode(e.size());
+ else
+ return e.asByteArray();
+ } while (true);
+ }
+
+ private static PackChunk.Members decode(ChunkKey key, byte[] raw) {
+ PackChunk.Members members = new PackChunk.Members();
+ members.setChunkKey(key);
+
+ TinyProtobuf.Decoder d = TinyProtobuf.decode(raw);
+ for (;;) {
+ switch (d.next()) {
+ case 0:
+ return members;
+ case 1: {
+ int cnt = d.bytesLength();
+ int ptr = d.bytesOffset();
+ byte[] buf = d.bytesArray();
+ members.setChunkData(buf, ptr, cnt);
+ continue;
+ }
+ case 2: {
+ int cnt = d.bytesLength();
+ int ptr = d.bytesOffset();
+ byte[] buf = d.bytesArray();
+ members.setChunkIndex(buf, ptr, cnt);
+ continue;
+ }
+ case 3:
+ members.setMeta(ChunkMeta.fromBytes(key, d.message()));
+ continue;
+ default:
+ d.skip();
+ }
+ }
+ }
+
+ private class ChunkFromCache implements
+ StreamingCallback<Map<CacheKey, byte[]>> {
+ private final Object lock = new Object();
+
+ private final Context options;
+
+ private final Set<ChunkKey> remaining;
+
+ private final AsyncCallback<Collection<PackChunk.Members>> normalCallback;
+
+ private final StreamingCallback<Collection<PackChunk.Members>> streamingCallback;
+
+ private final List<PackChunk.Members> all;
+
+ ChunkFromCache(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<PackChunk.Members>> callback) {
+ this.options = options;
+ this.remaining = new HashSet<ChunkKey>(keys);
+ this.normalCallback = callback;
+
+ if (callback instanceof StreamingCallback<?>) {
+ streamingCallback = (StreamingCallback<Collection<PackChunk.Members>>) callback;
+ all = null;
+ } else {
+ streamingCallback = null;
+ all = new ArrayList<PackChunk.Members>(keys.size());
+ }
+ }
+
+ public void onPartialResult(Map<CacheKey, byte[]> result) {
+ for (Map.Entry<CacheKey, byte[]> ent : result.entrySet()) {
+ ChunkKey key = ChunkKey.fromBytes(ent.getKey().getBytes());
+ PackChunk.Members members = decode(key, ent.getValue());
+
+ if (streamingCallback != null) {
+ streamingCallback.onPartialResult(singleton(members));
+
+ synchronized (lock) {
+ remaining.remove(key);
+ }
+ } else {
+ synchronized (lock) {
+ all.add(members);
+ remaining.remove(key);
+ }
+ }
+ }
+ }
+
+ public void onSuccess(Map<CacheKey, byte[]> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+
+ synchronized (lock) {
+ if (remaining.isEmpty() || options == Context.FAST_MISSING_OK) {
+ normalCallback.onSuccess(all);
+ } else {
+ db.get(options, remaining, new ChunkFromDatabase(all,
+ normalCallback, streamingCallback));
+ }
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ // TODO(spearce) We may want to just drop to database here.
+ normalCallback.onFailure(error);
+ }
+ }
+
+ private class ChunkFromDatabase implements
+ StreamingCallback<Collection<PackChunk.Members>> {
+ private final Object lock = new Object();
+
+ private final List<PackChunk.Members> all;
+
+ private final AsyncCallback<Collection<PackChunk.Members>> normalCallback;
+
+ private final StreamingCallback<Collection<PackChunk.Members>> streamingCallback;
+
+ ChunkFromDatabase(
+ List<PackChunk.Members> all,
+ AsyncCallback<Collection<PackChunk.Members>> normalCallback,
+ StreamingCallback<Collection<PackChunk.Members>> streamingCallback) {
+ this.all = all;
+ this.normalCallback = normalCallback;
+ this.streamingCallback = streamingCallback;
+ }
+
+ public void onPartialResult(Collection<PackChunk.Members> result) {
+ final List<PackChunk.Members> toPutIntoCache = copy(result);
+
+ if (streamingCallback != null)
+ streamingCallback.onPartialResult(result);
+ else {
+ synchronized (lock) {
+ all.addAll(result);
+ }
+ }
+
+ // Encoding is rather expensive, so move the cache population
+ // into a different background thread to prevent the current
+ // database task from being starved of time.
+ //
+ executor.submit(new Runnable() {
+ public void run() {
+ for (PackChunk.Members members : toPutIntoCache) {
+ ChunkKey key = members.getChunkKey();
+ Change op = Change.put(nsChunk.key(key), encode(members));
+ client.modify(singleton(op), none);
+ }
+ }
+ });
+ }
+
+ private <T> List<T> copy(Collection<T> result) {
+ return new ArrayList<T>(result);
+ }
+
+ public void onSuccess(Collection<PackChunk.Members> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+
+ synchronized (lock) {
+ normalCallback.onSuccess(all);
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ normalCallback.onFailure(error);
+ }
+ }
+
+ private class MetaFromCache implements
+ StreamingCallback<Map<CacheKey, byte[]>> {
+ private final Object lock = new Object();
+
+ private final Context options;
+
+ private final Set<ChunkKey> remaining;
+
+ private final AsyncCallback<Collection<ChunkMeta>> normalCallback;
+
+ private final StreamingCallback<Collection<ChunkMeta>> streamingCallback;
+
+ private final List<ChunkMeta> all;
+
+ MetaFromCache(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<ChunkMeta>> callback) {
+ this.options = options;
+ this.remaining = new HashSet<ChunkKey>(keys);
+ this.normalCallback = callback;
+
+ if (callback instanceof StreamingCallback<?>) {
+ streamingCallback = (StreamingCallback<Collection<ChunkMeta>>) callback;
+ all = null;
+ } else {
+ streamingCallback = null;
+ all = new ArrayList<ChunkMeta>(keys.size());
+ }
+ }
+
+ public void onPartialResult(Map<CacheKey, byte[]> result) {
+ for (Map.Entry<CacheKey, byte[]> ent : result.entrySet()) {
+ ChunkKey key = ChunkKey.fromBytes(ent.getKey().getBytes());
+ ChunkMeta meta = ChunkMeta.fromBytes(key, ent.getValue());
+
+ if (streamingCallback != null) {
+ streamingCallback.onPartialResult(singleton(meta));
+
+ synchronized (lock) {
+ remaining.remove(key);
+ }
+ } else {
+ synchronized (lock) {
+ all.add(meta);
+ remaining.remove(key);
+ }
+ }
+ }
+ }
+
+ public void onSuccess(Map<CacheKey, byte[]> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+
+ synchronized (lock) {
+ if (remaining.isEmpty() || options == Context.FAST_MISSING_OK) {
+ normalCallback.onSuccess(all);
+ } else {
+ db.getMeta(options, remaining, new MetaFromDatabase(all,
+ normalCallback, streamingCallback));
+ }
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ // TODO(spearce) We may want to just drop to database here.
+ normalCallback.onFailure(error);
+ }
+ }
+
+ private class MetaFromDatabase implements
+ StreamingCallback<Collection<ChunkMeta>> {
+ private final Object lock = new Object();
+
+ private final List<ChunkMeta> all;
+
+ private final AsyncCallback<Collection<ChunkMeta>> normalCallback;
+
+ private final StreamingCallback<Collection<ChunkMeta>> streamingCallback;
+
+ MetaFromDatabase(List<ChunkMeta> all,
+ AsyncCallback<Collection<ChunkMeta>> normalCallback,
+ StreamingCallback<Collection<ChunkMeta>> streamingCallback) {
+ this.all = all;
+ this.normalCallback = normalCallback;
+ this.streamingCallback = streamingCallback;
+ }
+
+ public void onPartialResult(Collection<ChunkMeta> result) {
+ final List<ChunkMeta> toPutIntoCache = copy(result);
+
+ if (streamingCallback != null)
+ streamingCallback.onPartialResult(result);
+ else {
+ synchronized (lock) {
+ all.addAll(result);
+ }
+ }
+
+ // Encoding is rather expensive, so move the cache population
+ // into a different background thread to prevent the current
+ // database task from being starved of time.
+ //
+ executor.submit(new Runnable() {
+ public void run() {
+ for (ChunkMeta meta : toPutIntoCache) {
+ ChunkKey key = meta.getChunkKey();
+ Change op = Change.put(nsMeta.key(key), meta.asBytes());
+ client.modify(singleton(op), none);
+ }
+ }
+ });
+ }
+
+ private <T> List<T> copy(Collection<T> result) {
+ return new ArrayList<T>(result);
+ }
+
+ public void onSuccess(Collection<ChunkMeta> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+
+ synchronized (lock) {
+ normalCallback.onSuccess(all);
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ normalCallback.onFailure(error);
+ }
+ }
+}
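Usage sketch (illustrative only, not part of the patch): reading chunks through the cache wrapper with a plain AsyncCallback. It assumes AsyncCallback declares only onSuccess and onFailure, consistent with its uses in this patch; cache misses fall back to the wrapped ChunkTable and the results are re-encoded into the cache on a background thread.

import java.util.Collection;
import java.util.Set;

import org.eclipse.jgit.storage.dht.AsyncCallback;
import org.eclipse.jgit.storage.dht.ChunkKey;
import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.PackChunk;
import org.eclipse.jgit.storage.dht.spi.Context;
import org.eclipse.jgit.storage.dht.spi.cache.CacheChunkTable;

class CacheChunkTableSketch {
	static void readChunks(CacheChunkTable chunks, Context options, Set<ChunkKey> keys) {
		chunks.get(options, keys, new AsyncCallback<Collection<PackChunk.Members>>() {
			public void onSuccess(Collection<PackChunk.Members> result) {
				// consume the decoded chunks
			}

			public void onFailure(DhtException error) {
				// report the error or retry
			}
		});
	}
}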
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java
new file mode 100644
index 0000000000..da3ea5fd4c
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import java.util.concurrent.ExecutorService;
+
+import org.eclipse.jgit.storage.dht.spi.ChunkTable;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable;
+import org.eclipse.jgit.storage.dht.spi.RefTable;
+import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable;
+import org.eclipse.jgit.storage.dht.spi.RepositoryTable;
+
+/**
+ * Uses a cache for fast lookups, but falls back to another Database.
+ * <p>
+ * On a read miss, this database falls back to reading the underlying Database,
+ * and then puts the value that was read into the cache for later access.
+ */
+public class CacheDatabase implements Database {
+ private final Database database;
+
+ private final ExecutorService executorService;
+
+ private final CacheService client;
+
+ private final CacheOptions options;
+
+ private final CacheRepositoryIndexTable repositoryIndex;
+
+ private final CacheRepositoryTable repository;
+
+ private final CacheRefTable ref;
+
+ private final CacheObjectIndexTable objectIndex;
+
+ private final CacheChunkTable chunk;
+
+ /**
+ * Initialize a cache database.
+ *
+ * @param database
+ * underlying storage database, used for read-misses and all
+ * writes.
+ * @param executor
+ * executor service to perform expensive cache updates in the
+ * background.
+ * @param client
+ * implementation of the cache service.
+ * @param options
+ * configuration of the cache.
+ */
+ public CacheDatabase(Database database, ExecutorService executor,
+ CacheService client, CacheOptions options) {
+ this.database = database;
+ this.executorService = executor;
+ this.client = client;
+ this.options = options;
+
+ repositoryIndex = new CacheRepositoryIndexTable(database
+ .repositoryIndex(), this);
+
+ repository = new CacheRepositoryTable(database.repository(), this);
+ ref = new CacheRefTable(database.ref(), this);
+ objectIndex = new CacheObjectIndexTable(database.objectIndex(), this);
+ chunk = new CacheChunkTable(database.chunk(), this);
+ }
+
+ /** @return the underlying database the cache wraps. */
+ public Database getDatabase() {
+ return database;
+ }
+
+ /** @return executor pool for long operations. */
+ public ExecutorService getExecutorService() {
+ return executorService;
+ }
+
+ /** @return client connecting to the cache service. */
+ public CacheService getClient() {
+ return client;
+ }
+
+ /** @return connection options for the cache service. */
+ public CacheOptions getOptions() {
+ return options;
+ }
+
+ public RepositoryIndexTable repositoryIndex() {
+ return repositoryIndex;
+ }
+
+ public RepositoryTable repository() {
+ return repository;
+ }
+
+ public RefTable ref() {
+ return ref;
+ }
+
+ public ObjectIndexTable objectIndex() {
+ return objectIndex;
+ }
+
+ public ChunkTable chunk() {
+ return chunk;
+ }
+
+ public CacheBuffer newWriteBuffer() {
+ return new CacheBuffer(database.newWriteBuffer(), client, options);
+ }
+}
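Usage sketch (illustrative only, not part of the patch): wrapping a backend Database with the cache layer. The backend Database and CacheService implementations are assumed to be supplied by a concrete storage provider elsewhere.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.eclipse.jgit.storage.dht.spi.Database;
import org.eclipse.jgit.storage.dht.spi.cache.CacheDatabase;
import org.eclipse.jgit.storage.dht.spi.cache.CacheOptions;
import org.eclipse.jgit.storage.dht.spi.cache.CacheService;

class CacheDatabaseSketch {
	static CacheDatabase wrap(Database backend, CacheService cacheService) {
		ExecutorService pool = Executors.newFixedThreadPool(2); // background cache population
		CacheOptions options = new CacheOptions(); // defaults: 500 ms timeout, 512 KiB buffer
		return new CacheDatabase(backend, pool, cacheService, options);
	}
}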
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java
new file mode 100644
index 0000000000..67c6c0ff08
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import java.util.Arrays;
+
+import org.eclipse.jgit.storage.dht.RowKey;
+import org.eclipse.jgit.util.RawParseUtils;
+
+/** Simple byte array based key for cache storage. */
+public class CacheKey {
+ private final Namespace ns;
+
+ private final byte[] key;
+
+ private volatile int hashCode;
+
+ /**
+ * Wrap a database key.
+ *
+ * @param ns
+ * the namespace the key is contained within.
+ * @param key
+ * the key to wrap.
+ */
+ public CacheKey(Namespace ns, RowKey key) {
+ this(ns, key.asBytes());
+ }
+
+ /**
+ * Wrap a byte array.
+ *
+ * @param ns
+ * the namespace the key is contained within.
+ * @param key
+ * the key to wrap.
+ */
+ public CacheKey(Namespace ns, byte[] key) {
+ this.ns = ns;
+ this.key = key;
+ }
+
+ /** @return namespace to segregate keys by. */
+ public Namespace getNamespace() {
+ return ns;
+ }
+
+ /** @return this key's bytes, within {@link #getNamespace()}. */
+ public byte[] getBytes() {
+ return key;
+ }
+
+ @Override
+ public int hashCode() {
+ if (hashCode == 0) {
+ int h = 5381;
+ for (int ptr = 0; ptr < key.length; ptr++)
+ h = ((h << 5) + h) + (key[ptr] & 0xff);
+ if (h == 0)
+ h = 1;
+ hashCode = h;
+ }
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof CacheKey) {
+ CacheKey m = (CacheKey) other;
+ return ns.equals(m.ns) && Arrays.equals(key, m.key);
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return ns + ":" + RawParseUtils.decode(key);
+ }
+}
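Usage sketch (illustrative only, not part of the patch): a cache key pairs a Namespace with the raw row key bytes, so identical row keys from different tables never collide. The Namespace constants used here (CHUNK, CHUNK_META) are the ones the wrappers in this patch reference.

import org.eclipse.jgit.storage.dht.ChunkKey;
import org.eclipse.jgit.storage.dht.spi.cache.CacheKey;
import org.eclipse.jgit.storage.dht.spi.cache.Namespace;

class CacheKeySketch {
	static CacheKey chunkDataKey(ChunkKey chunk) {
		// Same bytes, but a different namespace than the CHUNK_META entry.
		return new CacheKey(Namespace.CHUNK, chunk.asBytes());
	}
}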
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java
new file mode 100644
index 0000000000..0438dc09e7
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.ObjectIndexKey;
+import org.eclipse.jgit.storage.dht.ObjectInfo;
+import org.eclipse.jgit.storage.dht.StreamingCallback;
+import org.eclipse.jgit.storage.dht.Sync;
+import org.eclipse.jgit.storage.dht.TinyProtobuf;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change;
+
+/** Cache wrapper around ObjectIndexTable. */
+public class CacheObjectIndexTable implements ObjectIndexTable {
+ private final ObjectIndexTable db;
+
+ private final ExecutorService executor;
+
+ private final CacheService client;
+
+ private final Namespace ns = Namespace.OBJECT_INDEX;
+
+ /**
+ * Initialize a new wrapper.
+ *
+ * @param dbTable
+ * the underlying database's corresponding table.
+ * @param cacheDatabase
+ * the cache database.
+ */
+ public CacheObjectIndexTable(ObjectIndexTable dbTable,
+ CacheDatabase cacheDatabase) {
+ this.db = dbTable;
+ this.executor = cacheDatabase.getExecutorService();
+ this.client = cacheDatabase.getClient();
+ }
+
+ public void get(Context options, Set<ObjectIndexKey> objects,
+ AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback) {
+ List<CacheKey> toFind = new ArrayList<CacheKey>(objects.size());
+ for (ObjectIndexKey k : objects)
+ toFind.add(ns.key(k));
+ client.get(toFind, new LoaderFromCache(options, objects, callback));
+ }
+
+ public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer)
+ throws DhtException {
+ // During addition, the cache is not populated. This prevents a
+ // race condition when the cache is cold. Readers need to scan
+ // the database and ensure the oldest ObjectInfo is loaded into
+ // the cache in order to allow PackChunk to break delta cycles.
+ //
+ // This does have a small performance penalty, as recently added
+ // objects are often read not long after they were written. But
+ // without good multi-system transaction support between the
+ // cache and the underlying storage we cannot do better.
+ //
+ db.add(objId, info, ((CacheBuffer) buffer).getWriteBuffer());
+ }
+
+ public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer)
+ throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ db.remove(objId, chunk, buf.getWriteBuffer());
+
+ // TODO This suffers from a race condition. The removal from the
+ // cache can occur before the database update takes place, and a
+ // concurrent reader might re-populate the cache with the stale data.
+ //
+ buf.remove(ns.key(objId));
+ }
+
+ private static byte[] encode(Collection<ObjectInfo> list) {
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(128);
+ for (ObjectInfo info : list) {
+ TinyProtobuf.Encoder m = TinyProtobuf.encode(128);
+ m.bytes(1, info.getChunkKey().asBytes());
+ m.bytes(2, info.asBytes());
+ m.fixed64(3, info.getTime());
+ e.message(1, m);
+ }
+ return e.asByteArray();
+ }
+
+ private static ObjectInfo decodeItem(TinyProtobuf.Decoder m) {
+ ChunkKey key = null;
+ TinyProtobuf.Decoder data = null;
+ long time = -1;
+
+ for (;;) {
+ switch (m.next()) {
+ case 0:
+ return ObjectInfo.fromBytes(key, data, time);
+ case 1:
+ key = ChunkKey.fromBytes(m);
+ continue;
+ case 2:
+ data = m.message();
+ continue;
+ case 3:
+ time = m.fixed64();
+ continue;
+ default:
+ m.skip();
+ }
+ }
+ }
+
+ private static Collection<ObjectInfo> decode(byte[] raw) {
+ List<ObjectInfo> res = new ArrayList<ObjectInfo>(1);
+ TinyProtobuf.Decoder d = TinyProtobuf.decode(raw);
+ for (;;) {
+ switch (d.next()) {
+ case 0:
+ return res;
+ case 1:
+ res.add(decodeItem(d.message()));
+ continue;
+ default:
+ d.skip();
+ }
+ }
+ }
+
+ private class LoaderFromCache implements
+ StreamingCallback<Map<CacheKey, byte[]>> {
+ private final Object lock = new Object();
+
+ private final Context options;
+
+ private final Set<ObjectIndexKey> remaining;
+
+ private final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> normalCallback;
+
+ private final StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> streamingCallback;
+
+ private final Map<ObjectIndexKey, Collection<ObjectInfo>> all;
+
+ LoaderFromCache(
+ Context options,
+ Set<ObjectIndexKey> objects,
+ AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback) {
+ this.options = options;
+ this.remaining = new HashSet<ObjectIndexKey>(objects);
+ this.normalCallback = callback;
+
+ if (callback instanceof StreamingCallback<?>) {
+ streamingCallback = (StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>>) callback;
+ all = null;
+ } else {
+ streamingCallback = null;
+ all = new HashMap<ObjectIndexKey, Collection<ObjectInfo>>();
+ }
+ }
+
+ public void onPartialResult(Map<CacheKey, byte[]> result) {
+ Map<ObjectIndexKey, Collection<ObjectInfo>> tmp;
+ if (streamingCallback != null)
+ tmp = new HashMap<ObjectIndexKey, Collection<ObjectInfo>>();
+ else
+ tmp = null;
+
+ for (Map.Entry<CacheKey, byte[]> e : result.entrySet()) {
+ ObjectIndexKey objKey;
+ Collection<ObjectInfo> list = decode(e.getValue());
+ objKey = ObjectIndexKey.fromBytes(e.getKey().getBytes());
+
+ if (tmp != null)
+ tmp.put(objKey, list);
+ else {
+ synchronized (lock) {
+ all.put(objKey, list);
+ remaining.remove(objKey);
+ }
+ }
+ }
+
+ if (tmp != null) {
+ streamingCallback.onPartialResult(tmp);
+ synchronized (lock) {
+ remaining.removeAll(tmp.keySet());
+ }
+ }
+ }
+
+ public void onSuccess(Map<CacheKey, byte[]> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+
+ synchronized (lock) {
+ if (remaining.isEmpty() || options == Context.FAST_MISSING_OK) {
+ normalCallback.onSuccess(all);
+ } else {
+ db.get(options, remaining, new LoaderFromDatabase(all,
+ normalCallback, streamingCallback));
+ }
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ // TODO(spearce) We may want to just drop to database here.
+ normalCallback.onFailure(error);
+ }
+ }
+
+ private class LoaderFromDatabase implements
+ StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> {
+ private final Object lock = new Object();
+
+ private final Map<ObjectIndexKey, Collection<ObjectInfo>> all;
+
+ private final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> normalCallback;
+
+ private final StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> streamingCallback;
+
+ LoaderFromDatabase(
+ Map<ObjectIndexKey, Collection<ObjectInfo>> all,
+ AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> normalCallback,
+ StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> streamingCallback) {
+ this.all = all;
+ this.normalCallback = normalCallback;
+ this.streamingCallback = streamingCallback;
+ }
+
+ public void onPartialResult(
+ Map<ObjectIndexKey, Collection<ObjectInfo>> result) {
+ final Map<ObjectIndexKey, Collection<ObjectInfo>> toPut = copy(result);
+
+ if (streamingCallback != null)
+ streamingCallback.onPartialResult(result);
+ else {
+ synchronized (lock) {
+ all.putAll(result);
+ }
+ }
+
+ // Encoding is rather expensive, so move the cache population
+ // into a different background thread to prevent the current
+ // database task from being starved of time.
+ //
+ executor.submit(new Runnable() {
+ public void run() {
+ List<Change> ops = new ArrayList<Change>(toPut.size());
+
+ for (Map.Entry<ObjectIndexKey, Collection<ObjectInfo>> e : all(toPut)) {
+ List<ObjectInfo> items = copy(e.getValue());
+ ObjectInfo.sort(items);
+ ops.add(Change.put(ns.key(e.getKey()), encode(items)));
+ }
+
+ client.modify(ops, Sync.<Void> none());
+ }
+ });
+ }
+
+ private <K, V> Map<K, V> copy(Map<K, V> map) {
+ return new HashMap<K, V>(map);
+ }
+
+ private <T> List<T> copy(Collection<T> result) {
+ return new ArrayList<T>(result);
+ }
+
+ private <K, V> Set<Map.Entry<K, V>> all(final Map<K, V> toPut) {
+ return toPut.entrySet();
+ }
+
+ public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> result) {
+ if (result != null && !result.isEmpty())
+ onPartialResult(result);
+
+ synchronized (lock) {
+ normalCallback.onSuccess(all);
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ normalCallback.onFailure(error);
+ }
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java
new file mode 100644
index 0000000000..9eef55c3ff
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.storage.dht.Timeout;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+
+/** Options to configure the cache. */
+public class CacheOptions {
+ private Timeout timeout;
+
+ private int writeBufferSize;
+
+ /** Initialize default options. */
+ public CacheOptions() {
+ setTimeout(Timeout.milliseconds(500));
+ setWriteBufferSize(512 * 1024);
+ }
+
+ /** @return default timeout for all operations. */
+ public Timeout getTimeout() {
+ return timeout;
+ }
+
+ /**
+ * Set the default timeout to wait on long operations.
+ *
+ * @param maxWaitTime
+ * new wait time.
+ * @return {@code this}
+ */
+ public CacheOptions setTimeout(Timeout maxWaitTime) {
+ if (maxWaitTime == null || maxWaitTime.getTime() < 0)
+ throw new IllegalArgumentException();
+ timeout = maxWaitTime;
+ return this;
+ }
+
+ /** @return size in bytes to buffer operations. */
+ public int getWriteBufferSize() {
+ return writeBufferSize;
+ }
+
+ /**
+ * Set the maximum number of outstanding bytes in a {@link WriteBuffer}.
+ *
+ * @param sizeInBytes
+ * maximum number of bytes.
+ * @return {@code this}
+ */
+ public CacheOptions setWriteBufferSize(int sizeInBytes) {
+ writeBufferSize = Math.max(1024, sizeInBytes);
+ return this;
+ }
+
+ /**
+ * Update properties by setting fields from the configuration.
+ * <p>
+ * If a property is not defined in the configuration, then it is left
+ * unmodified.
+ *
+ * @param rc
+ * configuration to read properties from.
+ * @return {@code this}
+ */
+ public CacheOptions fromConfig(final Config rc) {
+ setTimeout(Timeout.getTimeout(rc, "cache", "dht", "timeout", getTimeout()));
+ setWriteBufferSize(rc.getInt("cache", "dht", "writeBufferSize", getWriteBufferSize()));
+ return this;
+ }
+}
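Configuration sketch (illustrative only, not part of the patch): the section and key names read by fromConfig() come straight from the calls above. The timeout value syntax is whatever Timeout.getTimeout() accepts, so only writeBufferSize is shown with a concrete value.

import org.eclipse.jgit.errors.ConfigInvalidException;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.storage.dht.spi.cache.CacheOptions;

class CacheOptionsSketch {
	static CacheOptions fromText() throws ConfigInvalidException {
		Config rc = new Config();
		// A "timeout" key in the same [cache "dht"] section is also honored.
		rc.fromText("[cache \"dht\"]\n\twriteBufferSize = 262144\n");
		return new CacheOptions().fromConfig(rc);
	}
}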
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java
new file mode 100644
index 0000000000..5edb49eddf
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RefData;
+import org.eclipse.jgit.storage.dht.RefKey;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.RefTable;
+
+/**
+ * Cache wrapper around RefTable.
+ * <p>
+ * Currently this is a straight pass-through.
+ */
+public class CacheRefTable implements RefTable {
+ private final RefTable db;
+
+ /**
+ * Initialize a new wrapper.
+ *
+ * @param dbTable
+ * the underlying database's corresponding table.
+ * @param cacheDatabase
+ * the cache database.
+ */
+ public CacheRefTable(RefTable dbTable, CacheDatabase cacheDatabase) {
+ this.db = dbTable;
+ }
+
+ public Map<RefKey, RefData> getAll(Context options, RepositoryKey repository)
+ throws DhtException, TimeoutException {
+ return db.getAll(options, repository);
+ }
+
+ public boolean compareAndRemove(RefKey refKey, RefData oldData)
+ throws DhtException, TimeoutException {
+ return db.compareAndRemove(refKey, oldData);
+ }
+
+ public boolean compareAndPut(RefKey refKey, RefData oldData, RefData newData)
+ throws DhtException, TimeoutException {
+ return db.compareAndPut(refKey, oldData, newData);
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java
new file mode 100644
index 0000000000..5ff43910f3
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singleton;
+
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.RepositoryName;
+import org.eclipse.jgit.storage.dht.Sync;
+import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable;
+import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change;
+
+/** Cache wrapper around RepositoryIndexTable. */
+public class CacheRepositoryIndexTable implements RepositoryIndexTable {
+ private final RepositoryIndexTable db;
+
+ private final CacheService client;
+
+ private final CacheOptions options;
+
+ private final Namespace ns;
+
+ private final Sync<Void> none;
+
+ /**
+ * Initialize a new wrapper.
+ *
+ * @param dbTable
+ * the underlying database's corresponding table.
+ * @param cacheDatabase
+ * the cache database.
+ */
+ public CacheRepositoryIndexTable(RepositoryIndexTable dbTable,
+ CacheDatabase cacheDatabase) {
+ this.db = dbTable;
+ this.client = cacheDatabase.getClient();
+ this.options = cacheDatabase.getOptions();
+ this.ns = Namespace.REPOSITORY_INDEX;
+ this.none = Sync.none();
+ }
+
+ public RepositoryKey get(RepositoryName name) throws DhtException,
+ TimeoutException {
+ CacheKey memKey = ns.key(name);
+ Sync<Map<CacheKey, byte[]>> sync = Sync.create();
+ client.get(singleton(memKey), sync);
+
+ Map<CacheKey, byte[]> result;
+ try {
+ result = sync.get(options.getTimeout());
+ } catch (InterruptedException e) {
+ throw new TimeoutException();
+ } catch (TimeoutException timeout) {
+ // Fall through and read the database directly.
+ result = emptyMap();
+ }
+
+ byte[] data = result.get(memKey);
+ if (data != null) {
+ if (data.length == 0)
+ return null;
+ return RepositoryKey.fromBytes(data);
+ }
+
+ RepositoryKey key = db.get(name);
+ data = key != null ? key.asBytes() : new byte[0];
+ client.modify(singleton(Change.put(memKey, data)), none);
+ return key;
+ }
+
+ public void putUnique(RepositoryName name, RepositoryKey key)
+ throws DhtException, TimeoutException {
+ db.putUnique(name, key);
+
+ Sync<Void> sync = Sync.create();
+ CacheKey memKey = ns.key(name);
+ byte[] data = key.asBytes();
+ client.modify(singleton(Change.put(memKey, data)), sync);
+ try {
+ sync.get(options.getTimeout());
+ } catch (InterruptedException e) {
+ throw new TimeoutException();
+ }
+ }
+}
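
The wrapper above is a read-through cache with negative caching: a hit is returned directly, a zero-length cached value records a name that is known not to exist, and a miss falls through to the database and back-fills the cache. A minimal usage sketch, assuming a backing RepositoryIndexTable dbIndex, a CacheDatabase cacheDb, and a RepositoryName name are already available (none of them are defined in this file); imports and exception handling are omitted:

    RepositoryIndexTable index = new CacheRepositoryIndexTable(dbIndex, cacheDb);

    // First lookup may miss the cache, read the database, then back-fill.
    RepositoryKey key = index.get(name); // null if the name is not registered

    // Later lookups for the same name are answered from the cache, including
    // the "not found" case, which is stored as an empty value.
    RepositoryKey again = index.get(name);
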
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java
new file mode 100644
index 0000000000..b71c242625
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singleton;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.CachedPackInfo;
+import org.eclipse.jgit.storage.dht.CachedPackKey;
+import org.eclipse.jgit.storage.dht.ChunkInfo;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.Sync;
+import org.eclipse.jgit.storage.dht.TinyProtobuf;
+import org.eclipse.jgit.storage.dht.spi.RepositoryTable;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change;
+
+/** Cache wrapper around RepositoryTable. */
+public class CacheRepositoryTable implements RepositoryTable {
+ private final RepositoryTable db;
+
+ private final CacheService client;
+
+ private final CacheOptions options;
+
+ private final Namespace nsCachedPack = Namespace.CACHED_PACK;
+
+ private final Sync<Void> none;
+
+ /**
+ * Initialize a new wrapper.
+ *
+ * @param dbTable
+ * the underlying database's corresponding table.
+ * @param cacheDatabase
+ * the cache database.
+ */
+ public CacheRepositoryTable(RepositoryTable dbTable,
+ CacheDatabase cacheDatabase) {
+ this.db = dbTable;
+ this.client = cacheDatabase.getClient();
+ this.options = cacheDatabase.getOptions();
+ this.none = Sync.none();
+ }
+
+ public RepositoryKey nextKey() throws DhtException {
+ return db.nextKey();
+ }
+
+ public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer)
+ throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ db.put(repo, info, buf.getWriteBuffer());
+ }
+
+ public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer)
+ throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ db.remove(repo, chunk, buf.getWriteBuffer());
+ }
+
+ public Collection<CachedPackInfo> getCachedPacks(RepositoryKey repo)
+ throws DhtException, TimeoutException {
+ CacheKey memKey = nsCachedPack.key(repo);
+ Sync<Map<CacheKey, byte[]>> sync = Sync.create();
+ client.get(singleton(memKey), sync);
+
+ Map<CacheKey, byte[]> result;
+ try {
+ result = sync.get(options.getTimeout());
+ } catch (InterruptedException e) {
+ throw new TimeoutException();
+ } catch (TimeoutException timeout) {
+ // Fall through and read the database directly.
+ result = emptyMap();
+ }
+
+ byte[] data = result.get(memKey);
+ if (data != null) {
+ List<CachedPackInfo> r = new ArrayList<CachedPackInfo>();
+ TinyProtobuf.Decoder d = TinyProtobuf.decode(data);
+ for (;;) {
+ switch (d.next()) {
+ case 0:
+ return r;
+ case 1:
+ r.add(CachedPackInfo.fromBytes(d.message()));
+ continue;
+ default:
+ d.skip();
+ }
+ }
+ }
+
+ Collection<CachedPackInfo> r = db.getCachedPacks(repo);
+ TinyProtobuf.Encoder e = TinyProtobuf.encode(1024);
+ for (CachedPackInfo info : r)
+ e.bytes(1, info.asBytes());
+ client.modify(singleton(Change.put(memKey, e.asByteArray())), none);
+ return r;
+ }
+
+ public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer)
+ throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ db.put(repo, info, buf.getWriteBuffer());
+ buf.removeAfterFlush(nsCachedPack.key(repo));
+ }
+
+ public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer)
+ throws DhtException {
+ CacheBuffer buf = (CacheBuffer) buffer;
+ db.remove(repo, key, buf.getWriteBuffer());
+ buf.removeAfterFlush(nsCachedPack.key(repo));
+ }
+}
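
The cached pack list is kept in the cache as a single value: each CachedPackInfo is written as a length-delimited field 1 with TinyProtobuf, exactly as getCachedPacks() does above. A sketch of that round trip, assuming a Collection<CachedPackInfo> named packs exists; imports are omitted:

    // Encode: one field-1 message per cached pack.
    TinyProtobuf.Encoder enc = TinyProtobuf.encode(1024);
    for (CachedPackInfo info : packs)
        enc.bytes(1, info.asBytes());
    byte[] value = enc.asByteArray();

    // Decode: collect field 1 until the end of the buffer, skip anything else.
    List<CachedPackInfo> out = new ArrayList<CachedPackInfo>();
    TinyProtobuf.Decoder dec = TinyProtobuf.decode(value);
    DECODE: for (;;) {
        switch (dec.next()) {
        case 0:
            break DECODE;
        case 1:
            out.add(CachedPackInfo.fromBytes(dec.message()));
            continue;
        default:
            dec.skip();
        }
    }
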
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java
new file mode 100644
index 0000000000..31616b51c5
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import java.util.Collection;
+import java.util.Map;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.StreamingCallback;
+
+/** Connects to the network-based memory cache server(s). */
+public interface CacheService {
+ /**
+ * Look up one or more cache keys and return the results.
+ * <p>
+ * Callers are responsible for breaking up very large collections of cache
+ * keys into smaller units, based on the reader's batch size option.
+ *
+ * @param keys
+ * keys to locate.
+ * @param callback
+ * receives the results when ready. If this is an instance of
+ * {@link StreamingCallback}, implementors should try to deliver
+ * results early.
+ */
+ void get(Collection<CacheKey> keys,
+ AsyncCallback<Map<CacheKey, byte[]>> callback);
+
+ /**
+ * Modify one or more cache keys.
+ *
+ * @param changes
+ * changes to apply to the cache.
+ * @param callback
+ * receives notification when the changes have been applied.
+ */
+ void modify(Collection<Change> changes, AsyncCallback<Void> callback);
+
+ /** A change to the cache. */
+ public static class Change {
+ /** Operation the change describes. */
+ public static enum Type {
+ /** Store (or replace) the key. */
+ PUT,
+
+ /** Only store the key if not already stored. */
+ PUT_IF_ABSENT,
+
+ /** Remove the associated key. */
+ REMOVE;
+ }
+
+ /**
+ * Initialize a put operation.
+ *
+ * @param key
+ * the key to store.
+ * @param data
+ * the value to store.
+ * @return the operation.
+ */
+ public static Change put(CacheKey key, byte[] data) {
+ return new Change(Type.PUT, key, data);
+ }
+
+ /**
+ * Initialize a put-if-absent operation.
+ *
+ * @param key
+ * the key to store.
+ * @param data
+ * the value to store.
+ * @return the operation.
+ */
+ public static Change putIfAbsent(CacheKey key, byte[] data) {
+ return new Change(Type.PUT_IF_ABSENT, key, data);
+ }
+
+ /**
+ * Initialize a remove operation.
+ *
+ * @param key
+ * the key to remove.
+ * @return the operation.
+ */
+ public static Change remove(CacheKey key) {
+ return new Change(Type.REMOVE, key, null);
+ }
+
+ private final Type type;
+
+ private final CacheKey key;
+
+ private final byte[] data;
+
+ /**
+ * Initialize a new change.
+ *
+ * @param type
+ * type of change to apply.
+ * @param key
+ * the key that will be modified.
+ * @param data
+ * new value for the key, or null if the change has no value.
+ */
+ public Change(Type type, CacheKey key, byte[] data) {
+ this.type = type;
+ this.key = key;
+ this.data = data;
+ }
+
+ /** @return type of change that will take place. */
+ public Type getType() {
+ return type;
+ }
+
+ /** @return the key that will be modified. */
+ public CacheKey getKey() {
+ return key;
+ }
+
+ /** @return new data value if this is a PUT type of change. */
+ public byte[] getData() {
+ return data;
+ }
+
+ @Override
+ public String toString() {
+ return getType() + " " + getKey();
+ }
+ }
+}
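
Callers of this interface build Change objects with the static factories and wait on an AsyncCallback; the Sync helper used by the wrappers above is the simplest way to block for a result. A sketch, assuming a CacheService service, a CacheOptions options, and a prepared CacheKey key with byte[] data; imports and checked-exception handling are omitted:

    // Store a value and wait for the cache cluster to acknowledge the write.
    Sync<Void> ack = Sync.create();
    service.modify(Collections.singleton(Change.put(key, data)), ack);
    ack.get(options.getTimeout());

    // Read it back; a key with no cached entry is simply absent from the map.
    Sync<Map<CacheKey, byte[]>> read = Sync.create();
    service.get(Collections.singleton(key), read);
    byte[] value = read.get(options.getTimeout()).get(key);
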
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java
new file mode 100644
index 0000000000..76dc311987
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.cache;
+
+import java.util.Arrays;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.storage.dht.RowKey;
+import org.eclipse.jgit.storage.dht.spi.ChunkTable;
+import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable;
+import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable;
+import org.eclipse.jgit.util.RawParseUtils;
+
+/** Defines a space within the cache cluster. */
+public class Namespace {
+ /** Namespace used by the {@link ChunkTable}. */
+ public static final Namespace CHUNK = create("chunk");
+
+ /** Namespace used by the {@link ChunkTable} for the meta field only. */
+ public static final Namespace CHUNK_META = create("chunkMeta");
+
+ /** Namespace used by the {@link ObjectIndexTable}. */
+ public static final Namespace OBJECT_INDEX = create("objectIndex");
+
+ /** Namespace used by the {@link RepositoryIndexTable}. */
+ public static final Namespace REPOSITORY_INDEX = create("repositoryIndex");
+
+ /** Namespace used for cached pack information. */
+ public static final Namespace CACHED_PACK = create("cachedPack");
+
+ /**
+ * Create a namespace from a string name.
+ *
+ * @param name
+ * the name to wrap.
+ * @return the namespace.
+ */
+ public static Namespace create(String name) {
+ return new Namespace(Constants.encode(name));
+ }
+
+ /**
+ * Create a namespace from a byte array.
+ *
+ * @param name
+ * the name to wrap.
+ * @return the namespace.
+ */
+ public static Namespace create(byte[] name) {
+ return new Namespace(name);
+ }
+
+ private final byte[] name;
+
+ private volatile int hashCode;
+
+ private Namespace(byte[] name) {
+ this.name = name;
+ }
+
+ /** @return this namespace, encoded in UTF-8. */
+ public byte[] getBytes() {
+ return name;
+ }
+
+ /**
+ * Construct a CacheKey within this namespace.
+ *
+ * @param key
+ * the key to include.
+ * @return key within this namespace.
+ */
+ public CacheKey key(byte[] key) {
+ return new CacheKey(this, key);
+ }
+
+ /**
+ * Construct a CacheKey within this namespace.
+ *
+ * @param key
+ * the key to include.
+ * @return key within this namespace.
+ */
+ public CacheKey key(RowKey key) {
+ return new CacheKey(this, key);
+ }
+
+ @Override
+ public int hashCode() {
+ if (hashCode == 0) {
+ int h = 5381;
+ for (int ptr = 0; ptr < name.length; ptr++)
+ h = ((h << 5) + h) + (name[ptr] & 0xff);
+ if (h == 0)
+ h = 1;
+ hashCode = h;
+ }
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this)
+ return true;
+ if (other instanceof Namespace)
+ return Arrays.equals(name, ((Namespace) other).name);
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return RawParseUtils.decode(name);
+ }
+}
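
A namespace only prefixes the row key, so several tables can share one cache cluster without their keys colliding. A short sketch using the constants declared above; imports are omitted:

    // The same row bytes yield distinct keys in distinct namespaces.
    byte[] row = Constants.encode("r1");
    CacheKey inChunk = Namespace.CHUNK.key(row);
    CacheKey inIndex = Namespace.OBJECT_INDEX.key(row);

    // Additional namespaces can be created for new tables.
    Namespace mine = Namespace.create("myTable");
    CacheKey mineKey = mine.key(row);
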
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java
new file mode 100644
index 0000000000..8a04dbb6d5
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.ChunkMeta;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.PackChunk;
+import org.eclipse.jgit.storage.dht.spi.ChunkTable;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
+
+final class MemChunkTable implements ChunkTable {
+ private final MemTable table = new MemTable();
+
+ private final ColumnMatcher colData = new ColumnMatcher("data");
+
+ private final ColumnMatcher colIndex = new ColumnMatcher("index");
+
+ private final ColumnMatcher colMeta = new ColumnMatcher("meta");
+
+ public void get(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<PackChunk.Members>> callback) {
+ int cnt = keys.size();
+ List<PackChunk.Members> out = new ArrayList<PackChunk.Members>(cnt);
+
+ for (ChunkKey chunk : keys) {
+ byte[] row = chunk.asBytes();
+ MemTable.Cell cell;
+
+ cell = table.get(row, colData.name());
+ if (cell == null)
+ continue;
+
+ PackChunk.Members m = new PackChunk.Members();
+ m.setChunkKey(chunk);
+ m.setChunkData(cell.getValue());
+
+ cell = table.get(row, colIndex.name());
+ if (cell != null)
+ m.setChunkIndex(cell.getValue());
+
+ cell = table.get(row, colMeta.name());
+ if (cell != null)
+ m.setMeta(ChunkMeta.fromBytes(chunk, cell.getValue()));
+
+ out.add(m);
+ }
+
+ callback.onSuccess(out);
+ }
+
+ public void getMeta(Context options, Set<ChunkKey> keys,
+ AsyncCallback<Collection<ChunkMeta>> callback) {
+ int cnt = keys.size();
+ List<ChunkMeta> out = new ArrayList<ChunkMeta>(cnt);
+
+ for (ChunkKey chunk : keys) {
+ byte[] row = chunk.asBytes();
+ MemTable.Cell cell = table.get(row, colMeta.name());
+ if (cell != null)
+ out.add(ChunkMeta.fromBytes(chunk, cell.getValue()));
+ }
+
+ callback.onSuccess(out);
+ }
+
+ public void put(PackChunk.Members chunk, WriteBuffer buffer)
+ throws DhtException {
+ byte[] row = chunk.getChunkKey().asBytes();
+
+ if (chunk.hasChunkData())
+ table.put(row, colData.name(), chunk.getChunkData());
+
+ if (chunk.hasChunkIndex())
+ table.put(row, colIndex.name(), chunk.getChunkIndex());
+
+ if (chunk.hasMeta())
+ table.put(row, colMeta.name(), chunk.getMeta().asBytes());
+ }
+
+ public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException {
+ table.deleteRow(key.asBytes());
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java
new file mode 100644
index 0000000000..e6f4f7acac
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.ObjectIndexKey;
+import org.eclipse.jgit.storage.dht.ObjectInfo;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
+
+final class MemObjectIndexTable implements ObjectIndexTable {
+ private final MemTable table = new MemTable();
+
+ private final ColumnMatcher colInfo = new ColumnMatcher("info:");
+
+ public void get(Context options, Set<ObjectIndexKey> objects,
+ AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback) {
+ Map<ObjectIndexKey, Collection<ObjectInfo>> out = new HashMap<ObjectIndexKey, Collection<ObjectInfo>>();
+
+ for (ObjectIndexKey objId : objects) {
+ for (MemTable.Cell cell : table.scanFamily(objId.asBytes(), colInfo)) {
+ Collection<ObjectInfo> info = out.get(objId);
+ if (info == null) {
+ info = new ArrayList<ObjectInfo>(4);
+ out.put(objId, info);
+ }
+
+ ChunkKey chunk = ChunkKey.fromBytes(
+ colInfo.suffix(cell.getName()));
+ byte[] value = cell.getValue();
+ long time = cell.getTimestamp();
+ info.add(ObjectInfo.fromBytes(chunk, value, time));
+ }
+ }
+
+ callback.onSuccess(out);
+ }
+
+ public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer)
+ throws DhtException {
+ ChunkKey chunk = info.getChunkKey();
+ table.put(objId.asBytes(), colInfo.append(chunk.asBytes()),
+ info.asBytes());
+ }
+
+ public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer)
+ throws DhtException {
+ table.delete(objId.asBytes(), colInfo.append(chunk.asBytes()));
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java
new file mode 100644
index 0000000000..6c41f20c4a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RefData;
+import org.eclipse.jgit.storage.dht.RefKey;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.spi.Context;
+import org.eclipse.jgit.storage.dht.spi.RefTable;
+import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
+
+final class MemRefTable implements RefTable {
+ private final MemTable table = new MemTable();
+
+ private final ColumnMatcher colRef = new ColumnMatcher("ref:");
+
+ public Map<RefKey, RefData> getAll(Context options, RepositoryKey repository)
+ throws DhtException, TimeoutException {
+ Map<RefKey, RefData> out = new HashMap<RefKey, RefData>();
+ for (MemTable.Cell cell : table.scanFamily(repository.asBytes(), colRef)) {
+ RefKey ref = RefKey.fromBytes(colRef.suffix(cell.getName()));
+ RefData val = RefData.fromBytes(cell.getValue());
+ out.put(ref, val);
+ }
+ return out;
+ }
+
+ public boolean compareAndPut(RefKey refKey, RefData oldData, RefData newData)
+ throws DhtException, TimeoutException {
+ RepositoryKey repo = refKey.getRepositoryKey();
+ return table.compareAndSet( //
+ repo.asBytes(), //
+ colRef.append(refKey.asBytes()), //
+ oldData != RefData.NONE ? oldData.asBytes() : null, //
+ newData.asBytes());
+ }
+
+ public boolean compareAndRemove(RefKey refKey, RefData oldData)
+ throws DhtException, TimeoutException {
+ RepositoryKey repo = refKey.getRepositoryKey();
+ return table.compareAndSet( //
+ repo.asBytes(), //
+ colRef.append(refKey.asBytes()), //
+ oldData != RefData.NONE ? oldData.asBytes() : null, //
+ null);
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java
new file mode 100644
index 0000000000..46a1fd619a
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.text.MessageFormat;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.DhtText;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.RepositoryName;
+import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable;
+import org.eclipse.jgit.storage.dht.spi.memory.MemTable.Cell;
+import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
+
+final class MemRepositoryIndexTable implements RepositoryIndexTable {
+ private final MemTable table = new MemTable();
+
+ private final ColumnMatcher colId = new ColumnMatcher("id");
+
+ public RepositoryKey get(RepositoryName name) throws DhtException,
+ TimeoutException {
+ Cell cell = table.get(name.asBytes(), colId.name());
+ if (cell == null)
+ return null;
+ return RepositoryKey.fromBytes(cell.getValue());
+ }
+
+ public void putUnique(RepositoryName name, RepositoryKey key)
+ throws DhtException, TimeoutException {
+ boolean ok = table.compareAndSet( //
+ name.asBytes(), //
+ colId.name(), //
+ null, //
+ key.asBytes());
+ if (!ok)
+ throw new DhtException(MessageFormat.format(
+ DhtText.get().repositoryAlreadyExists, name.asString()));
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java
new file mode 100644
index 0000000000..01e90de3ba
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.eclipse.jgit.storage.dht.CachedPackInfo;
+import org.eclipse.jgit.storage.dht.CachedPackKey;
+import org.eclipse.jgit.storage.dht.ChunkInfo;
+import org.eclipse.jgit.storage.dht.ChunkKey;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.RepositoryKey;
+import org.eclipse.jgit.storage.dht.spi.RepositoryTable;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
+
+final class MemRepositoryTable implements RepositoryTable {
+ private final AtomicInteger nextId = new AtomicInteger();
+
+ private final MemTable table = new MemTable();
+
+ private final ColumnMatcher colChunkInfo = new ColumnMatcher("chunk-info:");
+
+ private final ColumnMatcher colCachedPack = new ColumnMatcher("cached-pack:");
+
+ public RepositoryKey nextKey() throws DhtException {
+ return RepositoryKey.create(nextId.incrementAndGet());
+ }
+
+ public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer)
+ throws DhtException {
+ table.put(repo.asBytes(),
+ colChunkInfo.append(info.getChunkKey().asBytes()),
+ info.asBytes());
+ }
+
+ public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer)
+ throws DhtException {
+ table.delete(repo.asBytes(), colChunkInfo.append(chunk.asBytes()));
+ }
+
+ public Collection<CachedPackInfo> getCachedPacks(RepositoryKey repo)
+ throws DhtException, TimeoutException {
+ List<CachedPackInfo> out = new ArrayList<CachedPackInfo>(4);
+ for (MemTable.Cell cell : table.scanFamily(repo.asBytes(), colCachedPack))
+ out.add(CachedPackInfo.fromBytes(cell.getValue()));
+ return out;
+ }
+
+ public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer)
+ throws DhtException {
+ table.put(repo.asBytes(),
+ colCachedPack.append(info.getRowKey().asBytes()),
+ info.asBytes());
+ }
+
+ public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer)
+ throws DhtException {
+ table.delete(repo.asBytes(), colCachedPack.append(key.asBytes()));
+ }
+}
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java
new file mode 100644
index 0000000000..ec28b34064
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
+import org.eclipse.jgit.util.RawParseUtils;
+import org.eclipse.jgit.util.SystemReader;
+
+/**
+ * Tiny in-memory NoSQL style table.
+ * <p>
+ * This table is thread-safe, but not very efficient. It uses a single lock to
+ * protect its internal data structure from concurrent access, and stores all
+ * data as byte arrays. To reduce memory usage, the arrays passed by the caller
+ * during put or compareAndSet are used as-is in the internal data structure,
+ * and may be returned later. Callers should not modify byte arrays once they
+ * are stored in the table, or when obtained from the table.
+ */
+public class MemTable {
+ private final Map<Key, Map<Key, Cell>> map;
+
+ private final Object lock;
+
+ /** Initialize an empty table. */
+ public MemTable() {
+ map = new HashMap<Key, Map<Key, Cell>>();
+ lock = new Object();
+ }
+
+ /**
+ * Put a value into a cell.
+ *
+ * @param row
+ * row key of the cell.
+ * @param col
+ * column name of the cell.
+ * @param val
+ * value to store in the cell.
+ */
+ public void put(byte[] row, byte[] col, byte[] val) {
+ synchronized (lock) {
+ Key rowKey = new Key(row);
+ Map<Key, Cell> r = map.get(rowKey);
+ if (r == null) {
+ r = new HashMap<Key, Cell>(4);
+ map.put(rowKey, r);
+ }
+ r.put(new Key(col), new Cell(row, col, val));
+ }
+ }
+
+ /**
+ * Delete an entire row.
+ *
+ * @param row
+ * row key of the row to delete.
+ */
+ public void deleteRow(byte[] row) {
+ synchronized (lock) {
+ map.remove(new Key(row));
+ }
+ }
+
+ /**
+ * Delete a cell.
+ *
+ * @param row
+ * row key of the cell.
+ * @param col
+ * column name of the cell to delete.
+ */
+ public void delete(byte[] row, byte[] col) {
+ synchronized (lock) {
+ Key rowKey = new Key(row);
+ Map<Key, Cell> r = map.get(rowKey);
+ if (r == null)
+ return;
+
+ r.remove(new Key(col));
+ if (r.isEmpty())
+ map.remove(rowKey);
+ }
+ }
+
+ /**
+ * Compare and put or delete a cell.
+ * <p>
+ * This method performs an atomic compare-and-swap operation on the named
+ * cell. If the cell does not yet exist, it will be created. If the cell
+ * exists, it will be replaced, and if {@code newVal} is null, the cell will
+ * be deleted.
+ *
+ * @param row
+ * row key of the cell.
+ * @param col
+ * column name of the cell.
+ * @param oldVal
+ * if null, the cell must not exist; otherwise the cell's current
+ * value must exactly equal this value for the update to occur.
+ * @param newVal
+ * if null, the cell will be removed; otherwise the cell will be
+ * created or updated to this value.
+ * @return true if successful, false if {@code oldVal} does not match.
+ */
+ public boolean compareAndSet(byte[] row, byte[] col, byte[] oldVal,
+ byte[] newVal) {
+ synchronized (lock) {
+ Key rowKey = new Key(row);
+ Key colKey = new Key(col);
+
+ Map<Key, Cell> r = map.get(rowKey);
+ if (r == null) {
+ r = new HashMap<Key, Cell>(4);
+ map.put(rowKey, r);
+ }
+
+ Cell oldCell = r.get(colKey);
+ if (!same(oldCell, oldVal)) {
+ if (r.isEmpty())
+ map.remove(rowKey);
+ return false;
+ }
+
+ if (newVal != null) {
+ r.put(colKey, new Cell(row, col, newVal));
+ return true;
+ }
+
+ r.remove(colKey);
+ if (r.isEmpty())
+ map.remove(rowKey);
+ return true;
+ }
+ }
+
+ private static boolean same(Cell oldCell, byte[] expVal) {
+ if (oldCell == null)
+ return expVal == null;
+
+ if (expVal == null)
+ return false;
+
+ return Arrays.equals(oldCell.value, expVal);
+ }
+
+ /**
+ * Get a single cell, or null.
+ *
+ * @param row
+ * row key of the cell.
+ * @param col
+ * column name of the cell.
+ * @return the cell, or null.
+ */
+ public Cell get(byte[] row, byte[] col) {
+ synchronized (lock) {
+ Map<Key, Cell> r = map.get(new Key(row));
+ return r != null ? r.get(new Key(col)) : null;
+ }
+ }
+
+ /**
+ * Scan all cells in a row.
+ *
+ * @param row
+ * the row to scan.
+ * @param family
+ * if not null, the family to filter and return.
+ * @return the matching cells, in no particular order. Never null.
+ */
+ public Iterable<Cell> scanFamily(byte[] row, ColumnMatcher family) {
+ synchronized (lock) {
+ Map<Key, Cell> r = map.get(new Key(row));
+ if (r == null)
+ return Collections.emptyList();
+
+ if (family == null)
+ return new ArrayList<Cell>(r.values());
+
+ ArrayList<Cell> out = new ArrayList<Cell>(4);
+ for (Cell cell : r.values()) {
+ if (family.sameFamily(cell.getName()))
+ out.add(cell);
+ }
+ return out;
+ }
+ }
+
+ private static class Key {
+ final byte[] key;
+
+ Key(byte[] key) {
+ this.key = key;
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 5381;
+ for (int ptr = 0; ptr < key.length; ptr++)
+ hash = ((hash << 5) + hash) + (key[ptr] & 0xff);
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other)
+ return true;
+ if (other instanceof Key)
+ return Arrays.equals(key, ((Key) other).key);
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return RawParseUtils.decode(key);
+ }
+ }
+
+ /** A cell value in a column. */
+ public static class Cell {
+ final byte[] row;
+
+ final byte[] name;
+
+ final byte[] value;
+
+ final long timestamp;
+
+ Cell(byte[] row, byte[] name, byte[] value) {
+ this.row = row;
+ this.name = name;
+ this.value = value;
+ this.timestamp = SystemReader.getInstance().getCurrentTime();
+ }
+
+ /** @return key of the row holding the cell. */
+ public byte[] getRow() {
+ return row;
+ }
+
+ /** @return name of the cell's column. */
+ public byte[] getName() {
+ return name;
+ }
+
+ /** @return the cell's value. */
+ public byte[] getValue() {
+ return value;
+ }
+
+ /** @return system clock time of last modification. */
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ @Override
+ public String toString() {
+ return RawParseUtils.decode(name);
+ }
+ }
+}
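
MemTable is the entire storage engine behind the memory SPI: rows and columns are byte arrays, and compareAndSet is the only conditional primitive. A direct-use sketch, with ColumnMatcher coming from the spi.util package added earlier in this patch; imports are omitted:

    MemTable t = new MemTable();
    byte[] row = Constants.encode("row-1");
    ColumnMatcher info = new ColumnMatcher("info:");

    // Unconditional put, then a family scan over the "info:" columns.
    t.put(row, info.append(Constants.encode("a")), Constants.encode("v1"));
    for (MemTable.Cell cell : t.scanFamily(row, info))
        System.out.println(cell + " @ " + cell.getTimestamp());

    // compareAndSet with a null expected value creates the cell only if absent.
    boolean created = t.compareAndSet(row, info.append(Constants.encode("b")),
            null, Constants.encode("v2"));
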
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java
new file mode 100644
index 0000000000..065055b520
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.memory;
+
+import java.io.IOException;
+
+import org.eclipse.jgit.lib.Repository;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.DhtRepository;
+import org.eclipse.jgit.storage.dht.DhtRepositoryBuilder;
+import org.eclipse.jgit.storage.dht.spi.ChunkTable;
+import org.eclipse.jgit.storage.dht.spi.Database;
+import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable;
+import org.eclipse.jgit.storage.dht.spi.RefTable;
+import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable;
+import org.eclipse.jgit.storage.dht.spi.RepositoryTable;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+
+/**
+ * Stores Git repositories in non-persistent JVM heap memory.
+ * <p>
+ * This database type is only suitable for unit testing and other toy
+ * applications. All chunk data is held within the JVM heap as byte arrays,
+ * which is not the most efficient representation available.
+ */
+public class MemoryDatabase implements Database {
+ private final RepositoryIndexTable repositoryIndex;
+
+ private final RepositoryTable repository;
+
+ private final RefTable ref;
+
+ private final ObjectIndexTable objectIndex;
+
+ private final ChunkTable chunk;
+
+ /** Initialize an empty database. */
+ public MemoryDatabase() {
+ repositoryIndex = new MemRepositoryIndexTable();
+ repository = new MemRepositoryTable();
+ ref = new MemRefTable();
+ objectIndex = new MemObjectIndexTable();
+ chunk = new MemChunkTable();
+ }
+
+ /**
+ * Open a repository by name on this database.
+ *
+ * @param name
+ * the name of the repository.
+ * @return the repository instance. If the repository does not yet exist,
+ * the caller can use {@link Repository#create(boolean)} to create it.
+ * @throws IOException
+ * the repository could not be opened.
+ */
+ public DhtRepository open(String name) throws IOException {
+ return (DhtRepository) new DhtRepositoryBuilder<DhtRepositoryBuilder, DhtRepository, MemoryDatabase>()
+ .setDatabase(this) //
+ .setRepositoryName(name) //
+ .setMustExist(false) //
+ .build();
+ }
+
+ public RepositoryIndexTable repositoryIndex() {
+ return repositoryIndex;
+ }
+
+ public RepositoryTable repository() {
+ return repository;
+ }
+
+ public RefTable ref() {
+ return ref;
+ }
+
+ public ObjectIndexTable objectIndex() {
+ return objectIndex;
+ }
+
+ public ChunkTable chunk() {
+ return chunk;
+ }
+
+ public WriteBuffer newWriteBuffer() {
+ return new WriteBuffer() {
+ public void flush() throws DhtException {
+ // Do nothing.
+ }
+
+ public void abort() throws DhtException {
+ // Do nothing.
+ }
+ };
+ }
+}
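
Since every table lives on the JVM heap, MemoryDatabase is convenient for unit tests that need a throwaway repository. A minimal sketch; imports and exception handling are omitted:

    MemoryDatabase db = new MemoryDatabase();
    DhtRepository repo = db.open("sandbox.git");
    repo.create(true); // bare repository, entirely heap-backed

    // Opening the same name again binds to the same in-memory tables,
    // because both repositories share this MemoryDatabase instance.
    DhtRepository again = db.open("sandbox.git");
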
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java
new file mode 100644
index 0000000000..d40cbe31ad
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.util;
+
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.eclipse.jgit.storage.dht.AsyncCallback;
+import org.eclipse.jgit.storage.dht.DhtException;
+import org.eclipse.jgit.storage.dht.DhtTimeoutException;
+import org.eclipse.jgit.storage.dht.spi.WriteBuffer;
+
+/**
+ * Abstract buffer service built on top of an ExecutorService.
+ * <p>
+ * Writes are combined into batches to reduce RPC overhead when many small
+ * writes occur. Batches are sent asynchronously once they reach 512 KiB of
+ * key/column/value data. The calling application is throttled when the
+ * outstanding writes reach the buffer size, and waits until the cluster has
+ * replied with success or failure.
+ * <p>
+ * This buffer implementation is not thread-safe; it assumes only one thread
+ * will use the buffer instance. (It does, however, correctly synchronize with
+ * the background tasks it spawns.)
+ */
+public abstract class AbstractWriteBuffer implements WriteBuffer {
+ private final static int AUTO_FLUSH_SIZE = 512 * 1024;
+
+ private final ExecutorService executor;
+
+ private final int bufferSize;
+
+ private final List<Future<?>> running;
+
+ private final Semaphore spaceAvailable;
+
+ private int queuedCount;
+
+ private boolean flushing;
+
+ private Callable<?> finalTask;
+
+ /**
+ * Initialize a buffer with a backing executor service.
+ *
+ * @param executor
+ * service to run mutation tasks on.
+ * @param bufferSize
+ * maximum number of bytes to have pending at once.
+ */
+ protected AbstractWriteBuffer(ExecutorService executor, int bufferSize) {
+ this.executor = executor;
+ this.bufferSize = bufferSize;
+ this.running = new LinkedList<Future<?>>();
+ this.spaceAvailable = new Semaphore(bufferSize);
+ }
+
+ /**
+ * Notify the buffer that data is being added to it.
+ * <p>
+ * This method waits until the buffer has sufficient space for the requested
+ * data, thereby throttling the calling application code. It returns true if
+ * its recommendation is for the buffer subclass to copy the data onto its
+ * internal buffer and defer starting until later. It returns false if the
+ * recommendation is to start the operation immediately, due to the large
+ * size of the request.
+ * <p>
+ * Buffer implementors should keep in mind that the return value is offered
+ * as advice only, they may choose to implement different behavior.
+ *
+ * @param size
+ * an estimated number of bytes that the buffer will be
+ * responsible for until the operation completes. This should
+ * include the row keys and column headers, in addition to the
+ * data values.
+ * @return true to enqueue the operation; false to start it right away.
+ * @throws DhtException
+ * the current thread was interrupted before space became
+ * available in the buffer.
+ */
+ protected boolean add(int size) throws DhtException {
+ acquireSpace(size);
+ return size < AUTO_FLUSH_SIZE;
+ }
+
+ /**
+ * Notify the buffer that bytes were enqueued.
+ *
+ * @param size
+ * the estimated number of bytes that were enqueued.
+ * @throws DhtException
+ * a previously started operation completed and failed.
+ */
+ protected void queued(int size) throws DhtException {
+ queuedCount += size;
+
+ if (AUTO_FLUSH_SIZE < queuedCount) {
+ startQueuedOperations(queuedCount);
+ queuedCount = 0;
+ }
+ }
+
+ /**
+ * Start all queued operations.
+ * <p>
+ * This method is invoked by {@link #queued(int)} or by {@link #flush()}
+ * when there is a non-zero number of bytes already enqueued as a result of
+ * prior {@link #add(int)} and {@link #queued(int)} calls.
+ * <p>
+ * Implementors should use {@link #start(Callable, int)} to begin their
+ * mutation tasks in the background.
+ *
+ * @param bufferedByteCount
+ * number of bytes that were already enqueued. This count should
+ * be passed to {@link #start(Callable, int)}.
+ * @throws DhtException
+ * a previously started operation completed and failed.
+ */
+ protected abstract void startQueuedOperations(int bufferedByteCount)
+ throws DhtException;
+
+ public void flush() throws DhtException {
+ try {
+ flushing = true;
+
+ if (0 < queuedCount) {
+ startQueuedOperations(queuedCount);
+ queuedCount = 0;
+ }
+
+ // If a task was created above, try to use the current thread
+ // instead of burning an executor thread for the final work.
+
+ if (finalTask != null) {
+ try {
+ waitFor(finalTask);
+ } finally {
+ finalTask = null;
+ }
+ }
+
+ checkRunningTasks(true);
+ } finally {
+ flushing = false;
+ }
+ }
+
+ public void abort() throws DhtException {
+ checkRunningTasks(true);
+ }
+
+ private void acquireSpace(int sz) throws DhtException {
+ try {
+ final int permits = permitsForSize(sz);
+ if (spaceAvailable.tryAcquire(permits))
+ return;
+
+ if (0 < queuedCount) {
+ startQueuedOperations(queuedCount);
+ queuedCount = 0;
+ }
+
+ spaceAvailable.acquire(permits);
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+ }
+ }
+
+ private int permitsForSize(int size) {
+ // Do not acquire more than the configured buffer size,
+ // even if the actual write size is larger. Trying to
+ // acquire more would never succeed.
+
+ if (size <= 0)
+ size = 1;
+ return Math.min(size, bufferSize);
+ }
+
+ /**
+ * Start a mutation task.
+ *
+ * @param <T>
+ * any type the task might return.
+ * @param task
+ * the mutation task. The result of the task is discarded, so
+ * callers should perform result validation within the task.
+ * @param size
+ * number of bytes that are buffered within the task.
+ * @throws DhtException
+ * a prior task has completed and failed.
+ */
+ protected <T> void start(final Callable<T> task, int size)
+ throws DhtException {
+ final int permits = permitsForSize(size);
+ final Callable<T> op = new Callable<T>() {
+ public T call() throws Exception {
+ try {
+ return task.call();
+ } finally {
+ spaceAvailable.release(permits);
+ }
+ }
+ };
+
+ if (flushing && finalTask == null) {
+ // If invoked by flush(), don't start on an executor.
+ //
+ finalTask = op;
+ return;
+ }
+
+ if (!flushing)
+ checkRunningTasks(false);
+ running.add(executor.submit(op));
+ }
+
+ /**
+ * Wrap a callback to update the buffer.
+ * <p>
+ * Flushing the buffer will wait for the returned callback to complete.
+ *
+ * @param <T>
+ * any type the task might return.
+ * @param callback
+ * callback invoked when the task has finished.
+ * @param size
+ * number of bytes that are buffered within the task.
+ * @return wrapped callback that will update the buffer state when the
+ * callback is invoked.
+ * @throws DhtException
+ * a prior task has completed and failed.
+ */
+ protected <T> AsyncCallback<T> wrap(final AsyncCallback<T> callback,
+ int size) throws DhtException {
+ int permits = permitsForSize(size);
+ WrappedCallback<T> op = new WrappedCallback<T>(callback, permits);
+ checkRunningTasks(false);
+ running.add(op);
+ return op;
+ }
+
+ private void checkRunningTasks(boolean wait) throws DhtException {
+ if (running.isEmpty())
+ return;
+
+ Iterator<Future<?>> itr = running.iterator();
+ while (itr.hasNext()) {
+ Future<?> task = itr.next();
+ if (task.isDone() || wait) {
+ itr.remove();
+ waitFor(task);
+ }
+ }
+ }
+
+ private static void waitFor(Callable<?> task) throws DhtException {
+ try {
+ task.call();
+ } catch (DhtException err) {
+ throw err;
+ } catch (Exception err) {
+ throw new DhtException(err);
+ }
+ }
+
+ private static void waitFor(Future<?> task) throws DhtException {
+ try {
+ task.get();
+
+ } catch (InterruptedException e) {
+ throw new DhtTimeoutException(e);
+
+ } catch (ExecutionException err) {
+
+ Throwable t = err;
+ while (t != null) {
+ if (t instanceof DhtException)
+ throw (DhtException) t;
+ t = t.getCause();
+ }
+
+ throw new DhtException(err);
+ }
+ }
+
+ private final class WrappedCallback<T> implements AsyncCallback<T>,
+ Future<T> {
+ private final AsyncCallback<T> callback;
+
+ private final int permits;
+
+ private final CountDownLatch sync;
+
+ private volatile boolean done;
+
+ WrappedCallback(AsyncCallback<T> callback, int permits) {
+ this.callback = callback;
+ this.permits = permits;
+ this.sync = new CountDownLatch(1);
+ }
+
+ public void onSuccess(T result) {
+ try {
+ callback.onSuccess(result);
+ } finally {
+ done();
+ }
+ }
+
+ public void onFailure(DhtException error) {
+ try {
+ callback.onFailure(error);
+ } finally {
+ done();
+ }
+ }
+
+ private void done() {
+ spaceAvailable.release(permits);
+ done = true;
+ sync.countDown();
+ }
+
+ public boolean cancel(boolean mayInterrupt) {
+ return false;
+ }
+
+ public T get() throws InterruptedException, ExecutionException {
+ sync.await();
+ return null;
+ }
+
+ public T get(long time, TimeUnit unit) throws InterruptedException,
+ ExecutionException, TimeoutException {
+ sync.await(time, unit);
+ return null;
+ }
+
+ public boolean isCancelled() {
+ return false;
+ }
+
+ public boolean isDone() {
+ return done;
+ }
+ }
+}
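
A rough sketch of how a database binding might build on the class above: it
batches small writes locally and ships large writes on their own. The Client
and Mutation types and the putBatch() call are hypothetical placeholders for a
real store's API, not part of JGit; only the protected AbstractWriteBuffer
methods (add, queued, start, startQueuedOperations) come from the code above.

package example;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;

import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.spi.util.AbstractWriteBuffer;

class ExampleWriteBuffer extends AbstractWriteBuffer {
	/** Hypothetical single key/column/value write. */
	interface Mutation {
		// marker only
	}

	/** Hypothetical database client; putBatch() is not a JGit API. */
	interface Client {
		void putBatch(List<Mutation> batch) throws Exception;
	}

	private final Client client;

	private List<Mutation> pending = new ArrayList<Mutation>();

	ExampleWriteBuffer(ExecutorService executor, int bufferSize, Client client) {
		super(executor, bufferSize);
		this.client = client;
	}

	/** Buffer one write; small writes are batched, large ones sent at once. */
	void put(Mutation op, int estimatedSize) throws DhtException {
		if (add(estimatedSize)) {
			// Small write: keep it in the local batch and report it queued.
			pending.add(op);
			queued(estimatedSize);
		} else {
			// Large write: send it by itself, bypassing the batch.
			final List<Mutation> single = Collections.singletonList(op);
			start(new Callable<Void>() {
				public Void call() throws Exception {
					client.putBatch(single);
					return null;
				}
			}, estimatedSize);
		}
	}

	@Override
	protected void startQueuedOperations(int bufferedByteCount)
			throws DhtException {
		// Hand the accumulated batch to a background task; the superclass
		// releases the buffered bytes when the task completes.
		final List<Mutation> batch = pending;
		pending = new ArrayList<Mutation>();
		start(new Callable<Void>() {
			public Void call() throws Exception {
				client.putBatch(batch);
				return null;
			}
		}, bufferedByteCount);
	}
}
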
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java
new file mode 100644
index 0000000000..17ef5dd908
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.util;
+
+import java.util.Arrays;
+
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.storage.dht.RowKey;
+import org.eclipse.jgit.util.RawParseUtils;
+
+/** Utility for working with columns whose names are byte arrays. */
+public class ColumnMatcher {
+ private final byte[] name;
+
+ /**
+ * Create a new column matcher for the given column name.
+ *
+ * @param nameStr
+ * the column name, as a string.
+ */
+ public ColumnMatcher(String nameStr) {
+ name = Constants.encode(nameStr);
+ }
+
+ /** @return the column name, encoded in UTF-8. */
+ public byte[] name() {
+ return name;
+ }
+
+ /**
+ * Check if the column is an exact match.
+ *
+ * @param col
+ * the column as read from the database.
+ * @return true only if {@code col} is exactly the same as this column.
+ */
+ public boolean sameName(byte[] col) {
+ return Arrays.equals(name, col);
+ }
+
+ /**
+ * Check if the column is a member of this family.
+ * <p>
+ * This method checks that {@link #name()} (the string supplied to the
+ * constructor) is a proper prefix of {@code col}.
+ *
+ * @param col
+ * the column as read from the database.
+ * @return true if {@code col} is a member of this column family.
+ */
+ public boolean sameFamily(byte[] col) {
+ if (name.length < col.length) {
+ for (int i = 0; i < name.length; i++) {
+ if (name[i] != col[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Extract the portion of the column name that comes after the family.
+ *
+ * @param col
+ * the column as read from the database.
+ * @return everything after the family name.
+ */
+ public byte[] suffix(byte[] col) {
+ byte[] r = new byte[col.length - name.length];
+ System.arraycopy(col, name.length, r, 0, r.length);
+ return r;
+ }
+
+ /**
+ * Append a suffix onto this column name.
+ *
+ * @param suffix
+ * name component to appear after the family name.
+ * @return the joined name, ready for storage in the database.
+ */
+ public byte[] append(RowKey suffix) {
+ return append(suffix.asBytes());
+ }
+
+ /**
+ * Append a suffix onto this column name.
+ *
+ * @param suffix
+ * name component to appear after the family name.
+ * @return the joined name, ready for storage in the database.
+ */
+ public byte[] append(byte[] suffix) {
+ byte[] r = new byte[name.length + suffix.length];
+ System.arraycopy(name, 0, r, 0, name.length);
+ System.arraycopy(suffix, 0, r, name.length, suffix.length);
+ return r;
+ }
+
+ @Override
+ public String toString() {
+ return RawParseUtils.decode(name);
+ }
+}
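
A brief usage sketch for ColumnMatcher; the "info:" family name and the "size"
member below are made-up examples, not column names defined by the DHT schema.

package example;

import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
import org.eclipse.jgit.util.RawParseUtils;

public class ColumnMatcherExample {
	public static void main(String[] args) {
		// "info:" is an illustrative family name only.
		ColumnMatcher info = new ColumnMatcher("info:");

		// Build the full column name "info:size" for storage.
		byte[] col = info.append(Constants.encode("size"));

		// When scanning a row, recognize members of the family and
		// recover the portion after the family prefix.
		if (info.sameFamily(col)) {
			byte[] member = info.suffix(col);
			System.out.println(RawParseUtils.decode(member)); // prints "size"
		}

		// Exact-match test against the bare family name.
		System.out.println(info.sameName(Constants.encode("info:"))); // true
	}
}
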
diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java
new file mode 100644
index 0000000000..ed0b918c28
--- /dev/null
+++ b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ * names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.storage.dht.spi.util;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/** Optional executor support for implementors to build on top of. */
+public class ExecutorTools {
+ /**
+ * Get the default executor service for this JVM.
+ * <p>
+ * The default executor service is created the first time it is requested,
+ * and is shared with all future requests. It uses a fixed-size thread pool
+ * that is allocated 2 threads per CPU. Each thread is configured to be a
+ * daemon thread, permitting the JVM to shut down cleanly when the
+ * application threads stop, even if work is still pending in the service.
+ *
+ * @return the default executor service.
+ */
+ public static ExecutorService getDefaultExecutorService() {
+ return DefaultExecutors.service;
+ }
+
+ private static class DefaultExecutors {
+ static final ExecutorService service;
+ static {
+ int ncpu = Runtime.getRuntime().availableProcessors();
+ ThreadFactory threadFactory = new ThreadFactory() {
+ private final AtomicInteger cnt = new AtomicInteger();
+
+ private final ThreadGroup group = new ThreadGroup("JGit-DHT");
+
+ public Thread newThread(Runnable body) {
+ int id = cnt.incrementAndGet();
+ String name = "JGit-DHT-Worker-" + id;
+ ClassLoader myCL = getClass().getClassLoader();
+
+ Thread thread = new Thread(group, body, name);
+ thread.setDaemon(true);
+ thread.setContextClassLoader(myCL);
+ return thread;
+ }
+ };
+ service = Executors.newFixedThreadPool(2 * ncpu, threadFactory);
+ }
+ }
+
+ private ExecutorTools() {
+ // Static helper class, do not make instances.
+ }
+}
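
As a small illustration, the shared pool can be fetched once and reused
wherever the SPI needs an ExecutorService, for example when constructing a
write buffer built on AbstractWriteBuffer; the trivial task below only shows
which threads the work runs on.

package example;

import java.util.concurrent.ExecutorService;

import org.eclipse.jgit.storage.dht.spi.util.ExecutorTools;

public class ExecutorToolsExample {
	public static void main(String[] args) throws Exception {
		// Shared, lazily created pool with 2 daemon threads per CPU.
		ExecutorService pool = ExecutorTools.getDefaultExecutorService();

		// Work runs on "JGit-DHT-Worker-N" daemon threads.
		pool.submit(new Runnable() {
			public void run() {
				System.out.println(Thread.currentThread().getName());
			}
		}).get();
	}
}
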
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/lib/ObjectId.java b/org.eclipse.jgit/src/org/eclipse/jgit/lib/ObjectId.java
index 4d6dab4bb6..2f04751a1d 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/lib/ObjectId.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/lib/ObjectId.java
@@ -227,7 +227,7 @@ public class ObjectId extends AnyObjectId implements Serializable {
* the string to read from. Must be 40 characters long.
* @return the converted object id.
*/
- public static final ObjectId fromString(final String str) {
+ public static ObjectId fromString(final String str) {
if (str.length() != Constants.OBJECT_ID_STRING_LENGTH)
throw new IllegalArgumentException("Invalid id: " + str);
return fromHexString(Constants.encodeASCII(str), 0);
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/file/ObjectDirectoryPackParser.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/file/ObjectDirectoryPackParser.java
index d8df339d5e..91e50ad9d1 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/file/ObjectDirectoryPackParser.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/file/ObjectDirectoryPackParser.java
@@ -202,6 +202,11 @@ public class ObjectDirectoryPackParser extends PackParser {
}
@Override
+ protected void onPackHeader(long objectCount) throws IOException {
+ // Ignored, the count is not required.
+ }
+
+ @Override
protected void onBeginWholeObject(long streamPosition, int type,
long inflatedSize) throws IOException {
crc.reset();
@@ -232,6 +237,12 @@ public class ObjectDirectoryPackParser extends PackParser {
}
@Override
+ protected void onInflatedObjectData(PackedObjectInfo obj, int typeCode,
+ byte[] data) throws IOException {
+ // ObjectDirectory ignores this event.
+ }
+
+ @Override
protected void onObjectHeader(Source src, byte[] raw, int pos, int len)
throws IOException {
crc.update(raw, pos, len);
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackOutputStream.java b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackOutputStream.java
index a7925b52b7..d9002e70b0 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackOutputStream.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/storage/pack/PackOutputStream.java
@@ -231,7 +231,7 @@ public final class PackOutputStream extends OutputStream {
}
/** @return total number of bytes written since stream start. */
- long length() {
+ public long length() {
return count;
}
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/transport/PackParser.java b/org.eclipse.jgit/src/org/eclipse/jgit/transport/PackParser.java
index f5e763bde9..6f0c6c3b36 100644
--- a/org.eclipse.jgit/src/org/eclipse/jgit/transport/PackParser.java
+++ b/org.eclipse.jgit/src/org/eclipse/jgit/transport/PackParser.java
@@ -580,6 +580,7 @@ public abstract class PackParser {
PackedObjectInfo oe;
oe = newInfo(tempObjectId, visit.delta, visit.parent.id);
oe.setOffset(visit.delta.position);
+ onInflatedObjectData(oe, type, visit.data);
addObjectAndTrack(oe);
visit.id = oe;
@@ -768,6 +769,8 @@ public abstract class PackParser {
JGitText.get().unsupportedPackVersion, vers));
objectCount = NB.decodeUInt32(buf, p + 8);
use(hdrln);
+
+ onPackHeader(objectCount);
}
private void readPackFooter() throws IOException {
@@ -875,6 +878,7 @@ public abstract class PackParser {
objectDigest.update(Constants.encodeASCII(sz));
objectDigest.update((byte) 0);
+ final byte[] data;
boolean checkContentLater = false;
if (type == Constants.OBJ_BLOB) {
byte[] readBuffer = buffer();
@@ -891,9 +895,10 @@ public abstract class PackParser {
tempObjectId.fromRaw(objectDigest.digest(), 0);
checkContentLater = isCheckObjectCollisions()
&& readCurs.has(tempObjectId);
+ data = null;
} else {
- final byte[] data = inflateAndReturn(Source.INPUT, sz);
+ data = inflateAndReturn(Source.INPUT, sz);
objectDigest.update(data);
tempObjectId.fromRaw(objectDigest.digest(), 0);
verifySafeObject(tempObjectId, type, data);
@@ -902,6 +907,8 @@ public abstract class PackParser {
PackedObjectInfo obj = newInfo(tempObjectId, null, null);
obj.setOffset(pos);
onEndWholeObject(obj);
+ if (data != null)
+ onInflatedObjectData(obj, type, data);
addObjectAndTrack(obj);
if (checkContentLater)
deferredCheckBlobs.add(obj);
@@ -1144,6 +1151,31 @@ public abstract class PackParser {
int len) throws IOException;
/**
+ * Invoked for commits, trees, tags, and small blobs.
+ *
+ * @param obj
+ * the object info, populated.
+ * @param typeCode
+ * the type of the object.
+ * @param data
+ * inflated data for the object.
+ * @throws IOException
+ * the object cannot be archived.
+ */
+ protected abstract void onInflatedObjectData(PackedObjectInfo obj,
+ int typeCode, byte[] data) throws IOException;
+
+ /**
+ * Provide the implementation with the original stream's pack header.
+ *
+ * @param objCnt
+ * number of objects expected in the stream.
+ * @throws IOException
+ * the implementation refuses to work with this many objects.
+ */
+ protected abstract void onPackHeader(long objCnt) throws IOException;
+
+ /**
* Provide the implementation with the original stream's pack footer.
*
* @param hash
diff --git a/pom.xml b/pom.xml
index 6d8c4050a2..5826ce4d40 100644
--- a/pom.xml
+++ b/pom.xml
@@ -405,15 +405,18 @@
<modules>
<module>org.eclipse.jgit</module>
+ <module>org.eclipse.jgit.storage.dht</module>
+ <module>org.eclipse.jgit.ant</module>
<module>org.eclipse.jgit.ui</module>
<module>org.eclipse.jgit.http.server</module>
<module>org.eclipse.jgit.iplog</module>
<module>org.eclipse.jgit.pgm</module>
<module>org.eclipse.jgit.junit</module>
- <module>org.eclipse.jgit.junit.http</module>
+ <module>org.eclipse.jgit.junit.http</module>
<module>org.eclipse.jgit.test</module>
<module>org.eclipse.jgit.http.test</module>
+ <module>org.eclipse.jgit.storage.dht.test</module>
</modules>
</project>