Diffstat (limited to 'org.eclipse.jgit.storage.dht')
97 files changed, 0 insertions, 18067 deletions
diff --git a/org.eclipse.jgit.storage.dht/.classpath b/org.eclipse.jgit.storage.dht/.classpath deleted file mode 100644 index d7edf529a2..0000000000 --- a/org.eclipse.jgit.storage.dht/.classpath +++ /dev/null @@ -1,8 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<classpath> - <classpathentry kind="src" path="src"/> - <classpathentry kind="src" path="resources"/> - <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/J2SE-1.5"/> - <classpathentry kind="con" path="org.eclipse.pde.core.requiredPlugins"/> - <classpathentry kind="output" path="bin"/> -</classpath> diff --git a/org.eclipse.jgit.storage.dht/.fbprefs b/org.eclipse.jgit.storage.dht/.fbprefs deleted file mode 100644 index 81a0767ff6..0000000000 --- a/org.eclipse.jgit.storage.dht/.fbprefs +++ /dev/null @@ -1,125 +0,0 @@ -#FindBugs User Preferences -#Mon May 04 16:24:13 PDT 2009 -detectorAppendingToAnObjectOutputStream=AppendingToAnObjectOutputStream|true -detectorBadAppletConstructor=BadAppletConstructor|false -detectorBadResultSetAccess=BadResultSetAccess|true -detectorBadSyntaxForRegularExpression=BadSyntaxForRegularExpression|true -detectorBadUseOfReturnValue=BadUseOfReturnValue|true -detectorBadlyOverriddenAdapter=BadlyOverriddenAdapter|true -detectorBooleanReturnNull=BooleanReturnNull|true -detectorCallToUnsupportedMethod=CallToUnsupportedMethod|true -detectorCheckImmutableAnnotation=CheckImmutableAnnotation|true -detectorCheckTypeQualifiers=CheckTypeQualifiers|true -detectorCloneIdiom=CloneIdiom|false -detectorComparatorIdiom=ComparatorIdiom|true -detectorConfusedInheritance=ConfusedInheritance|true -detectorConfusionBetweenInheritedAndOuterMethod=ConfusionBetweenInheritedAndOuterMethod|true -detectorCrossSiteScripting=CrossSiteScripting|true -detectorDoInsideDoPrivileged=DoInsideDoPrivileged|true -detectorDontCatchIllegalMonitorStateException=DontCatchIllegalMonitorStateException|true -detectorDontUseEnum=DontUseEnum|true -detectorDroppedException=DroppedException|true -detectorDumbMethodInvocations=DumbMethodInvocations|true -detectorDumbMethods=DumbMethods|true -detectorDuplicateBranches=DuplicateBranches|true -detectorEmptyZipFileEntry=EmptyZipFileEntry|true -detectorEqualsOperandShouldHaveClassCompatibleWithThis=EqualsOperandShouldHaveClassCompatibleWithThis|true -detectorFinalizerNullsFields=FinalizerNullsFields|true -detectorFindBadCast2=FindBadCast2|true -detectorFindBadForLoop=FindBadForLoop|true -detectorFindCircularDependencies=FindCircularDependencies|false -detectorFindDeadLocalStores=FindDeadLocalStores|true -detectorFindDoubleCheck=FindDoubleCheck|true -detectorFindEmptySynchronizedBlock=FindEmptySynchronizedBlock|true -detectorFindFieldSelfAssignment=FindFieldSelfAssignment|true -detectorFindFinalizeInvocations=FindFinalizeInvocations|true -detectorFindFloatEquality=FindFloatEquality|true -detectorFindHEmismatch=FindHEmismatch|true -detectorFindInconsistentSync2=FindInconsistentSync2|true -detectorFindJSR166LockMonitorenter=FindJSR166LockMonitorenter|true -detectorFindLocalSelfAssignment2=FindLocalSelfAssignment2|true -detectorFindMaskedFields=FindMaskedFields|true -detectorFindMismatchedWaitOrNotify=FindMismatchedWaitOrNotify|true -detectorFindNakedNotify=FindNakedNotify|true -detectorFindNonSerializableStoreIntoSession=FindNonSerializableStoreIntoSession|true -detectorFindNonSerializableValuePassedToWriteObject=FindNonSerializableValuePassedToWriteObject|true -detectorFindNonShortCircuit=FindNonShortCircuit|true 
-detectorFindNullDeref=FindNullDeref|true -detectorFindNullDerefsInvolvingNonShortCircuitEvaluation=FindNullDerefsInvolvingNonShortCircuitEvaluation|true -detectorFindOpenStream=FindOpenStream|true -detectorFindPuzzlers=FindPuzzlers|true -detectorFindRefComparison=FindRefComparison|true -detectorFindReturnRef=FindReturnRef|true -detectorFindRunInvocations=FindRunInvocations|true -detectorFindSelfComparison=FindSelfComparison|true -detectorFindSelfComparison2=FindSelfComparison2|true -detectorFindSleepWithLockHeld=FindSleepWithLockHeld|true -detectorFindSpinLoop=FindSpinLoop|true -detectorFindSqlInjection=FindSqlInjection|true -detectorFindTwoLockWait=FindTwoLockWait|true -detectorFindUncalledPrivateMethods=FindUncalledPrivateMethods|true -detectorFindUnconditionalWait=FindUnconditionalWait|true -detectorFindUninitializedGet=FindUninitializedGet|true -detectorFindUnrelatedTypesInGenericContainer=FindUnrelatedTypesInGenericContainer|true -detectorFindUnreleasedLock=FindUnreleasedLock|true -detectorFindUnsatisfiedObligation=FindUnsatisfiedObligation|true -detectorFindUnsyncGet=FindUnsyncGet|true -detectorFindUselessControlFlow=FindUselessControlFlow|true -detectorFormatStringChecker=FormatStringChecker|true -detectorHugeSharedStringConstants=HugeSharedStringConstants|true -detectorIDivResultCastToDouble=IDivResultCastToDouble|true -detectorIncompatMask=IncompatMask|true -detectorInconsistentAnnotations=InconsistentAnnotations|true -detectorInefficientMemberAccess=InefficientMemberAccess|false -detectorInefficientToArray=InefficientToArray|true -detectorInfiniteLoop=InfiniteLoop|true -detectorInfiniteRecursiveLoop=InfiniteRecursiveLoop|true -detectorInfiniteRecursiveLoop2=InfiniteRecursiveLoop2|false -detectorInheritanceUnsafeGetResource=InheritanceUnsafeGetResource|true -detectorInitializationChain=InitializationChain|true -detectorInstantiateStaticClass=InstantiateStaticClass|true -detectorInvalidJUnitTest=InvalidJUnitTest|true -detectorIteratorIdioms=IteratorIdioms|true -detectorLazyInit=LazyInit|true -detectorLoadOfKnownNullValue=LoadOfKnownNullValue|true -detectorMethodReturnCheck=MethodReturnCheck|true -detectorMultithreadedInstanceAccess=MultithreadedInstanceAccess|true -detectorMutableLock=MutableLock|true -detectorMutableStaticFields=MutableStaticFields|true -detectorNaming=Naming|true -detectorNumberConstructor=NumberConstructor|true -detectorOverridingEqualsNotSymmetrical=OverridingEqualsNotSymmetrical|true -detectorPreferZeroLengthArrays=PreferZeroLengthArrays|true -detectorPublicSemaphores=PublicSemaphores|false -detectorQuestionableBooleanAssignment=QuestionableBooleanAssignment|true -detectorReadReturnShouldBeChecked=ReadReturnShouldBeChecked|true -detectorRedundantInterfaces=RedundantInterfaces|true -detectorRepeatedConditionals=RepeatedConditionals|true -detectorRuntimeExceptionCapture=RuntimeExceptionCapture|true -detectorSerializableIdiom=SerializableIdiom|true -detectorStartInConstructor=StartInConstructor|true -detectorStaticCalendarDetector=StaticCalendarDetector|true -detectorStringConcatenation=StringConcatenation|true -detectorSuperfluousInstanceOf=SuperfluousInstanceOf|true -detectorSuspiciousThreadInterrupted=SuspiciousThreadInterrupted|true -detectorSwitchFallthrough=SwitchFallthrough|true -detectorSynchronizeAndNullCheckField=SynchronizeAndNullCheckField|true -detectorSynchronizeOnClassLiteralNotGetClass=SynchronizeOnClassLiteralNotGetClass|true -detectorSynchronizingOnContentsOfFieldToProtectField=SynchronizingOnContentsOfFieldToProtectField|true 
-detectorURLProblems=URLProblems|true -detectorUncallableMethodOfAnonymousClass=UncallableMethodOfAnonymousClass|true -detectorUnnecessaryMath=UnnecessaryMath|true -detectorUnreadFields=UnreadFields|true -detectorUseObjectEquals=UseObjectEquals|false -detectorUselessSubclassMethod=UselessSubclassMethod|false -detectorVarArgsProblems=VarArgsProblems|true -detectorVolatileUsage=VolatileUsage|true -detectorWaitInLoop=WaitInLoop|true -detectorWrongMapIterator=WrongMapIterator|true -detectorXMLFactoryBypass=XMLFactoryBypass|true -detector_threshold=2 -effort=default -excludefilter0=findBugs/FindBugsExcludeFilter.xml -filter_settings=Medium|BAD_PRACTICE,CORRECTNESS,MT_CORRECTNESS,PERFORMANCE,STYLE|false -filter_settings_neg=MALICIOUS_CODE,NOISE,I18N,SECURITY,EXPERIMENTAL| -run_at_full_build=true diff --git a/org.eclipse.jgit.storage.dht/.gitignore b/org.eclipse.jgit.storage.dht/.gitignore deleted file mode 100644 index 934e0e06ff..0000000000 --- a/org.eclipse.jgit.storage.dht/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/bin -/target diff --git a/org.eclipse.jgit.storage.dht/.project b/org.eclipse.jgit.storage.dht/.project deleted file mode 100644 index db60c5557f..0000000000 --- a/org.eclipse.jgit.storage.dht/.project +++ /dev/null @@ -1,34 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<projectDescription> - <name>org.eclipse.jgit.storage.dht</name> - <comment></comment> - <projects> - </projects> - <buildSpec> - <buildCommand> - <name>org.eclipse.jdt.core.javabuilder</name> - <arguments> - </arguments> - </buildCommand> - <buildCommand> - <name>org.eclipse.pde.ManifestBuilder</name> - <arguments> - </arguments> - </buildCommand> - <buildCommand> - <name>org.eclipse.pde.SchemaBuilder</name> - <arguments> - </arguments> - </buildCommand> - <buildCommand> - <name>org.eclipse.pde.api.tools.apiAnalysisBuilder</name> - <arguments> - </arguments> - </buildCommand> - </buildSpec> - <natures> - <nature>org.eclipse.jdt.core.javanature</nature> - <nature>org.eclipse.pde.PluginNature</nature> - <nature>org.eclipse.pde.api.tools.apiAnalysisNature</nature> - </natures> -</projectDescription> diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs deleted file mode 100644 index 66ac15c47c..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.resources.prefs +++ /dev/null @@ -1,3 +0,0 @@ -#Mon Aug 11 16:46:12 PDT 2008 -eclipse.preferences.version=1 -encoding/<project>=UTF-8 diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs deleted file mode 100644 index 006e07ede5..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.core.runtime.prefs +++ /dev/null @@ -1,3 +0,0 @@ -#Mon Mar 24 18:55:50 EDT 2008 -eclipse.preferences.version=1 -line.separator=\n diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs deleted file mode 100644 index 76557139ec..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.core.prefs +++ /dev/null @@ -1,334 +0,0 @@ -#Fri Oct 02 18:43:47 PDT 2009 -eclipse.preferences.version=1 -org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled -org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.5 -org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve -org.eclipse.jdt.core.compiler.compliance=1.5 
-org.eclipse.jdt.core.compiler.debug.lineNumber=generate -org.eclipse.jdt.core.compiler.debug.localVariable=generate -org.eclipse.jdt.core.compiler.debug.sourceFile=generate -org.eclipse.jdt.core.compiler.doc.comment.support=enabled -org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=warning -org.eclipse.jdt.core.compiler.problem.assertIdentifier=error -org.eclipse.jdt.core.compiler.problem.autoboxing=warning -org.eclipse.jdt.core.compiler.problem.deprecation=warning -org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled -org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled -org.eclipse.jdt.core.compiler.problem.discouragedReference=warning -org.eclipse.jdt.core.compiler.problem.emptyStatement=warning -org.eclipse.jdt.core.compiler.problem.enumIdentifier=error -org.eclipse.jdt.core.compiler.problem.fallthroughCase=warning -org.eclipse.jdt.core.compiler.problem.fieldHiding=warning -org.eclipse.jdt.core.compiler.problem.finalParameterBound=warning -org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=error -org.eclipse.jdt.core.compiler.problem.forbiddenReference=error -org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=error -org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=warning -org.eclipse.jdt.core.compiler.problem.incompleteEnumSwitch=warning -org.eclipse.jdt.core.compiler.problem.indirectStaticAccess=error -org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error -org.eclipse.jdt.core.compiler.problem.invalidJavadocTags=enabled -org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsDeprecatedRef=enabled -org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=enabled -org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private -org.eclipse.jdt.core.compiler.problem.localVariableHiding=warning -org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error -org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore -org.eclipse.jdt.core.compiler.problem.missingJavadocComments=error -org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsOverriding=disabled -org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsVisibility=protected -org.eclipse.jdt.core.compiler.problem.missingJavadocTagDescription=return_tag -org.eclipse.jdt.core.compiler.problem.missingJavadocTags=error -org.eclipse.jdt.core.compiler.problem.missingJavadocTagsOverriding=disabled -org.eclipse.jdt.core.compiler.problem.missingJavadocTagsVisibility=private -org.eclipse.jdt.core.compiler.problem.missingOverrideAnnotation=ignore -org.eclipse.jdt.core.compiler.problem.missingSerialVersion=warning -org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error -org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error -org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore -org.eclipse.jdt.core.compiler.problem.nullReference=warning -org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=warning -org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore -org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=error -org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning -org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore -org.eclipse.jdt.core.compiler.problem.redundantNullCheck=warning -org.eclipse.jdt.core.compiler.problem.specialParameterHidingField=disabled -org.eclipse.jdt.core.compiler.problem.staticAccessReceiver=error 
-org.eclipse.jdt.core.compiler.problem.suppressWarnings=enabled -org.eclipse.jdt.core.compiler.problem.typeParameterHiding=warning -org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=warning -org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=warning -org.eclipse.jdt.core.compiler.problem.unhandledWarningToken=warning -org.eclipse.jdt.core.compiler.problem.unnecessaryTypeCheck=error -org.eclipse.jdt.core.compiler.problem.unqualifiedFieldAccess=ignore -org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=warning -org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled -org.eclipse.jdt.core.compiler.problem.unusedImport=error -org.eclipse.jdt.core.compiler.problem.unusedLabel=error -org.eclipse.jdt.core.compiler.problem.unusedLocal=error -org.eclipse.jdt.core.compiler.problem.unusedParameter=warning -org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled -org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled -org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error -org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error -org.eclipse.jdt.core.compiler.source=1.5 -org.eclipse.jdt.core.formatter.align_type_members_on_columns=false -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16 -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16 -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16 -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16 -org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16 -org.eclipse.jdt.core.formatter.alignment_for_assignment=0 -org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16 -org.eclipse.jdt.core.formatter.alignment_for_compact_if=16 -org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80 -org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0 -org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16 -org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16 -org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16 -org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16 -org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16 -org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16 -org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16 -org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16 -org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16 -org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16 -org.eclipse.jdt.core.formatter.blank_lines_after_imports=1 -org.eclipse.jdt.core.formatter.blank_lines_after_package=1 -org.eclipse.jdt.core.formatter.blank_lines_before_field=1 -org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0 -org.eclipse.jdt.core.formatter.blank_lines_before_imports=1 -org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1 -org.eclipse.jdt.core.formatter.blank_lines_before_method=1 -org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1 -org.eclipse.jdt.core.formatter.blank_lines_before_package=0 -org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1 
-org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1 -org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line -org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line -org.eclipse.jdt.core.formatter.comment.clear_blank_lines=false -org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false -org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false -org.eclipse.jdt.core.formatter.comment.format_block_comments=true -org.eclipse.jdt.core.formatter.comment.format_comments=true -org.eclipse.jdt.core.formatter.comment.format_header=false -org.eclipse.jdt.core.formatter.comment.format_html=true -org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=true -org.eclipse.jdt.core.formatter.comment.format_line_comments=true -org.eclipse.jdt.core.formatter.comment.format_source_code=true -org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true -org.eclipse.jdt.core.formatter.comment.indent_root_tags=true -org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert -org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert -org.eclipse.jdt.core.formatter.comment.line_length=80 -org.eclipse.jdt.core.formatter.compact_else_if=true -org.eclipse.jdt.core.formatter.continuation_indentation=2 -org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2 -org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true -org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true -org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true -org.eclipse.jdt.core.formatter.indent_empty_lines=false -org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true -org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true -org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true -org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false -org.eclipse.jdt.core.formatter.indentation.size=4 -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation=insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_member=insert -org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert 
-org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert -org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert -org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert -org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert -org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert -org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert 
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert -org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert -org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert -org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert -org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert -org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert -org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert -org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert 
-org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert -org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert 
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert -org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert -org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert -org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert -org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert 
-org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert -org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert -org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert -org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert -org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false -org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false -org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false -org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false -org.eclipse.jdt.core.formatter.lineSplit=80 -org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false -org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false -org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0 -org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1 -org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true -org.eclipse.jdt.core.formatter.tabulation.char=tab -org.eclipse.jdt.core.formatter.tabulation.size=4 -org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false -org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs deleted file mode 100644 index 7b2cdca106..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.jdt.ui.prefs +++ /dev/null @@ -1,62 +0,0 @@ -#Thu Aug 26 12:30:58 CDT 2010 -eclipse.preferences.version=1 -editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true -formatter_profile=_JGit Format -formatter_settings_version=11 -org.eclipse.jdt.ui.ignorelowercasenames=true -org.eclipse.jdt.ui.importorder=java;javax;org;com; -org.eclipse.jdt.ui.ondemandthreshold=99 -org.eclipse.jdt.ui.staticondemandthreshold=99 -org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?><templates/> -sp_cleanup.add_default_serial_version_id=true -sp_cleanup.add_generated_serial_version_id=false -sp_cleanup.add_missing_annotations=false -sp_cleanup.add_missing_deprecated_annotations=true -sp_cleanup.add_missing_methods=false -sp_cleanup.add_missing_nls_tags=false -sp_cleanup.add_missing_override_annotations=true -sp_cleanup.add_missing_override_annotations_interface_methods=false -sp_cleanup.add_serial_version_id=false -sp_cleanup.always_use_blocks=true 
-sp_cleanup.always_use_parentheses_in_expressions=false -sp_cleanup.always_use_this_for_non_static_field_access=false -sp_cleanup.always_use_this_for_non_static_method_access=false -sp_cleanup.convert_to_enhanced_for_loop=false -sp_cleanup.correct_indentation=false -sp_cleanup.format_source_code=true -sp_cleanup.format_source_code_changes_only=true -sp_cleanup.make_local_variable_final=false -sp_cleanup.make_parameters_final=false -sp_cleanup.make_private_fields_final=true -sp_cleanup.make_type_abstract_if_missing_method=false -sp_cleanup.make_variable_declarations_final=false -sp_cleanup.never_use_blocks=false -sp_cleanup.never_use_parentheses_in_expressions=true -sp_cleanup.on_save_use_additional_actions=true -sp_cleanup.organize_imports=false -sp_cleanup.qualify_static_field_accesses_with_declaring_class=false -sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true -sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true -sp_cleanup.qualify_static_member_accesses_with_declaring_class=false -sp_cleanup.qualify_static_method_accesses_with_declaring_class=false -sp_cleanup.remove_private_constructors=true -sp_cleanup.remove_trailing_whitespaces=true -sp_cleanup.remove_trailing_whitespaces_all=true -sp_cleanup.remove_trailing_whitespaces_ignore_empty=false -sp_cleanup.remove_unnecessary_casts=false -sp_cleanup.remove_unnecessary_nls_tags=false -sp_cleanup.remove_unused_imports=false -sp_cleanup.remove_unused_local_variables=false -sp_cleanup.remove_unused_private_fields=true -sp_cleanup.remove_unused_private_members=false -sp_cleanup.remove_unused_private_methods=true -sp_cleanup.remove_unused_private_types=true -sp_cleanup.sort_members=false -sp_cleanup.sort_members_all=false -sp_cleanup.use_blocks=false -sp_cleanup.use_blocks_only_for_return_and_throw=false -sp_cleanup.use_parentheses_in_expressions=false -sp_cleanup.use_this_for_non_static_field_access=false -sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=true -sp_cleanup.use_this_for_non_static_method_access=false -sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=true diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.mylyn.tasks.ui.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.mylyn.tasks.ui.prefs deleted file mode 100644 index 823c0f56ae..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.mylyn.tasks.ui.prefs +++ /dev/null @@ -1,4 +0,0 @@ -#Tue Jul 19 20:11:28 CEST 2011 -eclipse.preferences.version=1 -project.repository.kind=bugzilla -project.repository.url=https\://bugs.eclipse.org/bugs diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.mylyn.team.ui.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.mylyn.team.ui.prefs deleted file mode 100644 index 0cba949fb7..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.mylyn.team.ui.prefs +++ /dev/null @@ -1,3 +0,0 @@ -#Tue Jul 19 20:11:28 CEST 2011 -commit.comment.template=${task.description} \n\nBug\: ${task.key} -eclipse.preferences.version=1 diff --git a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.pde.api.tools.prefs b/org.eclipse.jgit.storage.dht/.settings/org.eclipse.pde.api.tools.prefs deleted file mode 100644 index cd148d9049..0000000000 --- a/org.eclipse.jgit.storage.dht/.settings/org.eclipse.pde.api.tools.prefs +++ /dev/null @@ -1,94 +0,0 @@ -#Tue Oct 18 00:52:01 CEST 2011 -ANNOTATION_ELEMENT_TYPE_ADDED_METHOD_WITHOUT_DEFAULT_VALUE=Error -ANNOTATION_ELEMENT_TYPE_CHANGED_TYPE_CONVERSION=Error 
-ANNOTATION_ELEMENT_TYPE_REMOVED_FIELD=Error -ANNOTATION_ELEMENT_TYPE_REMOVED_METHOD=Error -ANNOTATION_ELEMENT_TYPE_REMOVED_TYPE_MEMBER=Error -API_COMPONENT_ELEMENT_TYPE_REMOVED_API_TYPE=Error -API_COMPONENT_ELEMENT_TYPE_REMOVED_REEXPORTED_API_TYPE=Error -API_COMPONENT_ELEMENT_TYPE_REMOVED_REEXPORTED_TYPE=Error -API_COMPONENT_ELEMENT_TYPE_REMOVED_TYPE=Error -CLASS_ELEMENT_TYPE_ADDED_METHOD=Error -CLASS_ELEMENT_TYPE_ADDED_RESTRICTIONS=Error -CLASS_ELEMENT_TYPE_ADDED_TYPE_PARAMETER=Error -CLASS_ELEMENT_TYPE_CHANGED_CONTRACTED_SUPERINTERFACES_SET=Error -CLASS_ELEMENT_TYPE_CHANGED_DECREASE_ACCESS=Error -CLASS_ELEMENT_TYPE_CHANGED_NON_ABSTRACT_TO_ABSTRACT=Error -CLASS_ELEMENT_TYPE_CHANGED_NON_FINAL_TO_FINAL=Error -CLASS_ELEMENT_TYPE_CHANGED_TYPE_CONVERSION=Error -CLASS_ELEMENT_TYPE_REMOVED_CONSTRUCTOR=Error -CLASS_ELEMENT_TYPE_REMOVED_FIELD=Error -CLASS_ELEMENT_TYPE_REMOVED_METHOD=Error -CLASS_ELEMENT_TYPE_REMOVED_SUPERCLASS=Error -CLASS_ELEMENT_TYPE_REMOVED_TYPE_MEMBER=Error -CLASS_ELEMENT_TYPE_REMOVED_TYPE_PARAMETER=Error -CONSTRUCTOR_ELEMENT_TYPE_ADDED_TYPE_PARAMETER=Error -CONSTRUCTOR_ELEMENT_TYPE_CHANGED_DECREASE_ACCESS=Error -CONSTRUCTOR_ELEMENT_TYPE_CHANGED_VARARGS_TO_ARRAY=Error -CONSTRUCTOR_ELEMENT_TYPE_REMOVED_TYPE_PARAMETER=Error -ENUM_ELEMENT_TYPE_CHANGED_CONTRACTED_SUPERINTERFACES_SET=Error -ENUM_ELEMENT_TYPE_CHANGED_TYPE_CONVERSION=Error -ENUM_ELEMENT_TYPE_REMOVED_ENUM_CONSTANT=Error -ENUM_ELEMENT_TYPE_REMOVED_FIELD=Error -ENUM_ELEMENT_TYPE_REMOVED_METHOD=Error -ENUM_ELEMENT_TYPE_REMOVED_TYPE_MEMBER=Error -FIELD_ELEMENT_TYPE_ADDED_VALUE=Error -FIELD_ELEMENT_TYPE_CHANGED_DECREASE_ACCESS=Error -FIELD_ELEMENT_TYPE_CHANGED_FINAL_TO_NON_FINAL_STATIC_CONSTANT=Error -FIELD_ELEMENT_TYPE_CHANGED_NON_FINAL_TO_FINAL=Error -FIELD_ELEMENT_TYPE_CHANGED_NON_STATIC_TO_STATIC=Error -FIELD_ELEMENT_TYPE_CHANGED_STATIC_TO_NON_STATIC=Error -FIELD_ELEMENT_TYPE_CHANGED_TYPE=Error -FIELD_ELEMENT_TYPE_CHANGED_VALUE=Error -FIELD_ELEMENT_TYPE_REMOVED_TYPE_ARGUMENT=Error -FIELD_ELEMENT_TYPE_REMOVED_VALUE=Error -ILLEGAL_EXTEND=Warning -ILLEGAL_IMPLEMENT=Warning -ILLEGAL_INSTANTIATE=Warning -ILLEGAL_OVERRIDE=Warning -ILLEGAL_REFERENCE=Warning -INTERFACE_ELEMENT_TYPE_ADDED_FIELD=Error -INTERFACE_ELEMENT_TYPE_ADDED_METHOD=Error -INTERFACE_ELEMENT_TYPE_ADDED_RESTRICTIONS=Error -INTERFACE_ELEMENT_TYPE_ADDED_SUPER_INTERFACE_WITH_METHODS=Error -INTERFACE_ELEMENT_TYPE_ADDED_TYPE_PARAMETER=Error -INTERFACE_ELEMENT_TYPE_CHANGED_CONTRACTED_SUPERINTERFACES_SET=Error -INTERFACE_ELEMENT_TYPE_CHANGED_TYPE_CONVERSION=Error -INTERFACE_ELEMENT_TYPE_REMOVED_FIELD=Error -INTERFACE_ELEMENT_TYPE_REMOVED_METHOD=Error -INTERFACE_ELEMENT_TYPE_REMOVED_TYPE_MEMBER=Error -INTERFACE_ELEMENT_TYPE_REMOVED_TYPE_PARAMETER=Error -INVALID_JAVADOC_TAG=Ignore -INVALID_REFERENCE_IN_SYSTEM_LIBRARIES=Error -LEAK_EXTEND=Warning -LEAK_FIELD_DECL=Warning -LEAK_IMPLEMENT=Warning -LEAK_METHOD_PARAM=Warning -LEAK_METHOD_RETURN_TYPE=Warning -METHOD_ELEMENT_TYPE_ADDED_RESTRICTIONS=Error -METHOD_ELEMENT_TYPE_ADDED_TYPE_PARAMETER=Error -METHOD_ELEMENT_TYPE_CHANGED_DECREASE_ACCESS=Error -METHOD_ELEMENT_TYPE_CHANGED_NON_ABSTRACT_TO_ABSTRACT=Error -METHOD_ELEMENT_TYPE_CHANGED_NON_FINAL_TO_FINAL=Error -METHOD_ELEMENT_TYPE_CHANGED_NON_STATIC_TO_STATIC=Error -METHOD_ELEMENT_TYPE_CHANGED_STATIC_TO_NON_STATIC=Error -METHOD_ELEMENT_TYPE_CHANGED_VARARGS_TO_ARRAY=Error -METHOD_ELEMENT_TYPE_REMOVED_ANNOTATION_DEFAULT_VALUE=Error -METHOD_ELEMENT_TYPE_REMOVED_TYPE_PARAMETER=Error -TYPE_PARAMETER_ELEMENT_TYPE_ADDED_CLASS_BOUND=Error 
-TYPE_PARAMETER_ELEMENT_TYPE_ADDED_INTERFACE_BOUND=Error
-TYPE_PARAMETER_ELEMENT_TYPE_CHANGED_CLASS_BOUND=Error
-TYPE_PARAMETER_ELEMENT_TYPE_CHANGED_INTERFACE_BOUND=Error
-TYPE_PARAMETER_ELEMENT_TYPE_REMOVED_CLASS_BOUND=Error
-TYPE_PARAMETER_ELEMENT_TYPE_REMOVED_INTERFACE_BOUND=Error
-UNUSED_PROBLEM_FILTERS=Warning
-automatically_removed_unused_problem_filters=false
-eclipse.preferences.version=1
-incompatible_api_component_version=Error
-incompatible_api_component_version_include_major_without_breaking_change=Disabled
-incompatible_api_component_version_include_minor_without_api_change=Disabled
-invalid_since_tag_version=Error
-malformed_since_tag=Error
-missing_since_tag=Error
-report_api_breakage_when_major_version_incremented=Disabled
-report_resolution_errors_api_component=Warning
diff --git a/org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF b/org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF
deleted file mode 100644
index 2093849c22..0000000000
--- a/org.eclipse.jgit.storage.dht/META-INF/MANIFEST.MF
+++ /dev/null
@@ -1,26 +0,0 @@
-Manifest-Version: 1.0
-Bundle-ManifestVersion: 2
-Bundle-Name: %plugin_name
-Bundle-SymbolicName: org.eclipse.jgit.storage.dht
-Bundle-Version: 2.1.0.qualifier
-Bundle-Localization: plugin
-Bundle-Vendor: %provider_name
-Export-Package: org.eclipse.jgit.storage.dht;version="2.1.0",
- org.eclipse.jgit.storage.dht.spi;version="2.1.0",
- org.eclipse.jgit.storage.dht.spi.cache;version="2.1.0",
- org.eclipse.jgit.storage.dht.spi.util;version="2.1.0",
- org.eclipse.jgit.storage.dht.spi.memory;version="2.1.0"
-Bundle-ActivationPolicy: lazy
-Bundle-RequiredExecutionEnvironment: J2SE-1.5
-Import-Package: com.google.protobuf;version="[2.4.0,2.5.0)",
- org.eclipse.jgit.errors;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.generated.storage.dht.proto;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.lib;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.nls;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.revwalk;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.transport;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.treewalk;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.storage.file;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.storage.pack;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.util;version="[2.1.0,2.2.0)",
- org.eclipse.jgit.util.io;version="[2.1.0,2.2.0)"
diff --git a/org.eclipse.jgit.storage.dht/README b/org.eclipse.jgit.storage.dht/README
deleted file mode 100644
index 1e07d377e7..0000000000
--- a/org.eclipse.jgit.storage.dht/README
+++ /dev/null
@@ -1,89 +0,0 @@
-JGit Storage on DHT
--------------------
-
-This implementation still has some pending issues:
-
-* DhtInserter must skip existing objects
-
- DirCache writes all trees to the ObjectInserter, letting the
- inserter figure out which trees we already have, and which are new.
- DhtInserter should buffer trees into a chunk, then before writing
- the chunk to the DHT do a batch lookup to find the existing
- ObjectInfo (if any). If any exist, the chunk should be compacted to
- eliminate these objects, and if there is room in the chunk for more
- objects, it should go back to the DhtInserter to be filled further
- before flushing.
-
- This implies the DhtInserter needs to work on multiple chunks at
- once, and may need to combine chunks together when there is more
- than one partial chunk.
-
-* DhtPackParser must check for collisions
-
- Because ChunkCache blindly assumes any copy of an object is an OK
- copy of an object, DhtPackParser needs to validate all new objects
- at the end of its importing phase, before it links the objects into
- the ObjectIndexTable. Most objects won't already exist, but some
- may, and those that do must either be removed from their chunk, or
- have their content byte-for-byte validated.
-
- Removal from a chunk just means deleting it from the chunk's local
- index, and not writing it to the global ObjectIndexTable. This
- creates a hole in the chunk which is wasted space, and that isn't
- very useful. Fortunately objects that fit fully within one chunk
- may be easy to inflate and double check, as they are small. Objects
- that are big span multiple chunks, and the new chunks can simply be
- deleted from the ChunkTable, leaving the original chunks.
-
- Deltas can be checked quickly by inflating the delta and checking
- only the insertion point text, comparing that to the existing data
- in the repository. Unfortunately the repository is likely to use a
- different delta representation, which means at least one of them
- will need to be fully inflated to check the delta against.
-
-* DhtPackParser should handle small-huge-small-huge
-
- Multiple chunks need to be open at once, in case we get a bad
- pattern of small-object, huge-object, small-object, huge-object. In
- this case the small-objects should be put together into the same
- chunk, to prevent having too many tiny chunks. This is tricky to do
- with OFS_DELTA. A long OFS_DELTA requires all prior chunks to be
- closed out so we know their lengths.
-
-* RepresentationSelector performance bad on Cassandra
-
- The 1.8 million batch lookups done for linux-2.6 kills Cassandra, it
- cannot handle this read load.
-
-* READ_REPAIR isn't fully accurate
-
- There are a lot of places where the generic DHT code should be
- helping to validate the local replica is consistent, and where it is
- not, help the underlying storage system to heal the local replica by
- reading from a remote replica and putting it back to the local one.
- Most of this should be handled in the DHT SPI layer, but the generic
- DHT code should be giving better hints during get() method calls.
-
-* LOCAL / WORLD writes
-
- Many writes should be done locally first, before they replicate to
- the other replicas, as they might be backed out on an abort.
-
- Likewise some writes must take place across sufficient replicas to
- ensure the write is not lost... and this may include ensuring that
- earlier local-only writes have actually been committed to all
- replicas. This committing to replicas might be happening in the
- background automatically after the local write (e.g. Cassandra will
- start to send writes made by one node to other nodes, but doesn't
- promise they finish). But parts of the code may need to force this
- replication to complete before the higher level git operation ends.
-
-* Forks/alternates
-
- Forking is common, but we should avoid duplicating content into the
- fork if the base repository has it. This requires some sort of
- change to the key structure so that chunks are owned by an object
- pool, and the object pool owns the repositories that use it. GC
- proceeds at the object pool level, rather than the repository level,
- but might want to take some of the reference namespace into account
- to avoid placing forked less-common content near primary content.
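The batch-lookup-and-compact flow sketched in the README for DhtInserter could look roughly like the following. This is only a sketch: Database, Chunk, ObjectInfo and ChunkBufferingInserter are hypothetical stand-ins invented for illustration, not the API of the deleted bundle.

    import java.util.Map;
    import java.util.Set;

    /** Hypothetical stand-ins; the deleted bundle's real API differed. */
    interface ObjectInfo { /* metadata for an object already in the DHT */ }

    interface Chunk {
        Set<String> objectNames();
        Chunk compact(Set<String> alreadyStored); // drop duplicates, repack
        boolean hasRoomForMore();
    }

    interface Database {
        Map<String, ObjectInfo> batchLookup(Set<String> objectNames);
        void putChunk(Chunk chunk);
    }

    class ChunkBufferingInserter {
        private final Database db;

        ChunkBufferingInserter(Database db) {
            this.db = db;
        }

        /**
         * Flush a buffered chunk: find which of its objects the DHT already
         * has, compact those out, and either hand the partially full chunk
         * back for further filling or write it out.
         */
        Chunk flushOrRefill(Chunk chunk) {
            Map<String, ObjectInfo> existing = db.batchLookup(chunk.objectNames());
            if (!existing.isEmpty())
                chunk = chunk.compact(existing.keySet());
            if (chunk.hasRoomForMore())
                return chunk; // caller keeps filling before the next flush
            db.putChunk(chunk);
            return null; // fully written to the DHT
        }
    }

Keeping several such partial chunks open at once, and merging two partial chunks when both come back with spare room, is the multi-chunk bookkeeping the README says DhtInserter would also need.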
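The end-of-import collision check described for DhtPackParser can be pictured the same way. ParsedChunk, GlobalObjectIndex and CollisionCheck are again illustrative names only; byte-for-byte comparison before dropping the duplicate is one of the two options the README leaves open, chosen here because it also detects a colliding object.

    import java.util.Arrays;
    import java.util.List;

    /** Illustrative types only; not the deleted bundle's API. */
    interface GlobalObjectIndex {
        boolean contains(String objectName);
        byte[] read(String objectName); // inflate the existing copy
    }

    interface ParsedChunk {
        List<String> newObjectNames();
        byte[] inflate(String objectName); // small, single-chunk objects
        void dropFromLocalIndex(String objectName);
    }

    class CollisionCheck {
        /**
         * After the importing phase, re-check every object that already
         * exists in the global index before linking anything: a duplicate
         * is dropped from the chunk's local index once its content is
         * proven identical; a mismatch means a hash collision attempt.
         */
        static void validate(ParsedChunk chunk, GlobalObjectIndex global) {
            for (String name : chunk.newObjectNames()) {
                if (!global.contains(name))
                    continue; // genuinely new object, link it normally
                byte[] mine = chunk.inflate(name);
                byte[] theirs = global.read(name);
                if (!Arrays.equals(mine, theirs))
                    throw new IllegalStateException("collision on " + name);
                chunk.dropFromLocalIndex(name); // keep the existing copy
            }
        }
    }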
diff --git a/org.eclipse.jgit.storage.dht/about.html b/org.eclipse.jgit.storage.dht/about.html deleted file mode 100644 index 01a2671875..0000000000 --- a/org.eclipse.jgit.storage.dht/about.html +++ /dev/null @@ -1,59 +0,0 @@ -<?xml version="1.0" encoding="ISO-8859-1" ?> -<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> -<html xmlns="http://www.w3.org/1999/xhtml"> - -<head> -<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" /> -<title>Eclipse Distribution License - Version 1.0</title> -<style type="text/css"> - body { - size: 8.5in 11.0in; - margin: 0.25in 0.5in 0.25in 0.5in; - tab-interval: 0.5in; - } - p { - margin-left: auto; - margin-top: 0.5em; - margin-bottom: 0.5em; - } - p.list { - margin-left: 0.5in; - margin-top: 0.05em; - margin-bottom: 0.05em; - } - </style> - -</head> - -<body lang="EN-US"> - -<p><b>Eclipse Distribution License - v 1.0</b></p> - -<p>Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. </p> - -<p>All rights reserved.</p> -<p>Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: -<ul><li>Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. </li> -<li>Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. </li> -<li>Neither the name of the Eclipse Foundation, Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. </li></ul> -</p> -<p>THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.</p> - -</body> - -</html> diff --git a/org.eclipse.jgit.storage.dht/build.properties b/org.eclipse.jgit.storage.dht/build.properties deleted file mode 100644 index b67aba1a41..0000000000 --- a/org.eclipse.jgit.storage.dht/build.properties +++ /dev/null @@ -1,6 +0,0 @@ -source.. = src/ -output.. = bin/ -bin.includes = META-INF/,\ - .,\ - plugin.properties,\ - about.html diff --git a/org.eclipse.jgit.storage.dht/plugin.properties b/org.eclipse.jgit.storage.dht/plugin.properties deleted file mode 100644 index aff758ffea..0000000000 --- a/org.eclipse.jgit.storage.dht/plugin.properties +++ /dev/null @@ -1,2 +0,0 @@ -plugin_name=JGit DHT Storage -provider_name=Eclipse.org diff --git a/org.eclipse.jgit.storage.dht/pom.xml b/org.eclipse.jgit.storage.dht/pom.xml deleted file mode 100644 index e54b1229a3..0000000000 --- a/org.eclipse.jgit.storage.dht/pom.xml +++ /dev/null @@ -1,181 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!-- - Copyright (C) 2011, Google Inc. 
- and other copyright owners as documented in the project's IP log. - - This program and the accompanying materials are made available - under the terms of the Eclipse Distribution License v1.0 which - accompanies this distribution, is reproduced below, and is - available at http://www.eclipse.org/org/documents/edl-v10.php - - All rights reserved. - - Redistribution and use in source and binary forms, with or - without modification, are permitted provided that the following - conditions are met: - - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - - Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - - Neither the name of the Eclipse Foundation, Inc. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior - written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---> - -<project xmlns="http://maven.apache.org/POM/4.0.0" - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" - xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> - <modelVersion>4.0.0</modelVersion> - - <parent> - <groupId>org.eclipse.jgit</groupId> - <artifactId>org.eclipse.jgit-parent</artifactId> - <version>2.1.0-SNAPSHOT</version> - </parent> - - <artifactId>org.eclipse.jgit.storage.dht</artifactId> - <name>JGit - DHT Storage</name> - - <description> - Git repository storage on a distributed hashtable - </description> - - <properties> - <translate-qualifier/> - </properties> - - <dependencies> - <dependency> - <groupId>org.eclipse.jgit</groupId> - <artifactId>org.eclipse.jgit</artifactId> - <version>${project.version}</version> - </dependency> - - <dependency> - <groupId>org.eclipse.jgit</groupId> - <artifactId>org.eclipse.jgit.generated.storage.dht.proto</artifactId> - <version>${project.version}</version> - </dependency> - </dependencies> - - <build> - <sourceDirectory>src/</sourceDirectory> - - <resources> - <resource> - <directory>.</directory> - <includes> - <include>plugin.properties</include> - <include>about.html</include> - </includes> - </resource> - <resource> - <directory>resources/</directory> - </resource> - </resources> - - <plugins> - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-source-plugin</artifactId> - <inherited>true</inherited> - <executions> - <execution> - <id>attach-sources</id> - <phase>process-classes</phase> - <goals> - <goal>jar</goal> - </goals> - <configuration> - <archive> - <manifestFile>${source-bundle-manifest}</manifestFile> - </archive> - </configuration> - </execution> - </executions> - </plugin> - - <plugin> - <artifactId>maven-jar-plugin</artifactId> - <configuration> - <archive> - <manifestFile>${bundle-manifest}</manifestFile> - </archive> - </configuration> - </plugin> - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>findbugs-maven-plugin</artifactId> - <configuration> - <findbugsXmlOutput>true</findbugsXmlOutput> - <failOnError>false</failOnError> - </configuration> - <executions> - <execution> - <goals> - <goal>check</goal> - </goals> - </execution> - </executions> - </plugin> - - <plugin> - <groupId>org.apache.maven.plugins</groupId> - <artifactId>maven-pmd-plugin</artifactId> - <configuration> - <sourceEncoding>utf-8</sourceEncoding> - <minimumTokens>100</minimumTokens> - <targetJdk>1.5</targetJdk> - <format>xml</format> - <failOnViolation>false</failOnViolation> - </configuration> - <executions> - <execution> - <goals> - <goal>cpd-check</goal> - </goals> - </execution> - </executions> - </plugin> - - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>clirr-maven-plugin</artifactId> - </plugin> - </plugins> - </build> - - <reporting> - <plugins> - <plugin> - <groupId>org.codehaus.mojo</groupId> - <artifactId>clirr-maven-plugin</artifactId> - <version>${clirr-version}</version> - <configuration> - <comparisonVersion>${jgit-last-release-version}</comparisonVersion> - <minSeverity>info</minSeverity> - </configuration> - </plugin> - </plugins> - </reporting> -</project> diff --git a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties deleted file mode 100644 index d53147abf6..0000000000 --- a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/DhtText.properties +++ /dev/null @@ -1,32 +0,0 @@ 
-cannotInsertObject=Cannot insert any objects into a ChunkWriter -corruptChunk=Chunk {0} is corrupt and does not match its name -corruptCompressedObject=Corrupt deflate stream in {0} at {1} -cycleInDeltaChain=Cycle in delta chain {0} offset {1} -databaseRequired=Database is required -expectedObjectSizeDuringCopyAsIs=Object {0} has size of 0 -invalidCachedPackInfo=Invalid CachedPackInfo on {0} {1} -invalidChunkKey=Invalid ChunkKey {0} -invalidChunkMeta=Invalid ChunkMeta on {0} -invalidObjectIndexKey=Invalid ObjectIndexKey {0} -invalidObjectInfo=Invalid ObjectInfo for {0} from {1} -invalidRefData=Invalid RefData on {0} -missingChunk=Missing {0} -missingLongOffsetBase=Missing base for offset -{1} in meta of {0} -nameRequired=Name or key is required -noSavedTypeForBase=No type information for base object at {0} -notTimeUnit=Invalid time unit value: {0}={1} -objectListSelectingName=Selecting list name -objectListCountingFrom=Counting objects in {0} -objectTypeUnknown=unknown -packParserInvalidPointer=Invalid pointer inside pack parser: {0}, chunk {1}, offset {2}. -packParserRollbackFailed=DhtPackParser rollback failed -recordingObjects=Recording objects -repositoryAlreadyExists=Repository {0} already exists -repositoryMustBeBare=Only bare repositories are supported -shortCompressedObject=Short deflate stream in {0} at {1} -timeoutChunkMeta=Timeout waiting for ChunkMeta -timeoutLocatingRepository=Timeout locating {0} -tooManyObjectsInPack={0} is too many objects in a pack file -unsupportedChunkIndex=Unsupported index version {0} in {1} -unsupportedObjectTypeInChunk=Unknown object type {0} in {1} at {2} -wrongChunkPositionInCachedPack=Cached pack {0} put chunk {1} at {2} but delta in {3} expects it at {4} diff --git a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html b/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html deleted file mode 100644 index c2c8b4c245..0000000000 --- a/org.eclipse.jgit.storage.dht/resources/org/eclipse/jgit/storage/dht/dht-schema.html +++ /dev/null @@ -1,1151 +0,0 @@ -<html> -<head> -<title>Git on DHT Schema</title> - -<style type='text/css'> -body { font-size: 10pt; } -h1 { font-size: 16pt; } -h2 { font-size: 12pt; } -h3 { font-size: 10pt; } - -body { - margin-left: 8em; - margin-right: 8em; -} -h1 { margin-left: -3em; } -h2 { margin-left: -2em; } -h3 { margin-left: -1em; } -hr { margin-left: -4em; margin-right: -4em; } - -.coltoc { - font-size: 8pt; - font-family: monospace; -} - -.rowkey { - margin-left: 1em; - padding-top: 0.2em; - padding-left: 1em; - padding-right: 1em; - width: 54em; - border: 1px dotted red; - background-color: #efefef; - white-space: nowrap; -} -.rowkey .header { - font-weight: bold; - padding-right: 1em; -} -.rowkey .var { - font-style: italic; - font-family: monospace; -} -.rowkey .lit { - font-weight: bold; - font-family: monospace; -} -.rowkey .example { - font-family: monospace; -} -.rowkey p { - white-space: normal; -} - -.colfamily { - margin-top: 0.5em; - padding-top: 0.2em; - padding-left: 1em; - padding-right: 1em; - width: 55em; - border: 1px dotted blue; - background-color: #efefef; - white-space: nowrap; -} -.colfamily .header { - font-weight: bold; - padding-right: 1em; -} -.colfamily .var { - font-style: italic; - font-family: monospace; -} -.colfamily .lit { - font-family: monospace; -} -.colfamily .example { - font-family: monospace; -} -.colfamily p { - white-space: normal; -} - -.summary_table { - border-collapse: collapse; - border-spacing: 0; -} 
-.summary_table .desc { - font-size: 8pt; - white-space: nowrap; - text-align: right; - width: 20em; -} -.summary_table td { - border: 1px dotted lightgray; - padding-top: 2px; - padding-bottom: 2px; - padding-left: 5px; - padding-right: 5px; - vertical-align: top; -} -.summary_table tr.no_border td { - border: none; -} -</style> -</head> -<body> - -<h1>Git on DHT Schema</h1> - -<p>Storing Git repositories on a Distributed Hash Table (DHT) may -improve scaling for large traffic, but also simplifies management when -there are many small repositories.</p> - -<h2>Table of Contents</h2> -<ul> - <li><a href="#concepts">Concepts</a></li> - <li><a href="#summary">Summary</a></li> - <li><a href="#security">Data Security</a></li> - - <li>Tables: - <ul> - <li><a href="#REPOSITORY_INDEX">Table REPOSITORY_INDEX</a> - ( - <a href="#REPOSITORY_INDEX.id" class="toccol">id</a> - )</li> - - <li><a href="#REPOSITORY">Table REPOSITORY</a> - ( - <a href="#REPOSITORY.chunk-info" class="toccol">chunk-info</a>, - <a href="#REPOSITORY.cached-pack" class="toccol">cached-pack</a> - )</li> - - <li><a href="#REF">Table REF</a> - ( - <a href="#REF.target" class="toccol">target</a> - )</li> - - <li><a href="#OBJECT_INDEX">Table OBJECT_INDEX</a> - ( - <a href="#OBJECT_INDEX.info" class="toccol">info</a> - )</li> - - <li><a href="#CHUNK">Table CHUNK</a> - ( - <a href="#CHUNK.chunk" class="toccol">chunk</a>, - <a href="#CHUNK.index" class="toccol">index</a>, - <a href="#CHUNK.meta" class="toccol">meta</a> - )</li> - </ul> - </li> - - <li>Protocol Messages: - <ul> - <li><a href="#message_RefData">RefData</a></li> - <li><a href="#message_ObjectInfo">ObjectInfo</a></li> - <li><a href="#message_ChunkInfo">ChunkInfo</a></li> - <li><a href="#message_ChunkMeta">ChunkMeta</a></li> - <li><a href="#message_CachedPackInfo">CachedPackInfo</a></li> - </ul> - </li> -</ul> - -<a name="concepts"><h2>Concepts</h2></a> - -<p><i>Git Repository</i>: Stores the version control history for a -single project. Each repository is a directed acyclic graph (DAG) -composed of objects. Revision history for a project is described by a -commit object pointing to the complete set of files that make up that -version of the project, and a pointer to the commit that came -immediately before it. Repositories also contain references, -associating a human readable branch or tag name to a specific commit -object. Tommi Virtanen has a -<a href="http://eagain.net/articles/git-for-computer-scientists/">more -detailed description of the Git DAG</a>.</p> - -<p><i>Object</i>: Git objects are named by applying the SHA-1 hash -algorithm to their contents. There are 4 object types: commit, tree, -blob, tag. Objects are typically stored deflated using libz deflate, -but may also be delta compressed against another similar object, -further reducing the storage required. The big factor for Git -repository size is usually object count, e.g. the linux-2.6 repository -contains 1.8 million objects.</p> - -<p><i>Reference</i>: Associates a human readable symbolic name, such -as <code>refs/heads/master</code> to a specific Git object, usually a -commit or tag. References are updated to point to the most recent -object whenever changes are committed to the repository.</p> - -<p><i>Git Pack File</i>: A container stream holding many objects in a -highly compressed format. On the local filesystem, Git uses pack files -to reduce both inode and space usage by combining millions of objects -into a single data stream. 
On the network, Git uses pack files as the -basic network protocol to transport objects from one system's -repository to another.</p> - -<p><i>Garbage Collection</i>: Scanning the Git object graph to locate -objects that are reachable, and others that are unreachable. Git also -generally performs data recompression during this task to produce more -optimal deltas between objects, reducing overall disk usage and data -transfer sizes. This is independent of any GC that may be performed by -the DHT to clean up old cells.</p> - -<p>The basic storage strategy employed by this schema is to break a -standard Git pack file into chunks, approximately 1 MiB in size. Each -chunk is stored as one row in the <a href="#CHUNK">CHUNK</a> table. -During reading, chunks are paged into the application on demand, but -may also be prefetched using prefetch hints. Rules are used to break -the standard pack into chunks, these rules help to improve reference -locality and reduce the number of chunk loads required to service -common operations. In a nutshell, the DHT is used as a virtual memory -system for pages about 1 MiB in size.</p> - -<a name="summary"><h2>Summary</h2></a> - -<p>The schema consists of a handful of tables. Size estimates are -given for one copy of the linux-2.6 Git repository, a relative tortue -test case that contains 1.8 million objects and is 425 MiB when stored -on the local filesystem. All sizes are before any replication made by -the DHT, or its underlying storage system.</p> - -<table style='margin-left: 2em' class='summary_table'> - <tr> - <th>Table</th> - <th>Rows</th> - <th>Cells/Row</th> - <th>Bytes</th> - <th>Bytes/Row</th> - </tr> - - <tr> - <td><a href="#REPOSITORY_INDEX">REPOSITORY_INDEX</a> - <div class='desc'>Map host+path to surrogate key.</div></td> - <td align='right'>1</td> - <td align='right'>1</td> - <td align='right'>< 100 bytes</td> - <td align='right'>< 100 bytes</td> - </tr> - - <tr> - <td><a href="#REPOSITORY">REPOSITORY</a> - <div class='desc'>Accounting and replica management.</div></td> - <td align='right'>1</td> - <td align='right'>403</td> - <td align='right'>65 KiB</td> - <td align='right'>65 KiB</td> - </tr> - - <tr> - <td><a href="#REF">REF</a> - <div class='desc'>Bind branch/tag name to Git object.</div></td> - <td align='right'>211</td> - <td align='right'>1</td> - <td align='right'>14 KiB</td> - <td align='right'>67 bytes</td> - </tr> - - <tr> - <td><a href="#OBJECT_INDEX">OBJECT_INDEX</a> - <div class='desc'>Locate Git object by SHA-1 name.</div></td> - <td align='right'>1,861,833</td> - <td align='right'>1</td> - <td align='right'>154 MiB</td> - <td align='right'>87 bytes</td> - </tr> - - <tr> - <td><a href="#CHUNK">CHUNK</a> - <div class='desc'>Complete Git object storage.</div></td> - <td align='right'>402</td> - <td align='right'>3</td> - <td align='right'>417 MiB</td> - <td align='right'>~ 1 MiB</td> - </tr> - - <tr class='no_border'> - <td align='right'><i>Total</i></td> - <td align='right'>1,862,448</td> - <td align='right'></td> - <td align='right'>571 MiB</td> - <td align='right'></td> - </tr> -</table> - -<a name="security"><h2>Data Security</h2></a> - -<p>If data encryption is necessary to protect file contents, the <a -href="#CHUNK.chunk">CHUNK.chunk</a> column can be encrypted with a -block cipher such as AES. This column contains the revision commit -messages, file paths, and file contents. By encrypting one column, the -majority of the repository data is secured. 
As each cell value is -about 1 MiB and contains a trailing 4 bytes of random data, an ECB -mode of operation may be sufficient. Because the cells are already -very highly compressed using the Git data compression algorithms, -there is no increase in disk usage due to encryption.</p> - -<p>Branch and tag names (<a href="#REF">REF</a> row keys) are not -encrypted. If these need to be secured the portion after the ':' would -need to be encrypted with a block cipher. However these strings are -very short and very common (HEAD, refs/heads/master, refs/tags/v1.0), -making encryption difficult. A variation on the schema might move all -rows for a repository into a single protocol messsage, then encrypt -the protobuf into a single cell. Unfortunately this strategy has a -high update cost, and references change frequently.</p> - -<p>Object SHA-1 names (<a href="#OBJECT_INDEX">OBJECT_INDEX</a> row -keys and <a href="#CHUNK.index">CHUNK.index</a> values) are not -encrypted. This allows a reader to determine if a repository contains -a specific revision, but does not allow them to inspect the contents -of the revision. The CHUNK.index column could also be encrypted with a -block cipher when CHUNK.chunk is encrypted (see above), however the -OBJECT_INDEX table row keys cannot be encrypted if abbrevation -expansion is to be supported for end-users of the repository. The row -keys must be unencrypted as abbreviation resolution is performed by a -prefix range scan over the keys.</p> - -<p>The remaining tables and columns contain only statistics (e.g. -object counts or cell sizes), or internal surrogate keys -(repository_id, chunk_key) and do not require encryption.</p> - -<hr /> -<a name="REPOSITORY_INDEX"><h2>Table REPOSITORY_INDEX</h2></a> - -<p>Maps a repository name, as presented in the URL by an end-user or -client application, into its internal repository_id value. This -mapping allows the repository name to be quickly modified (e.g. -renamed) without needing to update the larger data rows of the -repository.</p> - -<p>The exact semantics of the repository_name format is left as a -deployment decision, but DNS hostname, '/', repository name would be -one common usage.</p> - -<h3>Row Key</h3> -<div class='rowkey'> - <div> - <span class='header'>Row Key:</span> - <span class='var'>repository_name</span> - </div> - - <p>Human readable name of the repository, typically derived from the - HTTP <code>Host</code> header and path in the URL.</p> - - <p>Examples:</p> - <ul> - <li><span class='example'>com.example.git/pub/git/foo.git</span></li> - <li><span class='example'>com.example.git/~user/mystuff.git</span></li> - </ul> -</div> - -<h3>Columns</h3> -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="REPOSITORY_INDEX.id"><span class='lit'>id:</span></a> - </div> - - <p>The repository_id, as an 8-digit hex ASCII string.</p> -</div> - -<h3>Size Estimate</h3> - -<p>Less than 100,000 rows. More likely estimate is 1,000 rows. -Total disk usage under 512 KiB, assuming 1,000 names and 256 -characters per name.</p> - -<h3>Updates</h3> - -<p>Only on repository creation or rename, which is infrequent (<10 -rows/month). Updates are performed in a row-level transaction, to -ensure a name is either assigned uniquely, or fails.</p> - -<h3>Reads</h3> - -<p>Reads are tried first against memcache, then against the DHT if the -entry did not exist in memcache. 
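<p>A minimal Java sketch of this read-through pattern follows. The <code>MemCache</code> and <code>RepositoryIndexTable</code> interfaces are hypothetical stand-ins for whatever cache and DHT client a deployment wires in; they are not types from this bundle.</p>

<pre>
import java.util.Optional;
import java.util.concurrent.CompletableFuture;

/** Hypothetical stand-ins for the deployment's cache and DHT client. */
interface MemCache {
    Optional<String> get(String key);
    void put(String key, String value);
}

interface RepositoryIndexTable {
    /** Read the "id:" column for one repository_name row, if present. */
    Optional<String> findRepositoryId(String repositoryName);
}

final class RepositoryIndexLookup {
    private final MemCache cache;
    private final RepositoryIndexTable dht;

    RepositoryIndexLookup(MemCache cache, RepositoryIndexTable dht) {
        this.cache = cache;
        this.dht = dht;
    }

    /** Resolve a repository name to its 8-digit hex repository_id. */
    Optional<String> lookup(String repositoryName) {
        Optional<String> cached = cache.get(repositoryName);
        if (cached.isPresent())
            return cached;

        Optional<String> fromDht = dht.findRepositoryId(repositoryName);
        // A successful DHT read is copied back into the cache off the
        // caller's thread, so the next lookup becomes a cache hit.
        fromDht.ifPresent(id -> CompletableFuture.runAsync(
                () -> cache.put(repositoryName, id)));
        return fromDht;
    }
}
</pre>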
Successful reads against the DHT are -put back into memcache in the background.</p> - -<a name="REPOSITORY"><h2>Table REPOSITORY</h2></a> - -<p>Tracks top-level information about each repository.</p> - -<h3>Row Key</h3> -<div class='rowkey'> - <div> - <span class='header'>Row Key:</span> - <span class='var'>repository_id</span> - </div> - - <p>The repository_id, as an 8-digit hex ASCII string.</p> -</div> - -<p>Typically this is assigned sequentially, then has the bits reversed -to evenly spread repositories throughout the DHT. For example the -first repository is <code>80000000</code>, and the second is -<code>40000000</code>.</p> - -<h3>Columns</h3> -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="REPOSITORY.chunk-info"><span class='lit'>chunk-info:</span></a> - <span class='var'>chunk_key[short]</span> - </div> - - <p>Cell value is the protocol message <a - href="#message_ChunkInfo">ChunkInfo</a> describing the chunk's - contents. Most of the message's fields are only useful for quota - accounting and reporting.</p> -</div> - -<p>This column exists to track all of the chunks that make up a -repository's object set. Garbage collection and quota accounting tasks -can primarily drive off this column, rather than scanning the much -larger <a href="#CHUNK">CHUNK</a> table with a regular expression on -the chunk row key.</p> - -<p>As each chunk averages 1 MiB in size, the linux-2.6 repository -(at 373 MiB) has about 400 chunks and thus about 400 chunk-info -cells. The chromium repository (at 1 GiB) has about 1000 chunk-info -cells. It would not be uncommon to have 2000 chunk-info cells.</p> - -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="REPOSITORY.cached-pack"><span class='lit'>cached-pack:</span></a> - <span class='var'>NNNNx38</span> - <span class='lit'>.</span> - <span class='var'>VVVVx38</span> - </div> - - <p>Variables:</p> - <ul> - <li><span class='var'>NNNNx38</span> = 40 hex digit name of the cached pack</li> - <li><span class='var'>VVVVx38</span> = 40 hex digit version of the cached pack</li> - </ul> - - <p>Examples:</p> - <ul> - <li><span class='example'>4e32fb97103981e7dd53dcc786640fa4fdb444b8.8975104a03d22e54f7060502e687599d1a2c2516</span></li> - </ul> - - <p>Cell value is the protocol message <a - href="#message_CachedPackInfo">CachedPackInfo</a> describing the - chunks that make up a cached pack.</p> -</div> - -<p>The <code>cached-pack</code> column family describes large lists of -chunks that when combined together in a specific order create a valid -Git pack file directly streamable to a client. This avoids needing to -enumerate and pack the entire repository on each request.</p> - -<p>The cached-pack name (NNNNx38 above) is the SHA-1 of the objects -contained within the pack, in binary, sorted. This is the standard -naming convention for pack files on the local filesystem. The version -(VVVVx38 above) is the SHA-1 of the chunk keys, sorted. The version -makes the cached-pack cell unique, if any single bit in the compressed -data is modified a different version will be generated, and a -different cell will be used to describe the alternate version of the -same data. 
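<p>As an illustration of that naming rule (not code from this bundle), both SHA-1s can be computed with the JDK digest API. The sketch assumes chunk keys are hashed as their UTF-8 row-key strings; object names are hashed in 20-byte binary form, sorted.</p>

<pre>
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;

final class CachedPackNaming {
    /** Pack name: SHA-1 over the object ids, sorted, each as 20 raw bytes. */
    static String packName(List<String> objectIdsHex) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        objectIdsHex.stream().sorted().forEach(hex -> md.update(hexToBytes(hex)));
        return toHex(md.digest());
    }

    /** Pack version: SHA-1 over the sorted chunk keys (assumed hashed as UTF-8 text). */
    static String packVersion(List<String> chunkKeys) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        chunkKeys.stream().sorted()
                .forEach(k -> md.update(k.getBytes(StandardCharsets.UTF_8)));
        return toHex(md.digest());
    }

    private static byte[] hexToBytes(String hex) {
        byte[] raw = new byte[hex.length() / 2];
        for (int i = 0; i < raw.length; i++)
            raw[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
        return raw;
    }

    private static String toHex(byte[] raw) {
        StringBuilder sb = new StringBuilder(raw.length * 2);
        for (byte b : raw)
            sb.append(String.format("%02x", b));
        return sb.toString();
    }
}
</pre>

<p>The <code>cached-pack</code> column qualifier is then the two results joined by '<code>.</code>', matching the example key shown above.</p>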
The version is necessary to prevent repacks of the same -object set (but with different compression settings or results) from -stepping on active readers.</p> - -<h2>Size Estimate</h2> - -<p>1 row per repository (~1,000 repositories), however the majority of -the storage cost is in the <code>chunk-info</code> column family, -which can have more than 2000 cells per repository.</p> - -<p>Each <code>chunk-info</code> cell is on average 147 bytes. For a -large repository like chromium.git (over 1000 chunks) this is only 147 -KiB for the entire row.</p> - -<p>Each <code>cached-pack</code> cell is on average 5350 bytes. Most -repositories have 1 of these cells, 2 while the repository is being -repacked on the server side to update the cached-pack data.</p> - -<h2>Updates</h2> - -<p>Information about each ~1 MiB chunk of pack data received over the -network is stored as a unique column in the <code>chunk-info</code> -column family.</p> - -<p>Most pushes are at least 2 chunks (commit, tree), with 50 pushes -per repository per day being possible (50,000 new cells/day).</p> - -<p><b>TODO:</b> Average push rates?</p> - -<h2>Reads</h2> - -<p><i>Serving clients:</i> Read all cells in the -<code>cached-pack</code> column family, typically only 1-5 cells. The -cells are cached in memcache and read from there first.</p> - -<p><i>Garbage collection:</i> Read all cells in the -<code>chunk-info</code> column family to determine which chunks are -owned by this repository, without scanning the <a href="#CHUNK">CHUNK</a> table. -Delete <code>chunk-info</code> after the corresponding <a href="#CHUNK">CHUNK</a> -row has been deleted. Unchanged chunks have their info left alone.</p> - -<a name="REF"><h2>Table REF</h2></a> - -<p>Associates a human readable branch (e.g. -<code>refs/heads/master</code>) or tag (e.g. -<code>refs/tags/v1.0</code>) name to the Git -object that represents that current state of -the repository.</p> - -<h3>Row Key</h3> -<div class='rowkey'> - <div> - <span class='header'>Row Key:</span> - <span class='var'>repository_id</span> - <span class='lit'>:</span> - <span class='var'>ref_name</span> - </div> - - <p>Variables:</p> - <ul> - <li><span class='var'>repository_id</span> = Repository owning the reference (see above)</li> - <li><span class='var'>ref_name</span> = Name of the reference, UTF-8 string</li> - </ul> - - <p>Examples:</p> - <ul> - <li><span class='example'>80000000:HEAD</span></li> - <li><span class='example'>80000000:refs/heads/master</span></li> - <br /> - <li><span class='example'>40000000:HEAD</span></li> - <li><span class='example'>40000000:refs/heads/master</span></li> - </ul> -</div> - -<p>The separator <code>:</code> used in the row key was chosen because -this character is not permitted in a Git reference name.</p> - -<h3>Columns</h3> -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="REF.target"><span class='lit'>target:</span></a> - </div> - - <p>Cell value is the protocol message - <a href="#message_RefData">RefData</a> describing the - current SHA-1 the reference points to, and the chunk - it was last observed in. The chunk hint allows skipping - a read of <a href="#OBJECT_INDEX">OBJECT_INDEX</a>.</p> - - <p>Several versions (5) are stored for emergency rollbacks. - Additional versions beyond 5 are cleaned up during table garbage - collection as managed by the DHT's cell GC.</p> -</div> - -<h3>Size Estimate</h3> - -<p><i>Normal Git usage:</i> ~10 branches per repository, ~200 tags. -For 1,000 repositories, about 200,000 rows total. 
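<p>For concreteness, the rows being counted here are keyed exactly as described above; a tiny illustrative helper (not code from this bundle) shows the construction:</p>

<pre>
final class RefRowKey {
    /**
     * Build a REF row key such as "80000000:refs/heads/master".
     * The ':' separator is safe because Git forbids it in reference names.
     */
    static String of(String repositoryId, String refName) {
        if (refName.indexOf(':') >= 0)
            throw new IllegalArgumentException("invalid reference name: " + refName);
        return repositoryId + ":" + refName;
    }
}
</pre>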
Average row size is -about 240 bytes/row before compression (67 after), or 48M total.</p> - -<p><i>Gerrit Code Review usage:</i> More than 300 new rows per day. -Each snapshot of each change under review is one reference.</p> - -<h3>Updates</h3> - -<p>Writes are performed by doing an atomic compare-and-swap (through a -transaction), changing the RefData protocol buffer.</p> - -<h3>Reads</h3> - -<p>Reads perform prefix scan for all rows starting with -<code>repository_id:</code>. Plans exist to cache these reads within a -custom service, avoiding most DHT queries.</p> - -<a name="OBJECT_INDEX"><h2>Table OBJECT_INDEX</h2></a> - -<p>The Git network protocol has clients sending object SHA-1s to the -server, with no additional context or information. End-users may also -type a SHA-1 into a web search box. This table provides a mapping of -the object SHA-1 to which chunk(s) store the object's data. The table -is sometimes also called the 'global index', since it names where -every single object is stored.</p> - -<h3>Row Key</h3> -<div class='rowkey'> - <div> - <span class='header'>Row Key:</span> - <span class='var'>NN</span> - <span class='lit'>.</span> - <span class='var'>repository_id</span> - <span class='lit'>.</span> - <span class='var'>NNx40</span> - </div> - - <p>Variables:</p> - <ul> - <li><span class='var'>NN</span> = First 2 hex digits of object SHA-1</li> - <li><span class='var'>repository_id</span> = Repository owning the object (see above)</li> - <li><span class='var'>NNx40</span> = Complete object SHA-1 name, in hex</li> - </ul> - - <p>Examples:</p> - <ul> - <li><span class='example'>2b.80000000.2b5c9037c81c38b3b9abc29a3a87a4abcd665ed4</span></li> - <li><span class='example'>8f.40000000.8f270a441569b127cc4af8a6ef601d94d9490efb</span></li> - </ul> -</div> - -<p>The first 2 hex digits (<code>NN</code>) distribute object keys -within the same repository around the DHT keyspace, preventing a busy -repository from creating too much of a hot-spot within the DHT. To -simplify key generation, these 2 digits are repeated after the -repository_id, as part of the 40 hex digit object name.</p> - -<p>Keys must be clustered by repository_id to support extending -abbreviations. End-users may supply an abbreviated SHA-1 of 4 or more -digits (up to 39) and ask the server to complete them to a full 40 -digit SHA-1 if the server has the relevant object within the -repository's object set.</p> - -<p>A schema variant that did not include the repository_id as part of -the row key was considered, but discarded because completing a short -4-6 digit abbreviated SHA-1 would be impractical once there were -billions of objects stored in the DHT. Git end-users expect to be able -to use 4 or 6 digit abbreviations on very small repositories, as the -number of objects is low and thus the number of bits required to -uniquely name an object within that object set is small.</p> - -<h3>Columns</h3> -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="OBJECT_INDEX.info"><span class='lit'>info:</span></a> - <span class='var'>chunk_key[short]</span> - </div> - - <p>Cell value is the protocol message - <a href="#message_ObjectInfo">ObjectInfo</a> describing how the object - named by the row key is stored in the chunk named by the column name.</p> - - <p>Cell timestamp matters. The <b>oldest cell</b> within the - entire column family is favored during reads. 
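<p>The row keys themselves follow directly from the layout above; the helper below is an illustration of the stated format, not the module's own key builder.</p>

<pre>
final class ObjectIndexRowKey {
    /**
     * Build an OBJECT_INDEX row key, e.g.
     * "2b.80000000.2b5c9037c81c38b3b9abc29a3a87a4abcd665ed4".
     * The leading two digits spread one repository's rows across the key
     * space; keeping repositoryId in the middle clusters the rows so that
     * abbreviation expansion stays a prefix scan within one repository.
     */
    static String of(String repositoryId, String objectNameHex) {
        if (objectNameHex.length() != 40)
            throw new IllegalArgumentException("expected 40 hex digits");
        return objectNameHex.substring(0, 2) + "." + repositoryId + "." + objectNameHex;
    }

    /** Scan prefix used to expand an abbreviation of 4 or more hex digits. */
    static String abbreviationPrefix(String repositoryId, String abbrevHex) {
        return abbrevHex.substring(0, 2) + "." + repositoryId + "." + abbrevHex;
    }
}
</pre>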
As chunk_key is - unique, versions within a single column aren't relevant.</p> -</div> - -<h3>Size Estimate</h3> - -<p>Average row size per object/chunk pair is 144 bytes uncompressed -(87 compressed), based on the linux-2.6 repository. The linux-2.6 -repository has 1.8 million objects, and is growing at a rate of about -300,000 objects/year. Total usage for linux-2.6 is above 154M.</p> - -<p>Most rows contain only 1 cell, as the object appears in only 1 -chunk within that repository.</p> - -<p><i>Worst case:</i> 1.8 million rows/repository * 1,000 repositories -is around 1.8 billion rows and 182G.</p> - -<h3>Updates</h3> - -<p>One write per object received over the network; typically performed -as part of an asynchronous batch. Each batch is sized around 512 KiB -(about 3000 rows). Because of SHA-1's uniform distribution, row keys -are first sorted and then batched into buckets of about 3000 rows. To -prevent too much activity from going to one table segment at a time -the complete object list is segmented into up to 32 groups which are -written in round-robin order.</p> - -<p>A full push of the linux-2.6 repository writes 1.8 million -rows as there are 1.8 million objects in the pack stream.</p> - -<p>During normal insert or receive operations, each received object is -a blind write to add one new <code>info:chunk_key[short]</code> cell -to the row. During repack, all cells in the <code>info</code> column -family are replaced with a single cell.</p> - -<h3>Reads</h3> - -<p>During common ancestor negotiation reads occur in batches of 64-128 -full row keys, uniformly distributed throughout the key space. Most of -these reads are misses, the OBJECT_INDEX table does not contain the -key offered by the client. A successful negotation for most developers -requires at least two rounds of 64 objects back-to-back over HTTP. Due -to the high miss rate on this table, an in-memory bloom filter may be -important for performance.</p> - -<p>To support the high read-rate (and high miss-rate) during common -ancestor negotation, an alternative to an in-memory bloom filter -within the DHT is to downoad the entire set of keys into an alternate -service job for recently accessed repositories. This service can only -be used if <i>all</i> of the keys for the same repository_id are -hosted within the service. Given this is under 36 GiB for the worst -case 1.8 billion rows mentioned above, this may be feasible. Loading -the table can be performed by fetching <a -href="#REPOSITORY.chunk-info">REPOSITORY.chunk-info</a> and then -performing parallel gets for the <a -href="#CHUNK.index">CHUNK.index</a> column, and scanning the local -indexes to construct the list of known objects.</p> - -<p>During repacking with no delta reuse, worst case scenario requires -reading all records with the same repository_id (for linux-2.6 this -is 1.8 million rows). Reads are made in a configurable batch size, -right now this is set at 2048 keys/batch, with 4 concurrent batches in -flight at a time.</p> - -<p>Reads are tried first against memcache, then against the DHT if the -entry did not exist in memcache. Successful reads against the DHT are -put back into memcache in the background.</p> - -<a name="CHUNK"><h2>Table CHUNK</h2></a> - -<p>Stores the object data for a repository, containing commit history, -directory structure, and file revisions. 
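<p>The OBJECT_INDEX write batching described in the previous section (sorted row keys, buckets of roughly 3,000 rows, up to 32 groups written round-robin) might be planned as in the sketch below. The bucket size and group count are the figures quoted above; the contiguous-group split and everything else are assumptions made only for illustration.</p>

<pre>
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class ObjectIndexWritePlanner {
    private static final int ROWS_PER_BATCH = 3000; // roughly 512 KiB of updates
    private static final int GROUPS = 32;           // round-robin fan-out limit

    /** Order batches so writes rotate across key-space groups. */
    static List<List<String>> writeOrder(List<String> rowKeys) {
        List<String> sorted = new ArrayList<>(rowKeys);
        Collections.sort(sorted);

        // Cut the sorted keys into batches of about 3000 rows each.
        List<List<String>> batches = new ArrayList<>();
        for (int i = 0; i < sorted.size(); i += ROWS_PER_BATCH)
            batches.add(new ArrayList<>(sorted.subList(i,
                    Math.min(i + ROWS_PER_BATCH, sorted.size()))));

        // Split the batches into up to 32 contiguous groups, then emit one
        // batch per group in turn so consecutive writes do not keep hitting
        // the same table segment.
        int groups = Math.min(GROUPS, Math.max(1, batches.size()));
        int perGroup = (batches.size() + groups - 1) / groups;
        List<List<String>> ordered = new ArrayList<>(batches.size());
        for (int round = 0; round < perGroup; round++)
            for (int g = 0; g < groups; g++) {
                int idx = g * perGroup + round;
                if (idx < batches.size())
                    ordered.add(batches.get(idx));
            }
        return ordered;
    }
}
</pre>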
Each chunk is typically 1 MiB -in size, excluding the index and meta columns.</p> - -<h3>Row Key</h3> -<div class='rowkey'> - <div> - <span class='header'>Row Key:</span> - <span class='var'>HH</span> - <span class='lit'>.</span> - <span class='var'>repository_id</span> - <span class='lit'>.</span> - <span class='var'>HHx40</span> - </div> - - <p>Variables:</p> - <ul> - <li><span class='var'>HH</span> = First 2 hex digits of chunk SHA-1</li> - <li><span class='var'>repository_id</span> = Repository owning the chunk (see above)</li> - <li><span class='var'>HHx40</span> = Complete chunk SHA-1, in hex</li> - </ul> - - <p>Examples:</p> - <ul> - <li><span class='example'>09.80000000.09e0eb57543be633b004b672cbebdf335aa4d53f</span> <i>(full key)</i></li> - </ul> -</div> - -<p>Chunk keys are computed by first computing the SHA-1 of the -<code>chunk:</code> column, which is the compressed object contents -stored within the chunk. As the chunk data includes a 32 bit salt in -the trailing 4 bytes, this value is random even for the exact same -object input.</p> - -<p>The leading 2 hex digit <code>HH</code> component distributes -chunks for the same repository (and over the same time period) evenly -around the DHT keyspace, preventing any portion from becoming too -hot.</p> - -<h3>Columns</h3> -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="CHUNK.chunk"><span class='lit'>chunk:</span></a> - </div> - - <p>Multiple objects in Git pack-file format, about 1 MiB in size. - The data is already very highly compressed by Git and is not further - compressable by the DHT.</p> -</div> - -<p>This column is essentially the standard Git pack-file format, -without the standard header or trailer. Objects can be stored in -either whole format (object content is simply deflated inline) -or in delta format (reference to a delta base is followed by -deflated sequence of copy and/or insert instructions to recreate -the object content). The OBJ_OFS_DELTA format is preferred -for deltas, since it tends to use a shorter encoding than the -OBJ_REF_DELTA format. Offsets beyond the start of the chunk are -actually offsets to other chunks, and must be resolved using the -<code>meta.base_chunk.relative_start</code> field.</p> - -<p>Because the row key is derived from the SHA-1 of this column, the -trailing 4 bytes is randomly generated at insertion time, to make it -impractical for remote clients to predict the name of the chunk row. -This allows the stream parser to bindly insert rows without first -checking for row existance, or worry about replacing an existing -row and causing data corruption.</p> - -<p>This column value is essentially opaque to the DHT.</p> - -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="CHUNK.index"><span class='lit'>index:</span></a> - </div> - - <p>Binary searchable table listing object SHA-1 and starting offset - of that object within the <code>chunk:</code> data stream. The data - in this index is essentially random (due to the SHA-1s stored in - binary) and thus is not compressable.</p> -</div> - -<p>Sorted list of SHA-1s of each object that is stored in this chunk, -along with the offset. This column allows efficient random access to -any object within the chunk, without needing to perform a remote read -against <a href="#OBJECT_INDEX">OBJECT_INDEX</a> table. 
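<p>A sketch of that lookup, assuming a flat layout of a 20-byte binary SHA-1 followed by a 4-byte big-endian offset per entry. The layout is an assumption made here for illustration (it is consistent with the 24 bytes per object quoted below); the bundle's real index format is versioned and is not reproduced by this code.</p>

<pre>
import java.nio.ByteBuffer;

final class ChunkLocalIndex {
    private static final int ENTRY = 24; // assumed: 20-byte SHA-1 + 4-byte offset

    /**
     * Binary search the index: column for an object, returning the offset of
     * its header within the chunk: column, or -1 if the object is not here.
     */
    static int findOffset(byte[] index, byte[] objectSha1) {
        int lo = 0;
        int hi = index.length / ENTRY;
        while (lo < hi) {
            int mid = (lo + hi) >>> 1;
            int cmp = compare(index, mid * ENTRY, objectSha1);
            if (cmp == 0)
                return ByteBuffer.wrap(index, mid * ENTRY + 20, 4).getInt();
            if (cmp < 0)
                lo = mid + 1;
            else
                hi = mid;
        }
        return -1;
    }

    private static int compare(byte[] index, int ptr, byte[] sha1) {
        for (int i = 0; i < 20; i++) {
            int a = index[ptr + i] & 0xff;
            int b = sha1[i] & 0xff;
            if (a != b)
                return a < b ? -1 : 1;
        }
        return 0;
    }
}
</pre>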
The column is -very useful at read time, where pointers within Git objects will -frequently reference other objects stored in the same chunk.</p> - -<p>This column is sometimes called the local index, since it is local -only to the chunk and thus differs from the global index stored in the -<a href="#OBJECT_INDEX">OBJECT_INDEX</a> table.</p> - -<p>The column size is 24 bytes per object stored in the chunk. Commit -chunks store on average 2200 commits/chunk, so a commit chunk index is -about 52,800 bytes.</p> - -<p>This column value is essentially opaque to the DHT.</p> - -<div class='colfamily'> - <div> - <span class='header'>Column:</span> - <a name="CHUNK.meta"><span class='lit'>meta:</span></a> - </div> - - <p>Cell value is the protocol message - <a href="#message_ChunkMeta">ChunkMeta</a> describing prefetch - hints, object fragmentation, and delta base locations. Unlike - <code>chunk:</code> and <code>index:</code>, this column is - somewhat compressable.</p> -</div> - -<p>The meta column provides information critical for reading the -chunk's data. (Unlike <a href="#message_ChunkInfo">ChunkInfo</a> in -the <a href="#REPOSITORY">REPOSITORY</a> table, which is used only for -accounting.)</p> - -<p>The most important element is the BaseChunk nested message, -describing a chunk that contains a base object required to inflate -an object that is stored in this chunk as a delta.</p> - -<h3>Chunk Contents</h3> - -<p>Chunks try to store only a single object type, however mixed object -type chunks are supported. The rule to store only one object type per -chunk improves data locality, reducing the number of chunks that need -to be accessed from the DHT in order to perform a particular Git -operation. Clustering commits together into a 'commit chunk' improves -data locality during log/history walking operations, while clustering -trees together into a 'tree chunk' improves data locality during the -early stages of packing or difference generation.</p> - -<p>Chunks reuse the standard Git pack data format to support direct -streaming of a chunk's <code>chunk:</code> column to clients, without -needing to perform any data manipulation on the server. This enables -high speed data transfer from the DHT to the client.</p> - -<h3>Large Object Fragmentation</h3> - -<p>If a chunk contains more than one object, all objects within the -chunk must store their complete compressed form within the chunk. This -limits an object to less than 1 MiB of compressed data.</p> - -<p>Larger objects whose compressed size is bigger than 1 MiB are -fragmented into multiple chunks. The first chunk contains the object's -pack header, and the first 1 MiB of compressed data. Subsequent data -is stored in additional chunks. The additional chunk keys are stored -in the <code>meta.fragment</code> field. Each chunk that is part of -the same large object redundantly stores the exact same meta -value.</p> - -<h3>Size Estimate</h3> - -<p>Approximately the same size if the repository was stored on the -local filesystem. For the linux-2.6 repository (373M / 1.8 million -objects), about 417M (373.75M in <code>chunk:</code>, 42.64M in -<code>index:</code>, 656K in <code>meta:</code>).</p> - -<p>Row count is close to size / 1M (373M / 1M = 373 rows), but may be -slightly higher (e.g. 
402) due to fractional chunks on the end of -large fragmented objects, or where the single object type rule caused a -chunk to close before it was full.</p> - -<p>For the complete Android repository set, disk usage is ~13G.</p> - -<h3>Updates</h3> - -<p>This table is (mostly) append-only. Write operations blast in ~1 -MiB chunks, as the key format assures writers the new row does not -already exist. Chunks are randomly scattered by the hashing function, -and are not buffered very deep by writers.</p> - -<p><i>Interactive writes:</i> Small operations impacting only 1-5 -chunks will write all columns in a single operation. Most chunks of -this varity will be very small, 1-10 objects per chunk and about 1-10 -KiB worth of compressed data inside of the <code>chunk:</code> column. -This class of write represents a single change made by one developer -that must be shared back out immediately.</p> - -<p><i>Large pushes:</i> Large operations impacting tens to hundreds of -chunks will first write the <code>chunk:</code> column, then come back -later and populate the <code>index:</code> and <code>meta:</code> -columns once all chunks have been written. The delayed writing of -index and meta during large operations is required because the -information for these columns is not available until the entire data -stream from the Git client has been received and scanned. As the Git -server may not have sufficient memory to store all chunk data (373M or -1G!), its written out first to free up memory.</p> - -<p><i>Garbage collection:</i> Chunks that are not optimally sized -(less than the target ~1 MiB), optimally localized (too many graph -pointers outside of the chunk), or compressed (Git found a smaller way -to store the same content) will be replaced by first writing new -chunks, and then deleting the old chunks.</p> - -<p>Worst case, this could churn as many as 402 rows and 373M worth of -data for the linux-2.6 repository. Special consideration will be made -to try and avoid replacing chunks whose <code>WWWW</code> key -component is 'sufficiently old' and whose content is already -sufficiently sized and compressed. This will help to limit churn to -only more recently dated chunks, which are smaller in size.</p> - -<h3>Reads</h3> - -<p>All columns are read together as a unit. Memcache is checked first, -with reads falling back to the DHT if the cache does not have the -chunk.</p> - -<p>Reasonably accurate prefetching is supported through background -threads and prefetching metadata embedded in the <a -href="#message_CachedPackInfo">CachedPackInfo</a> and <a -href="#message_ChunkMeta">ChunkMeta</a> protocol messages used by -readers.</p> - -<hr /> -<h2>Protocol Messages</h2> - -<pre> -package git_store; -option java_package = "org.eclipse.jgit.storage.dht.proto"; - - - // Entry in RefTable describing the target of the reference. - // Either symref *OR* target must be populated, but never both. - // -<a name="message_RefData">message RefData</a> { - // An ObjectId with an optional hint about where it can be found. - // - message Id { - required string object_name = 1; - optional string chunk_key = 2; - } - - // Name of another reference this reference inherits its target - // from. The target is inherited on-the-fly at runtime by reading - // the other reference. Typically only "HEAD" uses symref. - // - optional string symref = 1; - - // ObjectId this reference currently points at. - // - optional Id target = 2; - - // True if the correct value for peeled is stored. 
- // - optional bool is_peeled = 3; - - // If is_peeled is true, this field is accurate. This field - // exists only if target points to annotated tag object, then - // this field stores the "object" field for that tag. - // - optional Id peeled = 4; -} - - - // Entry in ObjectIndexTable, describes how an object appears in a chunk. - // -<a name="message_ObjectInfo">message ObjectInfo</a> { - // Type of Git object. - // - enum ObjectType { - COMMIT = 1; - TREE = 2; - BLOB = 3; - TAG = 4; - } - optional ObjectType object_type = 1; - - // Position of the object's header within its chunk. - // - required int32 offset = 2; - - // Total number of compressed data bytes, not including the pack - // header. For fragmented objects this is the sum of all chunks. - // - required int64 packed_size = 3; - - // Total number of bytes of the uncompressed object. For a - // delta this is the size after applying the delta onto its base. - // - required int64 inflated_size = 4; - - // ObjectId of the delta base, if this object is stored as a delta. - // The base is stored in raw binary. - // - optional bytes delta_base = 5; -} - - - // Describes at a high-level the information about a chunk. - // A repository can use this summary to determine how much - // data is stored, or when garbage collection should occur. - // -<a name="message_ChunkInfo">message ChunkInfo</a> { - // Source of the chunk (what code path created it). - // - enum Source { - RECEIVE = 1; // Came in over the network from external source. - INSERT = 2; // Created in this repository (e.g. a merge). - REPACK = 3; // Generated during a repack of this repository. - } - optional Source source = 1; - - // Type of Git object stored in this chunk. - // - enum ObjectType { - MIXED = 0; - COMMIT = 1; - TREE = 2; - BLOB = 3; - TAG = 4; - } - optional ObjectType object_type = 2; - - // True if this chunk is a member of a fragmented object. - // - optional bool is_fragment = 3; - - // If present, key of the CachedPackInfo object - // that this chunk is a member of. - // - optional string cached_pack_key = 4; - - // Summary description of the objects stored here. - // - message ObjectCounts { - // Number of objects stored in this chunk. - // - optional int32 total = 1; - - // Number of objects stored in whole (non-delta) form. - // - optional int32 whole = 2; - - // Number of objects stored in OFS_DELTA format. - // The delta base appears in the same chunk, or - // may appear in an earlier chunk through the - // ChunkMeta.base_chunk link. - // - optional int32 ofs_delta = 3; - - // Number of objects stored in REF_DELTA format. - // The delta base is at an unknown location. - // - optional int32 ref_delta = 4; - } - optional ObjectCounts object_counts = 5; - - // Size in bytes of the chunk's compressed data column. - // - optional int32 chunk_size = 6; - - // Size in bytes of the chunk's index. - // - optional int32 index_size = 7; - - // Size in bytes of the meta information. - // - optional int32 meta_size = 8; -} - - - // Describes meta information about a chunk, stored inline with it. - // -<a name="message_ChunkMeta">message ChunkMeta</a> { - // Enumerates the other chunks this chunk depends upon by OFS_DELTA. - // Entries are sorted by relative_start ascending, enabling search. Thus - // the earliest chunk is at the end of the list. - // - message BaseChunk { - // Bytes between start of the base chunk and start of this chunk. - // Although the value is positive, its a negative offset. 
- // - required int64 relative_start = 1; - required string chunk_key = 2; - } - repeated BaseChunk base_chunk = 1; - - // If this chunk is part of a fragment, key of every chunk that - // makes up the fragment, including this chunk. - // - repeated string fragment = 2; - - // Chunks that should be prefetched if reading the current chunk. - // - message PrefetchHint { - repeated string edge = 1; - repeated string sequential = 2; - } - optional PrefetchHint commit_prefetch = 51; - optional PrefetchHint tree_prefetch = 52; -} - - - // Describes a CachedPack, for efficient bulk clones. - // -<a name="message_CachedPackInfo">message CachedPackInfo</a> { - // Unique name of the cached pack. This is the SHA-1 hash of - // all of the objects that make up the cached pack, sorted and - // in binary form. (Same rules as Git on the filesystem.) - // - required string name = 1; - - // SHA-1 of all chunk keys, which are themselves SHA-1s of the - // raw chunk data. If any bit differs in compression (due to - // repacking) the version will differ. - // - required string version = 2; - - // Total number of objects in the cached pack. This must be known - // in order to set the final resulting pack header correctly before it - // is sent to clients. - // - required int64 objects_total = 3; - - // Number of objects stored as deltas, rather than deflated whole. - // - optional int64 objects_delta = 4; - - // Total size of the chunks, in bytes, not including the chunk footer. - // - optional int64 bytes_total = 5; - - // Objects this pack starts from. - // - message TipObjectList { - repeated string object_name = 1; - } - required TipObjectList tip_list = 6; - - // Chunks, in order of occurrence in the stream. - // - message ChunkList { - repeated string chunk_key = 1; - } - required ChunkList chunk_list = 7; -} -</pre> - -</body> -</html> diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java deleted file mode 100644 index a59e47bb86..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/AsyncCallback.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -/** - * Receives notification when an asynchronous operation has finished. - * <p> - * Many storage provider interface operations use this type to signal completion - * or failure status of an operation that runs asynchronously to the caller. - * <p> - * Only one of {@link #onSuccess(Object)} or {@link #onFailure(DhtException)} - * should be invoked. - * - * @param <T> - * type of object returned from the operation on success. - */ -public interface AsyncCallback<T> { - /** - * Notification the operation completed. - * - * @param result - * the result value from the operation. - */ - public void onSuccess(T result); - - /** - * Notification the operation failed. - * - * @param error - * a description of the error. - */ - public void onFailure(DhtException error); -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java deleted file mode 100644 index 218bffc123..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/BatchObjectLookup.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantLock; - -import org.eclipse.jgit.lib.NullProgressMonitor; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ProgressMonitor; -import org.eclipse.jgit.lib.ThreadSafeProgressMonitor; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.Database; - -abstract class BatchObjectLookup<T extends ObjectId> { - private final RepositoryKey repo; - - private final Database db; - - private final DhtReader reader; - - private final ThreadSafeProgressMonitor progress; - - private final Semaphore batches; - - private final ReentrantLock resultLock; - - private final AtomicReference<DhtException> error; - - private final int concurrentBatches; - - private final List<T> retry; - - private final ArrayList<ObjectInfo> tmp; - - private boolean retryMissingObjects; - - private boolean cacheLoadedInfo; - - BatchObjectLookup(DhtReader reader) { - this(reader, null); - } - - BatchObjectLookup(DhtReader reader, ProgressMonitor monitor) { - this.repo = reader.getRepositoryKey(); - this.db = reader.getDatabase(); - this.reader = reader; - - if (monitor != null && monitor != NullProgressMonitor.INSTANCE) - this.progress = new ThreadSafeProgressMonitor(monitor); - else - this.progress = null; - - this.concurrentBatches = reader.getOptions() - .getObjectIndexConcurrentBatches(); - - this.batches = new Semaphore(concurrentBatches); - this.resultLock = new ReentrantLock(); - this.error = new AtomicReference<DhtException>(); - this.retry = new ArrayList<T>(); - this.tmp = new ArrayList<ObjectInfo>(4); - } - - void setRetryMissingObjects(boolean on) { - retryMissingObjects = on; - } - - void setCacheLoadedInfo(boolean on) { - cacheLoadedInfo = on; - } - - void select(Iterable<T> objects) throws IOException { - selectInBatches(Context.FAST_MISSING_OK, lookInCache(objects)); - - // Not all of the selection ran with fast options. 
- if (retryMissingObjects && !retry.isEmpty()) { - batches.release(concurrentBatches); - selectInBatches(Context.READ_REPAIR, retry); - } - - if (progress != null) - progress.pollForUpdates(); - } - - private Iterable<T> lookInCache(Iterable<T> objects) { - RecentInfoCache infoCache = reader.getRecentInfoCache(); - List<T> missing = null; - for (T obj : objects) { - List<ObjectInfo> info = infoCache.get(obj); - if (info != null) { - onResult(obj, info); - if (progress != null) - progress.update(1); - } else { - if (missing == null) { - if (objects instanceof List<?>) - missing = new ArrayList<T>(((List<?>) objects).size()); - else - missing = new ArrayList<T>(); - } - missing.add(obj); - } - } - if (missing != null) - return missing; - return Collections.emptyList(); - } - - private void selectInBatches(Context options, Iterable<T> objects) - throws DhtException { - final int batchSize = reader.getOptions() - .getObjectIndexBatchSize(); - - Map<ObjectIndexKey, T> batch = new HashMap<ObjectIndexKey, T>(); - Iterator<T> otpItr = objects.iterator(); - while (otpItr.hasNext()) { - T otp = otpItr.next(); - - batch.put(ObjectIndexKey.create(repo, otp), otp); - - if (batch.size() < batchSize && otpItr.hasNext()) - continue; - - if (error.get() != null) - break; - - try { - if (progress != null) { - while (!batches.tryAcquire(500, MILLISECONDS)) - progress.pollForUpdates(); - progress.pollForUpdates(); - } else { - batches.acquire(); - } - } catch (InterruptedException err) { - error.compareAndSet(null, new DhtTimeoutException(err)); - break; - } - - startQuery(options, batch); - batch = new HashMap<ObjectIndexKey, T>(); - } - - try { - if (progress != null) { - while (!batches.tryAcquire(concurrentBatches, 500, MILLISECONDS)) - progress.pollForUpdates(); - progress.pollForUpdates(); - } else { - batches.acquire(concurrentBatches); - } - } catch (InterruptedException err) { - error.compareAndSet(null, new DhtTimeoutException(err)); - } - - if (error.get() != null) - throw error.get(); - - // Make sure retry changes are visible to us. 
- resultLock.lock(); - resultLock.unlock(); - } - - private void startQuery(final Context context, - final Map<ObjectIndexKey, T> batch) { - final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> cb; - - cb = new AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>>() { - public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> r) { - resultLock.lock(); - try { - processResults(context, batch, r); - } finally { - resultLock.unlock(); - batches.release(); - } - } - - public void onFailure(DhtException e) { - error.compareAndSet(null, e); - batches.release(); - } - }; - db.objectIndex().get(context, batch.keySet(), cb); - } - - private void processResults(Context context, Map<ObjectIndexKey, T> batch, - Map<ObjectIndexKey, Collection<ObjectInfo>> objects) { - for (T obj : batch.values()) { - Collection<ObjectInfo> matches = objects.get(obj); - - if (matches == null || matches.isEmpty()) { - if (retryMissingObjects && context == Context.FAST_MISSING_OK) - retry.add(obj); - continue; - } - - tmp.clear(); - tmp.addAll(matches); - ObjectInfo.sort(tmp); - if (cacheLoadedInfo) - reader.getRecentInfoCache().put(obj, tmp); - - onResult(obj, tmp); - } - - if (progress != null) - progress.update(objects.size()); - } - - protected abstract void onResult(T obj, List<ObjectInfo> info); -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java deleted file mode 100644 index 274cc68d87..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/CachedPackKey.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.util.RawParseUtils.decode; - -import java.text.MessageFormat; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.lib.ObjectId; - -/** Unique identifier of a {@link CachedPackInfo} in the DHT. */ -public final class CachedPackKey implements RowKey { - static final int KEYLEN = 81; - - /** - * @param key - * @return the key - */ - public static CachedPackKey fromBytes(byte[] key) { - return fromBytes(key, 0, key.length); - } - - /** - * @param key - * @param ptr - * @param len - * @return the key - */ - public static CachedPackKey fromBytes(byte[] key, int ptr, int len) { - if (len != KEYLEN) - throw new IllegalArgumentException(MessageFormat.format( - DhtText.get().invalidChunkKey, decode(key, ptr, ptr + len))); - - ObjectId name = ObjectId.fromString(key, ptr); - ObjectId vers = ObjectId.fromString(key, ptr + 41); - return new CachedPackKey(name, vers); - } - - /** - * @param key - * @return the key - */ - public static CachedPackKey fromString(String key) { - int d = key.indexOf('.'); - ObjectId name = ObjectId.fromString(key.substring(0, d)); - ObjectId vers = ObjectId.fromString(key.substring(d + 1)); - return new CachedPackKey(name, vers); - } - - /** - * @param info - * @return the key - */ - public static CachedPackKey fromInfo(CachedPackInfo info) { - ObjectId name = ObjectId.fromString(info.getName()); - ObjectId vers = ObjectId.fromString(info.getVersion()); - return new CachedPackKey(name, vers); - } - - private final ObjectId name; - - private final ObjectId version; - - CachedPackKey(ObjectId name, ObjectId version) { - this.name = name; - this.version = version; - } - - /** @return unique SHA-1 name of the pack. */ - public ObjectId getName() { - return name; - } - - /** @return unique version of the pack. */ - public ObjectId getVersion() { - return version; - } - - public byte[] asBytes() { - byte[] r = new byte[KEYLEN]; - name.copyTo(r, 0); - r[40] = '.'; - version.copyTo(r, 41); - return r; - } - - public String asString() { - return name.name() + "." + version.name(); - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof CachedPackKey) { - CachedPackKey key = (CachedPackKey) other; - return name.equals(key.name) && version.equals(key.version); - } - return false; - } - - @Override - public String toString() { - return "cached-pack:" + asString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java deleted file mode 100644 index 011cfb06ec..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkFormatter.java +++ /dev/null @@ -1,497 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. 
- * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.security.MessageDigest; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.zip.Deflater; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta.BaseChunk; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ObjectInfo.ObjectType; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.transport.PackedObjectInfo; -import org.eclipse.jgit.util.NB; - -/** - * Formats one {@link PackChunk} for storage in the DHT. - * <p> - * Each formatter instance can be used only once. 
- */ -class ChunkFormatter { - static final int TRAILER_SIZE = 4; - - private final RepositoryKey repo; - - private final DhtInserterOptions options; - - private final byte[] varIntBuf; - - private final int maxObjects; - - private Map<ChunkKey, BaseChunkInfo> baseChunks; - - private List<StoredObject> objectList; - - private byte[] chunkData; - - private int ptr; - - private int mark; - - private int currentObjectType; - - private BaseChunkInfo currentObjectBase; - - private PackChunk.Members builder; - - private GitStore.ChunkInfo.Source source; - - private boolean fragment; - - private int objectType; - - private int objectsTotal, objectsWhole, objectsRefDelta, objectsOfsDelta; - - private ChunkInfo chunkInfo; - - ChunkFormatter(RepositoryKey repo, DhtInserterOptions options) { - this.repo = repo; - this.options = options; - this.varIntBuf = new byte[32]; - this.chunkData = new byte[options.getChunkSize()]; - this.maxObjects = options.getMaxObjectCount(); - this.objectType = -1; - } - - void setSource(GitStore.ChunkInfo.Source src) { - source = src; - } - - void setObjectType(int type) { - objectType = type; - } - - void setFragment() { - fragment = true; - } - - ChunkKey getChunkKey() { - return getChunkInfo().getChunkKey(); - } - - ChunkInfo getChunkInfo() { - return chunkInfo; - } - - ChunkMeta getChunkMeta() { - return builder.getMeta(); - } - - PackChunk getPackChunk() throws DhtException { - return builder.build(); - } - - void setChunkIndex(List<PackedObjectInfo> objs) { - builder.setChunkIndex(ChunkIndex.create(objs)); - } - - ChunkKey end(MessageDigest md) { - if (md == null) - md = Constants.newMessageDigest(); - - // Embed a small amount of randomness into the chunk content, - // and thus impact its name. This prevents malicious clients from - // being able to predict what a chunk is called, which keeps them - // from replacing an existing chunk. 
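For illustration (not part of the original file): the comment above explains why a 4-byte salt is appended to the chunk payload before hashing. The chunk key is derived from the SHA-1 of the serialized content, so the salt makes the resulting chunk name unpredictable even when the object data is identical, which is what prevents a malicious client from pre-computing and overwriting a chunk. A rough standalone sketch of that naming idea using only java.security.MessageDigest; the helper name and salt handling are assumptions for the example, not the original jgit API:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    class ChunkNameSketch {
        // Name a chunk by hashing its payload plus a 4-byte big-endian salt.
        static String chunkName(byte[] payload, int salt)
                throws NoSuchAlgorithmException {
            MessageDigest md = MessageDigest.getInstance("SHA-1");
            md.update(payload);
            md.update(new byte[] {
                    (byte) (salt >>> 24), (byte) (salt >>> 16),
                    (byte) (salt >>> 8), (byte) salt });
            StringBuilder hex = new StringBuilder(40);
            for (byte b : md.digest())
                hex.append(String.format("%02x", b & 0xff));
            return hex.toString(); // different salt => different chunk name
        }
    }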
- // - chunkData = cloneArray(chunkData, ptr + TRAILER_SIZE); - NB.encodeInt32(chunkData, ptr, options.nextChunkSalt()); - ptr += 4; - - md.update(chunkData, 0, ptr); - ChunkKey key = ChunkKey.create(repo, ObjectId.fromRaw(md.digest())); - - GitStore.ChunkInfo.Builder info = GitStore.ChunkInfo.newBuilder(); - info.setSource(source); - info.setObjectType(GitStore.ChunkInfo.ObjectType.valueOf(objectType)); - if (fragment) - info.setIsFragment(true); - info.setChunkSize(chunkData.length); - - GitStore.ChunkInfo.ObjectCounts.Builder cnts = info.getObjectCountsBuilder(); - cnts.setTotal(objectsTotal); - if (objectsWhole > 0) - cnts.setWhole(objectsWhole); - if (objectsRefDelta > 0) - cnts.setRefDelta(objectsRefDelta); - if (objectsOfsDelta > 0) - cnts.setOfsDelta(objectsOfsDelta); - - builder = new PackChunk.Members(); - builder.setChunkKey(key); - builder.setChunkData(chunkData); - - if (baseChunks != null) { - List<BaseChunk> list = new ArrayList<BaseChunk>(baseChunks.size()); - for (BaseChunkInfo b : baseChunks.values()) { - if (0 < b.useCount) { - BaseChunk.Builder c = BaseChunk.newBuilder(); - c.setRelativeStart(b.relativeStart); - c.setChunkKey(b.key.asString()); - list.add(c.build()); - } - } - Collections.sort(list, new Comparator<BaseChunk>() { - public int compare(BaseChunk a, BaseChunk b) { - return Long.signum(a.getRelativeStart() - - b.getRelativeStart()); - } - }); - ChunkMeta.Builder b = ChunkMeta.newBuilder(); - b.addAllBaseChunk(list); - ChunkMeta meta = b.build(); - builder.setMeta(meta); - info.setMetaSize(meta.getSerializedSize()); - } - - if (objectList != null && !objectList.isEmpty()) { - byte[] index = ChunkIndex.create(objectList); - builder.setChunkIndex(index); - info.setIndexSize(index.length); - } - - chunkInfo = new ChunkInfo(key, info.build()); - return getChunkKey(); - } - - /** - * Safely put the chunk to the database. - * <p> - * This method is slow. It first puts the chunk info, waits for success, - * then puts the chunk itself, waits for success, and finally queues up the - * object index with its chunk links in the supplied buffer. 
- * - * @param db - * @param dbWriteBuffer - * @throws DhtException - */ - void safePut(Database db, WriteBuffer dbWriteBuffer) throws DhtException { - WriteBuffer chunkBuf = db.newWriteBuffer(); - - db.repository().put(repo, getChunkInfo(), chunkBuf); - chunkBuf.flush(); - - db.chunk().put(builder, chunkBuf); - chunkBuf.flush(); - - linkObjects(db, dbWriteBuffer); - } - - void unsafePut(Database db, WriteBuffer dbWriteBuffer) throws DhtException { - db.repository().put(repo, getChunkInfo(), dbWriteBuffer); - db.chunk().put(builder, dbWriteBuffer); - linkObjects(db, dbWriteBuffer); - } - - private void linkObjects(Database db, WriteBuffer dbWriteBuffer) - throws DhtException { - if (objectList != null && !objectList.isEmpty()) { - for (StoredObject obj : objectList) { - db.objectIndex().add(ObjectIndexKey.create(repo, obj), - obj.link(getChunkKey()), dbWriteBuffer); - } - } - } - - boolean whole(Deflater def, int type, byte[] data, int off, final int size, - ObjectId objId) { - if (free() < 10 || maxObjects <= objectsTotal) - return false; - - header(type, size); - objectsWhole++; - currentObjectType = type; - - int endOfHeader = ptr; - def.setInput(data, off, size); - def.finish(); - do { - int left = free(); - if (left == 0) { - rollback(); - return false; - } - - int n = def.deflate(chunkData, ptr, left); - if (n == 0) { - rollback(); - return false; - } - - ptr += n; - } while (!def.finished()); - - if (objectList == null) - objectList = new ArrayList<StoredObject>(); - - final int packedSize = ptr - endOfHeader; - objectList.add(new StoredObject(objId, type, mark, packedSize, size)); - - if (objectType < 0) - objectType = type; - else if (objectType != type) - objectType = ChunkInfo.OBJ_MIXED; - - return true; - } - - boolean whole(int type, long inflatedSize) { - if (free() < 10 || maxObjects <= objectsTotal) - return false; - - header(type, inflatedSize); - objectsWhole++; - currentObjectType = type; - return true; - } - - boolean ofsDelta(long inflatedSize, long negativeOffset) { - final int ofsPtr = encodeVarInt(negativeOffset); - final int ofsLen = varIntBuf.length - ofsPtr; - if (free() < 10 + ofsLen || maxObjects <= objectsTotal) - return false; - - header(Constants.OBJ_OFS_DELTA, inflatedSize); - objectsOfsDelta++; - currentObjectType = Constants.OBJ_OFS_DELTA; - currentObjectBase = null; - - if (append(varIntBuf, ofsPtr, ofsLen)) - return true; - - rollback(); - return false; - } - - boolean refDelta(long inflatedSize, AnyObjectId baseId) { - if (free() < 30 || maxObjects <= objectsTotal) - return false; - - header(Constants.OBJ_REF_DELTA, inflatedSize); - objectsRefDelta++; - currentObjectType = Constants.OBJ_REF_DELTA; - - baseId.copyRawTo(chunkData, ptr); - ptr += 20; - return true; - } - - void useBaseChunk(long relativeStart, ChunkKey baseChunkKey) { - if (baseChunks == null) - baseChunks = new HashMap<ChunkKey, BaseChunkInfo>(); - - BaseChunkInfo base = baseChunks.get(baseChunkKey); - if (base == null) { - base = new BaseChunkInfo(relativeStart, baseChunkKey); - baseChunks.put(baseChunkKey, base); - } - base.useCount++; - currentObjectBase = base; - } - - void appendDeflateOutput(Deflater def) { - while (!def.finished()) { - int left = free(); - if (left == 0) - return; - int n = def.deflate(chunkData, ptr, left); - if (n == 0) - return; - ptr += n; - } - } - - boolean append(byte[] data, int off, int len) { - if (free() < len) - return false; - - System.arraycopy(data, off, chunkData, ptr, len); - ptr += len; - return true; - } - - boolean isEmpty() { - return ptr == 
0; - } - - int getObjectCount() { - return objectsTotal; - } - - int position() { - return ptr; - } - - int size() { - return ptr; - } - - int free() { - return (chunkData.length - TRAILER_SIZE) - ptr; - } - - byte[] getRawChunkDataArray() { - return chunkData; - } - - int getCurrentObjectType() { - return currentObjectType; - } - - void rollback() { - ptr = mark; - adjustObjectCount(-1, currentObjectType); - } - - void adjustObjectCount(int delta, int type) { - objectsTotal += delta; - - switch (type) { - case Constants.OBJ_COMMIT: - case Constants.OBJ_TREE: - case Constants.OBJ_BLOB: - case Constants.OBJ_TAG: - objectsWhole += delta; - break; - - case Constants.OBJ_OFS_DELTA: - objectsOfsDelta += delta; - if (currentObjectBase != null && --currentObjectBase.useCount == 0) - baseChunks.remove(currentObjectBase.key); - currentObjectBase = null; - break; - - case Constants.OBJ_REF_DELTA: - objectsRefDelta += delta; - break; - } - } - - private void header(int type, long inflatedSize) { - mark = ptr; - objectsTotal++; - - long nextLength = inflatedSize >>> 4; - chunkData[ptr++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4) | (inflatedSize & 0x0F)); - inflatedSize = nextLength; - while (inflatedSize > 0) { - nextLength >>>= 7; - chunkData[ptr++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (inflatedSize & 0x7F)); - inflatedSize = nextLength; - } - } - - private int encodeVarInt(long value) { - int n = varIntBuf.length - 1; - varIntBuf[n] = (byte) (value & 0x7F); - while ((value >>= 7) > 0) - varIntBuf[--n] = (byte) (0x80 | (--value & 0x7F)); - return n; - } - - private static byte[] cloneArray(byte[] src, int len) { - byte[] dst = new byte[len]; - System.arraycopy(src, 0, dst, 0, len); - return dst; - } - - private static class BaseChunkInfo { - final long relativeStart; - - final ChunkKey key; - - int useCount; - - BaseChunkInfo(long relativeStart, ChunkKey key) { - this.relativeStart = relativeStart; - this.key = key; - } - } - - private static class StoredObject extends PackedObjectInfo { - private final int type; - - private final int packed; - - private final int inflated; - - StoredObject(AnyObjectId id, int type, int offset, int packed, int size) { - super(id); - setOffset(offset); - this.type = type; - this.packed = packed; - this.inflated = size; - } - - ObjectInfo link(ChunkKey key) { - GitStore.ObjectInfo.Builder b = GitStore.ObjectInfo.newBuilder(); - b.setObjectType(ObjectType.valueOf(type)); - b.setOffset((int) getOffset()); - b.setPackedSize(packed); - b.setInflatedSize(inflated); - return new ObjectInfo(key, b.build()); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java deleted file mode 100644 index 89029c0cc7..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkIndex.java +++ /dev/null @@ -1,428 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.text.MessageFormat; -import java.util.Collections; -import java.util.List; - -import org.eclipse.jgit.lib.AnyObjectId; -import static org.eclipse.jgit.lib.Constants.*; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.transport.PackedObjectInfo; -import org.eclipse.jgit.util.NB; - -/** Index into a {@link PackChunk}. */ -public abstract class ChunkIndex { - private static final int V1 = 0x01; - - static ChunkIndex fromBytes(ChunkKey key, byte[] index, int ptr, int len) - throws DhtException { - int v = index[ptr] & 0xff; - switch (v) { - case V1: { - final int offsetFormat = index[ptr + 1] & 7; - switch (offsetFormat) { - case 1: - return new Offset1(index, ptr, len, key); - case 2: - return new Offset2(index, ptr, len, key); - case 3: - return new Offset3(index, ptr, len, key); - case 4: - return new Offset4(index, ptr, len, key); - default: - throw new DhtException(MessageFormat.format( - DhtText.get().unsupportedChunkIndex, - Integer.toHexString(NB.decodeUInt16(index, ptr)), key)); - } - } - default: - throw new DhtException(MessageFormat.format( - DhtText.get().unsupportedChunkIndex, - Integer.toHexString(v), key)); - } - } - - /** - * Format the chunk index and return its binary representation. - * - * @param list - * the list of objects that appear in the chunk. This list will - * be sorted in-place if it has more than 1 element. - * @return binary representation of the chunk's objects and their starting - * offsets. The format is private to this class. - */ - @SuppressWarnings("null") - static byte[] create(List<? 
extends PackedObjectInfo> list) { - int cnt = list.size(); - sortObjectList(list); - - int fanoutFormat = 0; - int[] buckets = null; - if (64 < cnt) { - buckets = new int[256]; - for (PackedObjectInfo oe : list) - buckets[oe.getFirstByte()]++; - fanoutFormat = selectFanoutFormat(buckets); - } - - int offsetFormat = selectOffsetFormat(list); - byte[] index = new byte[2 // header - + 256 * fanoutFormat // (optional) fanout - + cnt * OBJECT_ID_LENGTH // ids - + cnt * offsetFormat // offsets - ]; - index[0] = V1; - index[1] = (byte) ((fanoutFormat << 3) | offsetFormat); - - int ptr = 2; - - switch (fanoutFormat) { - case 0: - break; - case 1: - for (int i = 0; i < 256; i++, ptr++) - index[ptr] = (byte) buckets[i]; - break; - case 2: - for (int i = 0; i < 256; i++, ptr += 2) - NB.encodeInt16(index, ptr, buckets[i]); - break; - case 3: - for (int i = 0; i < 256; i++, ptr += 3) - encodeUInt24(index, ptr, buckets[i]); - break; - case 4: - for (int i = 0; i < 256; i++, ptr += 4) - NB.encodeInt32(index, ptr, buckets[i]); - break; - } - - for (PackedObjectInfo oe : list) { - oe.copyRawTo(index, ptr); - ptr += OBJECT_ID_LENGTH; - } - - switch (offsetFormat) { - case 1: - for (PackedObjectInfo oe : list) - index[ptr++] = (byte) oe.getOffset(); - break; - - case 2: - for (PackedObjectInfo oe : list) { - NB.encodeInt16(index, ptr, (int) oe.getOffset()); - ptr += 2; - } - break; - - case 3: - for (PackedObjectInfo oe : list) { - encodeUInt24(index, ptr, (int) oe.getOffset()); - ptr += 3; - } - break; - - case 4: - for (PackedObjectInfo oe : list) { - NB.encodeInt32(index, ptr, (int) oe.getOffset()); - ptr += 4; - } - break; - } - - return index; - } - - private static int selectFanoutFormat(int[] buckets) { - int fmt = 1; - int max = 1 << (8 * fmt); - - for (int cnt : buckets) { - while (max <= cnt && fmt < 4) { - if (++fmt == 4) - return fmt; - max = 1 << (8 * fmt); - } - } - return fmt; - } - - private static int selectOffsetFormat(List<? extends PackedObjectInfo> list) { - int fmt = 1; - int max = 1 << (8 * fmt); - - for (PackedObjectInfo oe : list) { - while (max <= oe.getOffset() && fmt < 4) { - if (++fmt == 4) - return fmt; - max = 1 << (8 * fmt); - } - } - return fmt; - } - - private static void sortObjectList(List<? 
extends PackedObjectInfo> list) { - Collections.sort(list); - } - - private final byte[] indexBuf; - - private final int indexPtr; - - private final int indexLen; - - private final int[] fanout; - - private final int idTable; - - private final int offsetTable; - - private final int count; - - ChunkIndex(byte[] indexBuf, int ptr, int len, ChunkKey key) - throws DhtException { - final int ctl = indexBuf[ptr + 1]; - final int fanoutFormat = (ctl >>> 3) & 7; - final int offsetFormat = ctl & 7; - - switch (fanoutFormat) { - case 0: - fanout = null; // no fanout, too small - break; - - case 1: { - int last = 0; - fanout = new int[256]; - for (int i = 0; i < 256; i++) { - last += indexBuf[ptr + 2 + i] & 0xff; - fanout[i] = last; - } - break; - } - case 2: { - int last = 0; - fanout = new int[256]; - for (int i = 0; i < 256; i++) { - last += NB.decodeUInt16(indexBuf, ptr + 2 + i * 2); - fanout[i] = last; - } - break; - } - case 3: { - int last = 0; - fanout = new int[256]; - for (int i = 0; i < 256; i++) { - last += decodeUInt24(indexBuf, ptr + 2 + i * 3); - fanout[i] = last; - } - break; - } - case 4: { - int last = 0; - fanout = new int[256]; - for (int i = 0; i < 256; i++) { - last += NB.decodeInt32(indexBuf, ptr + 2 + i * 4); - fanout[i] = last; - } - break; - } - default: - throw new DhtException(MessageFormat.format( - DhtText.get().unsupportedChunkIndex, - Integer.toHexString(NB.decodeUInt16(indexBuf, ptr)), key)); - } - - this.indexBuf = indexBuf; - this.indexPtr = ptr; - this.indexLen = len; - this.idTable = indexPtr + 2 + 256 * fanoutFormat; - - int recsz = OBJECT_ID_LENGTH + offsetFormat; - this.count = (indexLen - (idTable - indexPtr)) / recsz; - this.offsetTable = idTable + count * OBJECT_ID_LENGTH; - } - - /** - * Get the total number of objects described by this index. - * - * @return number of objects in this index and its associated chunk. - */ - public final int getObjectCount() { - return count; - } - - /** - * Get an ObjectId from this index. - * - * @param nth - * the object to return. Must be in range [0, getObjectCount). - * @return the object id. - */ - public final ObjectId getObjectId(int nth) { - return ObjectId.fromRaw(indexBuf, idPosition(nth)); - } - - /** - * Get the offset of an object in the chunk. - * - * @param nth - * offset to return. Must be in range [0, getObjectCount). - * @return the offset. - */ - public final int getOffset(int nth) { - return getOffset(indexBuf, offsetTable, nth); - } - - /** @return the size of this index, in bytes. */ - int getIndexSize() { - int sz = indexBuf.length; - if (fanout != null) - sz += 12 + 256 * 4; - return sz; - } - - /** - * Search for an object in the index. - * - * @param objId - * the object to locate. - * @return offset of the object in the corresponding chunk; -1 if not found. - */ - final int findOffset(AnyObjectId objId) { - int hi, lo; - - if (fanout != null) { - int fb = objId.getFirstByte(); - lo = fb == 0 ? 
0 : fanout[fb - 1]; - hi = fanout[fb]; - } else { - lo = 0; - hi = count; - } - - while (lo < hi) { - final int mid = (lo + hi) >>> 1; - final int cmp = objId.compareTo(indexBuf, idPosition(mid)); - if (cmp < 0) - hi = mid; - else if (cmp == 0) - return getOffset(mid); - else - lo = mid + 1; - } - return -1; - } - - abstract int getOffset(byte[] indexArray, int offsetTableStart, int nth); - - private int idPosition(int nth) { - return idTable + (nth * OBJECT_ID_LENGTH); - } - - private static class Offset1 extends ChunkIndex { - Offset1(byte[] index, int ptr, int len, ChunkKey key) - throws DhtException { - super(index, ptr, len, key); - } - - int getOffset(byte[] index, int offsetTable, int nth) { - return index[offsetTable + nth] & 0xff; - } - } - - private static class Offset2 extends ChunkIndex { - Offset2(byte[] index, int ptr, int len, ChunkKey key) - throws DhtException { - super(index, ptr, len, key); - } - - int getOffset(byte[] index, int offsetTable, int nth) { - return NB.decodeUInt16(index, offsetTable + (nth * 2)); - } - } - - private static class Offset3 extends ChunkIndex { - Offset3(byte[] index, int ptr, int len, ChunkKey key) - throws DhtException { - super(index, ptr, len, key); - } - - int getOffset(byte[] index, int offsetTable, int nth) { - return decodeUInt24(index, offsetTable + (nth * 3)); - } - } - - private static class Offset4 extends ChunkIndex { - Offset4(byte[] index, int ptr, int len, ChunkKey key) - throws DhtException { - super(index, ptr, len, key); - } - - int getOffset(byte[] index, int offsetTable, int nth) { - return NB.decodeInt32(index, offsetTable + (nth * 4)); - } - } - - private static void encodeUInt24(byte[] intbuf, int offset, int v) { - intbuf[offset + 2] = (byte) v; - v >>>= 8; - - intbuf[offset + 1] = (byte) v; - v >>>= 8; - - intbuf[offset] = (byte) v; - } - - private static int decodeUInt24(byte[] intbuf, int offset) { - int r = (intbuf[offset] & 0xff) << 8; - - r |= intbuf[offset + 1] & 0xff; - r <<= 8; - - r |= intbuf[offset + 2] & 0xff; - return r; - } -}
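For reference (an aside, not part of the original file): the index serialized by create() above is a version byte, a control byte packing (fanoutFormat << 3) | offsetFormat, an optional 256-entry fanout table of fanoutFormat-byte bucket counters, then count 20-byte object ids followed by count offsetFormat-byte offsets. A small sketch of the 3-byte offset case handled by Offset3, mirroring the encodeUInt24/decodeUInt24 helpers; it is standalone and renamed, so treat it as an illustration rather than the original API:

    class UInt24Sketch {
        // Big-endian, 3-byte unsigned integer in the range 0..16777215.
        static void encode(byte[] buf, int off, int v) {
            buf[off] = (byte) (v >>> 16);
            buf[off + 1] = (byte) (v >>> 8);
            buf[off + 2] = (byte) v;
        }

        static int decode(byte[] buf, int off) {
            return ((buf[off] & 0xff) << 16)
                    | ((buf[off + 1] & 0xff) << 8)
                    | (buf[off + 2] & 0xff);
        }

        public static void main(String[] args) {
            byte[] buf = new byte[3];
            encode(buf, 0, 0xABCDEF);
            System.out.println(decode(buf, 0) == 0xABCDEF); // prints true
        }
    }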
\ No newline at end of file diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java deleted file mode 100644 index 2c156c8a68..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkInfo.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore; - -/** - * Summary information about a chunk owned by a repository. - */ -public class ChunkInfo { - /** Mixed objects are stored in the chunk (instead of single type). */ - public static final int OBJ_MIXED = 0; - - private final ChunkKey chunkKey; - - private final GitStore.ChunkInfo data; - - /** - * Wrap a ChunkInfo message. - * - * @param key - * associated chunk key. - * @param data - * data. - */ - public ChunkInfo(ChunkKey key, GitStore.ChunkInfo data) { - this.chunkKey = key; - this.data = data; - } - - /** @return the repository that contains the chunk. */ - public RepositoryKey getRepositoryKey() { - return chunkKey.getRepositoryKey(); - } - - /** @return the chunk this information describes. */ - public ChunkKey getChunkKey() { - return chunkKey; - } - - /** @return the underlying message containing all data. 
*/ - public GitStore.ChunkInfo getData() { - return data; - } - - @Override - public String toString() { - StringBuilder b = new StringBuilder(); - b.append("ChunkInfo:"); - b.append(chunkKey); - b.append("\n"); - b.append(data); - return b.toString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java deleted file mode 100644 index ced37b31df..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkKey.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.storage.dht.KeyUtils.format32; -import static org.eclipse.jgit.storage.dht.KeyUtils.parse32; -import static org.eclipse.jgit.util.RawParseUtils.decode; - -import java.io.Serializable; -import java.text.MessageFormat; - -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.lib.ObjectId; - -/** Unique identifier of a {@link PackChunk} in the DHT. 
*/ -public final class ChunkKey implements RowKey, Serializable { - private static final long serialVersionUID = 1L; - - static final int KEYLEN = 49; - - /** - * @param repo - * @param chunk - * @return the key - */ - public static ChunkKey create(RepositoryKey repo, ObjectId chunk) { - return new ChunkKey(repo.asInt(), chunk); - } - - /** - * @param key - * @return the key - */ - public static ChunkKey fromBytes(byte[] key) { - return fromBytes(key, 0, key.length); - } - - /** - * @param key - * @param ptr - * @param len - * @return the key - */ - public static ChunkKey fromBytes(byte[] key, int ptr, int len) { - if (len != KEYLEN) - throw new IllegalArgumentException(MessageFormat.format( - DhtText.get().invalidChunkKey, decode(key, ptr, ptr + len))); - - int repo = parse32(key, ptr); - ObjectId chunk = ObjectId.fromString(key, ptr + 9); - return new ChunkKey(repo, chunk); - } - - /** - * @param key - * @return the key - */ - public static ChunkKey fromString(String key) { - return fromBytes(Constants.encodeASCII(key)); - } - - private final int repo; - - private final ObjectId chunk; - - ChunkKey(int repo, ObjectId chunk) { - this.repo = repo; - this.chunk = chunk; - } - - /** @return the repository that contains the chunk. */ - public RepositoryKey getRepositoryKey() { - return RepositoryKey.fromInt(repo); - } - - int getRepositoryId() { - return repo; - } - - /** @return unique SHA-1 describing the chunk. */ - public ObjectId getChunkHash() { - return chunk; - } - - public byte[] asBytes() { - byte[] r = new byte[KEYLEN]; - format32(r, 0, repo); - r[8] = '.'; - chunk.copyTo(r, 9); - return r; - } - - public String asString() { - return decode(asBytes()); - } - - @Override - public int hashCode() { - return chunk.hashCode(); - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof ChunkKey) { - ChunkKey thisChunk = this; - ChunkKey otherChunk = (ChunkKey) other; - return thisChunk.repo == otherChunk.repo - && thisChunk.chunk.equals(otherChunk.chunk); - } - return false; - } - - @Override - public String toString() { - return "chunk:" + asString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMetaUtil.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMetaUtil.java deleted file mode 100644 index 7bc6439172..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ChunkMetaUtil.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.text.MessageFormat; -import java.util.List; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta.BaseChunk; - -class ChunkMetaUtil { - static BaseChunk getBaseChunk(ChunkKey chunkKey, ChunkMeta meta, - long position) throws DhtException { - // Chunks are sorted by ascending relative_start order. - // Thus for a pack sequence of: A B C, we have: - // - // -- C relative_start = 10,000 - // -- B relative_start = 20,000 - // -- A relative_start = 30,000 - // - // Indicating that chunk C starts 10,000 bytes before us, - // chunk B starts 20,000 bytes before us (and 10,000 before C), - // chunk A starts 30,000 bytes before us (and 10,000 before B), - // - // If position falls within: - // - // -- C (10k), then position is between 0..10,000 - // -- B (20k), then position is between 10,000 .. 20,000 - // -- A (30k), then position is between 20,000 .. 30,000 - - List<BaseChunk> baseChunks = meta.getBaseChunkList(); - int high = baseChunks.size(); - int low = 0; - while (low < high) { - final int mid = (low + high) >>> 1; - final BaseChunk base = baseChunks.get(mid); - - if (position > base.getRelativeStart()) { - low = mid + 1; - - } else if (mid == 0 || position == base.getRelativeStart()) { - return base; - - } else if (baseChunks.get(mid - 1).getRelativeStart() < position) { - return base; - - } else { - high = mid; - } - } - - throw new DhtException(MessageFormat.format( - DhtText.get().missingLongOffsetBase, chunkKey, - Long.valueOf(position))); - } - - static ChunkKey getNextFragment(ChunkMeta meta, ChunkKey chunkKey) { - int cnt = meta.getFragmentCount(); - for (int i = 0; i < cnt - 1; i++) { - ChunkKey key = ChunkKey.fromString(meta.getFragment(i)); - if (chunkKey.equals(key)) - return ChunkKey.fromString(meta.getFragment(i + 1)); - } - return null; - } - - private ChunkMetaUtil() { - // Static utilities only, do not create instances. - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java deleted file mode 100644 index 0bc1652f6f..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DeltaBaseCache.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. 
- * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.lang.ref.SoftReference; - -/** - * Caches recently used objects for {@link DhtReader}. - * <p> - * This cache is not thread-safe. Each reader should have its own cache. - */ -final class DeltaBaseCache { - private final DhtReader.Statistics stats; - - private int maxByteCount; - - private final Slot[] table; - - private Slot lruHead; - - private Slot lruTail; - - private int curByteCount; - - DeltaBaseCache(DhtReader reader) { - stats = reader.getStatistics(); - - DhtReaderOptions options = reader.getOptions(); - maxByteCount = options.getDeltaBaseCacheLimit(); - table = new Slot[options.getDeltaBaseCacheSize()]; - } - - Entry get(ChunkKey key, int position) { - Slot e = table[hash(key, position)]; - for (; e != null; e = e.tableNext) { - if (e.offset == position && key.equals(e.chunkKey)) { - Entry buf = e.data.get(); - if (buf != null) { - moveToHead(e); - stats.deltaBaseCache_Hits++; - return buf; - } - } - } - stats.deltaBaseCache_Miss++; - return null; - } - - void put(ChunkKey key, int offset, int objectType, byte[] data) { - if (data.length > maxByteCount) - return; // Too large to cache. 
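For illustration (not part of the original file): the lookup above checks e.data.get() for null because each cache entry is held through a SoftReference, so the JVM may reclaim the cached delta base under memory pressure while the bookkeeping slot stays linked in the hash table and LRU list until it is evicted or found empty. A minimal sketch of that wrapping, with illustrative names:

    import java.lang.ref.SoftReference;

    class SoftSlotSketch {
        // The slot keeps only the size for byte accounting; the payload
        // itself may be reclaimed whenever the heap gets tight.
        static final class Slot {
            final int size;
            final SoftReference<byte[]> data;

            Slot(byte[] payload) {
                this.size = payload.length;
                this.data = new SoftReference<byte[]>(payload);
            }

            byte[] payload() {
                return data.get(); // null once the collector reclaimed it
            }
        }
    }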
- - curByteCount += data.length; - releaseMemory(); - - int tableIdx = hash(key, offset); - Slot e = new Slot(key, offset, data.length); - e.data = new SoftReference<Entry>(new Entry(data, objectType)); - e.tableNext = table[tableIdx]; - table[tableIdx] = e; - moveToHead(e); - } - - private void releaseMemory() { - while (curByteCount > maxByteCount && lruTail != null) { - Slot currOldest = lruTail; - Slot nextOldest = currOldest.lruPrev; - - curByteCount -= currOldest.size; - unlink(currOldest); - removeFromTable(currOldest); - - if (nextOldest == null) - lruHead = null; - else - nextOldest.lruNext = null; - lruTail = nextOldest; - } - } - - private void removeFromTable(Slot e) { - int tableIdx = hash(e.chunkKey, e.offset); - Slot p = table[tableIdx]; - - if (p == e) { - table[tableIdx] = e.tableNext; - return; - } - - for (; p != null; p = p.tableNext) { - if (p.tableNext == e) { - p.tableNext = e.tableNext; - return; - } - } - } - - private void moveToHead(final Slot e) { - unlink(e); - e.lruPrev = null; - e.lruNext = lruHead; - if (lruHead != null) - lruHead.lruPrev = e; - else - lruTail = e; - lruHead = e; - } - - private void unlink(final Slot e) { - Slot prev = e.lruPrev; - Slot next = e.lruNext; - - if (prev != null) - prev.lruNext = next; - if (next != null) - next.lruPrev = prev; - } - - private int hash(ChunkKey key, int position) { - return (((key.hashCode() & 0xfffff000) + position) >>> 1) % table.length; - } - - static class Entry { - final byte[] data; - - final int type; - - Entry(final byte[] aData, final int aType) { - data = aData; - type = aType; - } - } - - private static class Slot { - final ChunkKey chunkKey; - - final int offset; - - final int size; - - Slot tableNext; - - Slot lruPrev; - - Slot lruNext; - - SoftReference<Entry> data; - - Slot(ChunkKey key, int offset, int size) { - this.chunkKey = key; - this.offset = offset; - this.size = size; - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java deleted file mode 100644 index 0fd253bfbf..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtCachedPack.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; -import java.text.MessageFormat; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo.ChunkList; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.storage.pack.CachedPack; -import org.eclipse.jgit.storage.pack.ObjectToPack; -import org.eclipse.jgit.storage.pack.PackOutputStream; -import org.eclipse.jgit.storage.pack.StoredObjectRepresentation; - -/** A cached pack stored by the DHT. */ -public class DhtCachedPack extends CachedPack { - private final CachedPackInfo info; - - private Set<ObjectId> tips; - - private Set<ChunkKey> keySet; - - private ChunkKey[] keyList; - - DhtCachedPack(CachedPackInfo info) { - this.info = info; - } - - @Override - public Set<ObjectId> getTips() { - if (tips == null) { - tips = new HashSet<ObjectId>(); - for (String idString : info.getTipList().getObjectNameList()) - tips.add(ObjectId.fromString(idString)); - tips = Collections.unmodifiableSet(tips); - } - return tips; - } - - @Override - public long getObjectCount() { - return info.getObjectsTotal(); - } - - @Override - public long getDeltaCount() throws IOException { - return info.getObjectsDelta(); - } - - /** @return information describing this cached pack. */ - public CachedPackInfo getCachedPackInfo() { - return info; - } - - @Override - public boolean hasObject(ObjectToPack obj, StoredObjectRepresentation rep) { - DhtObjectRepresentation objrep = (DhtObjectRepresentation) rep; - if (keySet == null) - init(); - return keySet.contains(objrep.getChunkKey()); - } - - private void init() { - ChunkList chunkList = info.getChunkList(); - int cnt = chunkList.getChunkKeyCount(); - keySet = new HashSet<ChunkKey>(); - keyList = new ChunkKey[cnt]; - for (int i = 0; i < cnt; i++) { - ChunkKey key = ChunkKey.fromString(chunkList.getChunkKey(i)); - keySet.add(key); - keyList[i] = key; - } - } - - void copyAsIs(PackOutputStream out, boolean validate, DhtReader ctx) - throws IOException { - if (keyList == null) - init(); - - // Clear the recent chunks because all of the reader's - // chunk limit should be made available for prefetch. 
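For illustration (not part of the original file): the comment above describes a budget handoff. While streaming a cached pack, the reader's recent-chunk cache is shrunk to zero so the prefetcher can spend the entire chunk limit, and the original limit is restored afterwards even if the copy fails. A small self-contained sketch of that pattern; the cache type here is hypothetical, standing in for the reader's recent-chunk cache:

    class BudgetHandoffSketch {
        // Hypothetical cache whose byte budget can be adjusted at runtime.
        static class ByteBudgetCache {
            private int maxBytes;

            ByteBudgetCache(int maxBytes) {
                this.maxBytes = maxBytes;
            }

            int getMaxBytes() {
                return maxBytes;
            }

            void setMaxBytes(int n) {
                maxBytes = n;
            }
        }

        // Lend the whole budget to a bulk copy, then always hand it back.
        static void copyWithFullBudget(ByteBudgetCache recent, Runnable copy) {
            int limit = recent.getMaxBytes();
            recent.setMaxBytes(0); // stop retaining recent chunks for now
            try {
                copy.run(); // e.g. prefetch and stream every chunk of the pack
            } finally {
                recent.setMaxBytes(limit); // restore the reader's budget
            }
        }
    }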
- int cacheLimit = ctx.getOptions().getChunkLimit(); - ctx.getRecentChunks().setMaxBytes(0); - try { - Prefetcher p = new Prefetcher(ctx, 0, cacheLimit); - p.push(Arrays.asList(keyList)); - copyPack(out, p, validate); - } finally { - ctx.getRecentChunks().setMaxBytes(cacheLimit); - } - } - - private void copyPack(PackOutputStream out, Prefetcher prefetcher, - boolean validate) throws DhtException, DhtMissingChunkException, - IOException { - Map<String, Long> startsAt = new HashMap<String, Long>(); - for (ChunkKey key : keyList) { - PackChunk chunk = prefetcher.get(key); - - // The prefetcher should always produce the chunk for us, if not - // there is something seriously wrong with the ordering or - // within the prefetcher code and aborting is more sane than - // using slow synchronous lookups. - // - if (chunk == null) - throw new DhtMissingChunkException(key); - - // Verify each long OFS_DELTA chunk appears at the right offset. - // This is a cheap validation that the cached pack hasn't been - // incorrectly created and would confuse the client. - // - long position = out.length(); - ChunkMeta meta = chunk.getMeta(); - if (meta != null && meta.getBaseChunkCount() != 0) { - for (ChunkMeta.BaseChunk base : meta.getBaseChunkList()) { - Long act = startsAt.get(base.getChunkKey()); - long exp = position - base.getRelativeStart(); - - if (act == null) { - throw new DhtException(MessageFormat.format(DhtText - .get().wrongChunkPositionInCachedPack, - rowKey(), base.getChunkKey(), - "[not written]", key, Long.valueOf(exp))); - } - - if (act.longValue() != exp) { - throw new DhtException(MessageFormat.format(DhtText - .get().wrongChunkPositionInCachedPack, - rowKey(), base.getChunkKey(), - act, key, Long.valueOf(exp))); - } - } - } - - startsAt.put(key.asString(), Long.valueOf(position)); - chunk.copyEntireChunkAsIs(out, null, validate); - } - } - - private String rowKey() { - return info.getName() + "." + info.getVersion(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java deleted file mode 100644 index 24963c7962..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtConfig.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; - -import org.eclipse.jgit.errors.ConfigInvalidException; -import org.eclipse.jgit.lib.StoredConfig; - -final class DhtConfig extends StoredConfig { - @Override - public void load() throws IOException, ConfigInvalidException { - clear(); - } - - @Override - public void save() throws IOException { - // TODO actually store this configuration. - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java deleted file mode 100644 index 7fdd662e06..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtException.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; - -import org.eclipse.jgit.storage.dht.spi.Database; - -/** Any error caused by a {@link Database} operation. */ -public class DhtException extends IOException { - private static final long serialVersionUID = 1L; - - /** - * @param message - */ - public DhtException(String message) { - super(message); - } - - /** - * @param cause - */ - public DhtException(Throwable cause) { - super(cause.getMessage()); - initCause(cause); - } - - /** - * @param message - * @param cause - */ - public DhtException(String message, Throwable cause) { - super(message); - initCause(cause); - } - - /** TODO: Remove this type and all of its locations. */ - public static class TODO extends RuntimeException { - private static final long serialVersionUID = 1L; - - /** - * @param what - */ - public TODO(String what) { - super(what); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java deleted file mode 100644 index 4ae4cf58ed..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserter.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht; - -import java.io.ByteArrayInputStream; -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.security.MessageDigest; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedList; -import java.util.zip.Deflater; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ObjectInfo.ObjectType; -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ObjectInserter; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.transport.PackParser; -import org.eclipse.jgit.transport.PackedObjectInfo; -import org.eclipse.jgit.util.IO; - -class DhtInserter extends ObjectInserter { - private final DhtObjDatabase objdb; - - private final RepositoryKey repo; - - private final Database db; - - private final DhtInserterOptions options; - - private Deflater deflater; - - private WriteBuffer dbWriteBuffer; - - private ChunkFormatter activeChunk; - - DhtInserter(DhtObjDatabase objdb) { - this.objdb = objdb; - this.repo = objdb.getRepository().getRepositoryKey(); - this.db = objdb.getDatabase(); - this.options = objdb.getInserterOptions(); - } - - @Override - public ObjectId insert(int type, long len, InputStream in) - throws IOException { - if (Integer.MAX_VALUE < len || mustFragmentSize() < len) - return insertStream(type, len, in); - - byte[] tmp; - try { - tmp = new byte[(int) len]; - } catch (OutOfMemoryError tooLarge) { - return insertStream(type, len, in); - } - IO.readFully(in, tmp, 0, tmp.length); - return insert(type, tmp, 0, tmp.length); - } - - private ObjectId insertStream(final int type, final long inflatedSize, - final InputStream in) throws IOException { - - // TODO Permit multiple chunks to be buffered here at once. - // It might be possible to compress and hold all chunks for - // an object, which would then allow them to write their - // ChunkInfo and chunks in parallel, as well as avoid the - // rewrite with the ChunkFragments at the end. 
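The insertStream body that follows derives the object id by hashing the canonical header ("<type> <size>\0") ahead of the raw content while deflating the data into chunk-sized pieces. A compact sketch of just the id computation, for a blob that is already in memory (effectively what ObjectInserter's idFor computes for in-memory data):

    import java.security.MessageDigest;

    import org.eclipse.jgit.lib.Constants;
    import org.eclipse.jgit.lib.ObjectId;

    /** Sketch: derive a git object id from "<type> <size>\0" plus content. */
    public class ObjectIdSketch {
        static ObjectId idOfBlob(byte[] content) {
            MessageDigest md = Constants.newMessageDigest();
            md.update(Constants.encodedTypeString(Constants.OBJ_BLOB));
            md.update((byte) ' ');
            md.update(Constants.encodeASCII(content.length));
            md.update((byte) 0);
            md.update(content);
            return ObjectId.fromRaw(md.digest());
        }

        public static void main(String[] args) {
            System.out.println(idOfBlob(Constants.encode("hello\n")).name());
        }
    }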
- - MessageDigest chunkDigest = Constants.newMessageDigest(); - LinkedList<ChunkKey> fragmentList = new LinkedList<ChunkKey>(); - - ChunkFormatter chunk = newChunk(); - int position = chunk.position(); - if (!chunk.whole(type, inflatedSize)) - throw new DhtException(DhtText.get().cannotInsertObject); - - MessageDigest objDigest = digest(); - objDigest.update(Constants.encodedTypeString(type)); - objDigest.update((byte) ' '); - objDigest.update(Constants.encodeASCII(inflatedSize)); - objDigest.update((byte) 0); - - Deflater def = deflater(); - byte[] inBuf = buffer(); - long packedSize = 0; - long done = 0; - while (done < inflatedSize) { - if (done == 0 || def.needsInput()) { - int inAvail = in.read(inBuf); - if (inAvail <= 0) - throw new EOFException(); - objDigest.update(inBuf, 0, inAvail); - def.setInput(inBuf, 0, inAvail); - done += inAvail; - } - - if (chunk.free() == 0) { - packedSize += chunk.size(); - chunk.setObjectType(type); - chunk.setFragment(); - fragmentList.add(chunk.end(chunkDigest)); - chunk.safePut(db, dbBuffer()); - chunk = newChunk(); - } - chunk.appendDeflateOutput(def); - } - - def.finish(); - - while (!def.finished()) { - if (chunk.free() == 0) { - packedSize += chunk.size(); - chunk.setObjectType(type); - chunk.setFragment(); - fragmentList.add(chunk.end(chunkDigest)); - chunk.safePut(db, dbBuffer()); - chunk = newChunk(); - } - chunk.appendDeflateOutput(def); - } - - ObjectId objId = ObjectId.fromRaw(objDigest.digest()); - PackedObjectInfo oe = new PackedObjectInfo(objId); - oe.setOffset(position); - - if (!chunk.isEmpty()) { - packedSize += chunk.size(); - chunk.setObjectType(type); - - if (fragmentList.isEmpty()) { - ChunkKey key = chunk.end(chunkDigest); - chunk.setChunkIndex(Collections.singletonList(oe)); - chunk.safePut(db, dbBuffer()); - - GitStore.ObjectInfo.Builder b = GitStore.ObjectInfo.newBuilder(); - b.setObjectType(ObjectType.valueOf(type)); - b.setOffset(position); - b.setPackedSize(packedSize); - b.setInflatedSize(inflatedSize); - ObjectInfo info = new ObjectInfo(key, b.build()); - ObjectIndexKey objKey = ObjectIndexKey.create(repo, objId); - db.objectIndex().add(objKey, info, dbBuffer()); - return objId; - } - - chunk.setFragment(); - fragmentList.add(chunk.end(chunkDigest)); - chunk.safePut(db, dbBuffer()); - } - chunk = null; - - ChunkKey firstChunkKey = fragmentList.get(0); - - ChunkMeta.Builder metaBuilder = ChunkMeta.newBuilder(); - for (ChunkKey k : fragmentList) - metaBuilder.addFragment(k.asString()); - ChunkMeta meta = metaBuilder.build(); - - for (ChunkKey key : fragmentList) { - PackChunk.Members builder = new PackChunk.Members(); - builder.setChunkKey(key); - builder.setMeta(meta); - - if (firstChunkKey.equals(key)) - builder.setChunkIndex(ChunkIndex.create(Arrays.asList(oe))); - - db.chunk().put(builder, dbBuffer()); - } - - GitStore.ObjectInfo.Builder b = GitStore.ObjectInfo.newBuilder(); - b.setObjectType(ObjectType.valueOf(type)); - b.setOffset(position); - b.setPackedSize(packedSize); - b.setInflatedSize(inflatedSize); - ObjectInfo info = new ObjectInfo(firstChunkKey, b.build()); - ObjectIndexKey objKey = ObjectIndexKey.create(repo, objId); - db.objectIndex().add(objKey, info, dbBuffer()); - - return objId; - } - - @Override - public ObjectId insert(int type, byte[] data, int off, int len) - throws IOException { - // TODO Is it important to avoid duplicate objects here? - // IIRC writing out a DirCache just blindly writes all of the - // tree objects to the inserter, relying on the inserter to - // strip out duplicates. 
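The deflate loop in insertStream above follows the standard java.util.zip.Deflater feed/drain pattern: offer input whenever needsInput() asks for it, then call finish() and drain until finished(). A self-contained sketch of the same pattern, writing to a single buffer instead of rolling over chunk formatters:

    import java.io.ByteArrayOutputStream;
    import java.util.zip.Deflater;

    /** Sketch of the Deflater feed/drain loop used by insertStream. */
    public class DeflateLoopSketch {
        static byte[] deflate(byte[] raw) {
            Deflater def = new Deflater(Deflater.DEFAULT_COMPRESSION);
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            byte[] buf = new byte[8192];

            def.setInput(raw);
            def.finish();
            while (!def.finished()) {
                int n = def.deflate(buf);
                out.write(buf, 0, n);
            }
            def.end();
            return out.toByteArray();
        }

        public static void main(String[] args) {
            byte[] z = deflate(new byte[64 * 1024]);
            System.out.println("deflated to " + z.length + " bytes");
        }
    }

In insertStream the drain side additionally checks chunk.free() and, when the current chunk fills, seals it as a fragment and opens a fresh one.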
We might need to buffer trees as - // long as possible, then collapse the buffer by looking up - // any existing objects and avoiding inserting those. - - if (mustFragmentSize() < len) - return insertStream(type, len, asStream(data, off, len)); - - ObjectId objId = idFor(type, data, off, len); - - if (activeChunk == null) - activeChunk = newChunk(); - - if (activeChunk.whole(deflater(), type, data, off, len, objId)) - return objId; - - // TODO Allow more than one chunk pending at a time, this would - // permit batching puts of the ChunkInfo records. - - if (!activeChunk.isEmpty()) { - activeChunk.end(digest()); - activeChunk.safePut(db, dbBuffer()); - activeChunk = newChunk(); - if (activeChunk.whole(deflater(), type, data, off, len, objId)) - return objId; - } - - return insertStream(type, len, asStream(data, off, len)); - } - - /** @return size that compressing still won't fit into a single chunk. */ - private int mustFragmentSize() { - return 4 * options.getChunkSize(); - } - - @Override - public PackParser newPackParser(InputStream in) throws IOException { - return new DhtPackParser(objdb, in); - } - - @Override - public void flush() throws IOException { - if (activeChunk != null && !activeChunk.isEmpty()) { - activeChunk.end(digest()); - activeChunk.safePut(db, dbBuffer()); - activeChunk = null; - } - - if (dbWriteBuffer != null) - dbWriteBuffer.flush(); - } - - @Override - public void release() { - if (deflater != null) { - deflater.end(); - deflater = null; - } - - dbWriteBuffer = null; - activeChunk = null; - } - - private Deflater deflater() { - if (deflater == null) - deflater = new Deflater(options.getCompression()); - else - deflater.reset(); - return deflater; - } - - private WriteBuffer dbBuffer() { - if (dbWriteBuffer == null) - dbWriteBuffer = db.newWriteBuffer(); - return dbWriteBuffer; - } - - private ChunkFormatter newChunk() { - ChunkFormatter fmt; - - fmt = new ChunkFormatter(repo, options); - fmt.setSource(GitStore.ChunkInfo.Source.INSERT); - return fmt; - } - - private static ByteArrayInputStream asStream(byte[] data, int off, int len) { - return new ByteArrayInputStream(data, off, len); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java deleted file mode 100644 index 56b323bd2b..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtInserterOptions.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static java.util.zip.Deflater.DEFAULT_COMPRESSION; -import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH; - -import java.security.SecureRandom; -import java.util.zip.Deflater; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.lib.Config; -import org.eclipse.jgit.lib.CoreConfig; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; - -/** Options controlling how objects are inserted into a DHT stored repository. */ -public class DhtInserterOptions { - private static final SecureRandom prng = new SecureRandom(); - - /** 1024 (number of bytes in one kibibyte/kilobyte) */ - public static final int KiB = 1024; - - /** 1024 {@link #KiB} (number of bytes in one mebibyte/megabyte) */ - public static final int MiB = 1024 * KiB; - - private int chunkSize; - - private int writeBufferSize; - - private int compression; - - private int prefetchDepth; - - private long parserCacheLimit; - - /** Create a default inserter configuration. */ - public DhtInserterOptions() { - setChunkSize(1 * MiB); - setWriteBufferSize(1 * MiB); - setCompression(DEFAULT_COMPRESSION); - setPrefetchDepth(50); - setParserCacheLimit(512 * getChunkSize()); - } - - /** @return maximum size of a chunk, in bytes. */ - public int getChunkSize() { - return chunkSize; - } - - /** - * Set the maximum size of a chunk, in bytes. - * - * @param sizeInBytes - * the maximum size. A chunk's data segment won't exceed this. - * @return {@code this} - */ - public DhtInserterOptions setChunkSize(int sizeInBytes) { - chunkSize = Math.max(1024, sizeInBytes); - return this; - } - - /** @return maximum number of outstanding write bytes. */ - public int getWriteBufferSize() { - return writeBufferSize; - } - - /** - * Set the maximum number of outstanding bytes in a {@link WriteBuffer}. - * - * @param sizeInBytes - * maximum number of bytes. - * @return {@code this} - */ - public DhtInserterOptions setWriteBufferSize(int sizeInBytes) { - writeBufferSize = Math.max(1024, sizeInBytes); - return this; - } - - /** @return maximum number of objects to put into a chunk. */ - public int getMaxObjectCount() { - // Do not allow the index to be larger than a chunk itself. - return getChunkSize() / (OBJECT_ID_LENGTH + 4); - } - - /** @return compression level used when writing new objects into chunks. */ - public int getCompression() { - return compression; - } - - /** - * Set the compression level used when writing new objects. - * - * @param level - * the compression level. 
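getMaxObjectCount() above caps a chunk's index so the index can never outgrow the chunk itself: each index entry is one object id plus a 4-byte offset. A worked example using the 1 MiB default chunk size (the numbers below are just that default, not a tuning recommendation):

    /** Worked example of the getMaxObjectCount() bound. */
    public class MaxObjectCountSketch {
        public static void main(String[] args) {
            int chunkSize = 1024 * 1024;       // DhtInserterOptions default (1 MiB)
            int objectIdLength = 20;           // Constants.OBJECT_ID_LENGTH
            int perEntry = objectIdLength + 4; // id plus 4-byte offset
            System.out.println(chunkSize / perEntry); // 43690 objects per chunk
        }
    }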
Use - * {@link Deflater#DEFAULT_COMPRESSION} to specify a default - * compression setting. - * @return {@code this} - */ - public DhtInserterOptions setCompression(int level) { - compression = level; - return this; - } - - /** - * Maximum number of entries in a chunk's prefetch list. - * <p> - * Each commit or tree chunk stores an optional prefetch list containing the - * next X chunk keys that a reader would need if they were traversing the - * project history. This implies that chunk prefetch lists are overlapping. - * <p> - * The depth at insertion time needs to be deep enough to allow readers to - * have sufficient parallel prefetch to keep themselves busy without waiting - * on sequential loads. If the depth is not sufficient, readers will stall - * while they sequentially look up the next chunk they need. - * - * @return maximum number of entries in a {@link ChunkMeta} list. - */ - public int getPrefetchDepth() { - return prefetchDepth; - } - - /** - * Maximum number of entries in a chunk's prefetch list. - * - * @param depth - * maximum depth of the prefetch list. - * @return {@code this} - */ - public DhtInserterOptions setPrefetchDepth(int depth) { - prefetchDepth = Math.max(0, depth); - return this; - } - - /** - * Number of chunks the parser can cache for delta resolution support. - * - * @return chunks to hold in memory to support delta resolution. - */ - public int getParserCacheSize() { - return (int) (getParserCacheLimit() / getChunkSize()); - } - - /** @return number of bytes the PackParser can cache for delta resolution. */ - public long getParserCacheLimit() { - return parserCacheLimit; - } - - /** - * Set the number of bytes the PackParser can cache. - * - * @param limit - * number of bytes the parser can cache. - * @return {@code this} - */ - public DhtInserterOptions setParserCacheLimit(long limit) { - parserCacheLimit = Math.max(0, limit); - return this; - } - - /** @return next random 32 bits to salt chunk keys. */ - int nextChunkSalt() { - return prng.nextInt(); - } - - /** - * Update properties by setting fields from the configuration. - * <p> - * If a property is not defined in the configuration, then it is left - * unmodified. - * - * @param rc - * configuration to read properties from. - * @return {@code this} - */ - public DhtInserterOptions fromConfig(Config rc) { - setChunkSize(rc.getInt("core", "dht", "chunkSize", getChunkSize())); - setWriteBufferSize(rc.getInt("core", "dht", "writeBufferSize", getWriteBufferSize())); - setCompression(rc.get(CoreConfig.KEY).getCompression()); - setPrefetchDepth(rc.getInt("core", "dht", "packParserPrefetchDepth", getPrefetchDepth())); - setParserCacheLimit(rc.getLong("core", "dht", "packParserCacheLimit", getParserCacheLimit())); - return this; - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java deleted file mode 100644 index 4fc103be95..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtMissingChunkException.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
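fromConfig(Config) above reads its tuning knobs from the core.dht section of a repository configuration. A sketch of the corresponding config text parsed with org.eclipse.jgit.lib.Config; the numeric values are invented, only the section, subsection, and key names come from the calls in fromConfig:

    import org.eclipse.jgit.errors.ConfigInvalidException;
    import org.eclipse.jgit.lib.Config;

    /** Sketch: config keys consumed by DhtInserterOptions.fromConfig(). */
    public class DhtConfigKeysSketch {
        public static void main(String[] args) throws ConfigInvalidException {
            Config rc = new Config();
            rc.fromText("[core \"dht\"]\n"
                    + "\tchunkSize = 2097152\n"
                    + "\twriteBufferSize = 1048576\n"
                    + "\tpackParserPrefetchDepth = 50\n"
                    + "\tpackParserCacheLimit = 1073741824\n");
            System.out.println(rc.getInt("core", "dht", "chunkSize", 1024 * 1024));
        }
    }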
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.text.MessageFormat; - -/** Indicates a {@link PackChunk} doesn't exist in the database. */ -public class DhtMissingChunkException extends DhtException { - private static final long serialVersionUID = 1L; - - private final ChunkKey chunkKey; - - /** - * Initialize a new missing chunk exception. - * - * @param key - * the key of the chunk that is not found. - */ - public DhtMissingChunkException(ChunkKey key) { - super(MessageFormat.format(DhtText.get().missingChunk, key)); - chunkKey = key; - } - - /** - * Initialize a new missing chunk exception. - * - * @param key - * the key of the chunk that is not found. - * @param why - * reason the chunk is missing. This may be an explanation about - * low-level data corruption in the database. - */ - public DhtMissingChunkException(ChunkKey key, Throwable why) { - this(key); - initCause(why); - } - - /** @return key of the chunk that is missing. */ - public ChunkKey getChunkKey() { - return chunkKey; - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java deleted file mode 100644 index 4261676b9e..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjDatabase.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.lib.ObjectDatabase; -import org.eclipse.jgit.lib.ObjectInserter; -import org.eclipse.jgit.lib.ObjectReader; -import org.eclipse.jgit.storage.dht.spi.Database; - -/** ObjectDatabase stored on top of the DHT database. */ -public class DhtObjDatabase extends ObjectDatabase { - private final DhtRepository repository; - - private final Database db; - - private final DhtReaderOptions readerOptions; - - private final DhtInserterOptions inserterOptions; - - DhtObjDatabase(DhtRepository repository, DhtRepositoryBuilder builder) { - this.repository = repository; - this.db = builder.getDatabase(); - this.readerOptions = builder.getReaderOptions(); - this.inserterOptions = builder.getInserterOptions(); - } - - DhtRepository getRepository() { - return repository; - } - - Database getDatabase() { - return db; - } - - DhtReaderOptions getReaderOptions() { - return readerOptions; - } - - DhtInserterOptions getInserterOptions() { - return inserterOptions; - } - - @Override - public boolean exists() { - return repository.getRepositoryKey() != null; - } - - @Override - public void close() { - // Do nothing. - } - - @Override - public ObjectReader newReader() { - return new DhtReader(this); - } - - @Override - public ObjectInserter newInserter() { - return new DhtInserter(this); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java deleted file mode 100644 index f6d55c1a49..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectRepresentation.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. 
- * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.storage.pack.StoredObjectRepresentation; - -final class DhtObjectRepresentation extends StoredObjectRepresentation { - private ObjectInfo info; - - void set(ObjectInfo link) { - this.info = link; - } - - ChunkKey getChunkKey() { - return info.getChunkKey(); - } - - int getOffset() { - return info.getOffset(); - } - - long getPackedSize() { - return info.getPackedSize(); - } - - boolean isFragmented() { - return info.isFragmented(); - } - - @Override - public ObjectId getDeltaBase() { - return info.getDeltaBase(); - } - - @Override - public int getFormat() { - if (info.isDelta()) - return PACK_DELTA; - return PACK_WHOLE; - } - - @Override - public int getWeight() { - long size = info.getPackedSize(); - return (int) Math.min(size, Integer.MAX_VALUE); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java deleted file mode 100644 index 98161802fa..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtObjectToPack.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.revwalk.RevObject; -import org.eclipse.jgit.storage.pack.ObjectToPack; -import org.eclipse.jgit.storage.pack.StoredObjectRepresentation; - -final class DhtObjectToPack extends ObjectToPack { - private static final int FRAGMENTED = 1 << 0; - - /** Chunk that contains this object. */ - ChunkKey chunk; - - /** Offset of this object within its chunk. */ - int offset; - - /** Number of bytes in the object's compressed form, excluding pack header. */ - int size; - - /** Order this chunk occurs in the {@link Prefetcher}. */ - int visitOrder; - - DhtObjectToPack(RevObject obj) { - super(obj); - } - - boolean isFragmented() { - return isExtendedFlag(FRAGMENTED); - } - - @Override - public void select(StoredObjectRepresentation ref) { - DhtObjectRepresentation rep = (DhtObjectRepresentation) ref; - chunk = rep.getChunkKey(); - offset = rep.getOffset(); - - final long sz = rep.getPackedSize(); - if (sz <= Integer.MAX_VALUE) - size = (int) sz; - else - size = -1; - - if (rep.isFragmented()) - setExtendedFlag(FRAGMENTED); - else - clearExtendedFlag(FRAGMENTED); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java deleted file mode 100644 index a3972474e0..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtPackParser.java +++ /dev/null @@ -1,1442 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
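DhtObjectToPack above stores its "fragmented" marker in a single extended-flag bit rather than a boolean field. A minimal standalone illustration of the same bit-flag idiom (FlagSketch and its field names are hypothetical):

    /** Sketch of the single-bit flag pattern used for FRAGMENTED. */
    public class FlagSketch {
        private static final int FRAGMENTED = 1 << 0;

        private int flags;

        void setFragmented()    { flags |= FRAGMENTED; }
        void clearFragmented()  { flags &= ~FRAGMENTED; }
        boolean isFragmented()  { return (flags & FRAGMENTED) != 0; }

        public static void main(String[] args) {
            FlagSketch f = new FlagSketch();
            f.setFragmented();
            System.out.println(f.isFragmented()); // true
        }
    }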
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Constants.OBJ_BLOB; -import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT; -import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA; -import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA; -import static org.eclipse.jgit.lib.Constants.OBJ_TAG; -import static org.eclipse.jgit.lib.Constants.OBJ_TREE; -import static org.eclipse.jgit.storage.dht.ChunkInfo.OBJ_MIXED; - -import java.io.IOException; -import java.io.InputStream; -import java.security.MessageDigest; -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.lib.MutableObjectId; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ObjectIdSubclassMap; -import org.eclipse.jgit.lib.ProgressMonitor; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.file.PackLock; -import org.eclipse.jgit.transport.PackParser; -import org.eclipse.jgit.transport.PackedObjectInfo; -import org.eclipse.jgit.treewalk.CanonicalTreeParser; -import org.eclipse.jgit.util.LongList; - -import com.google.protobuf.ByteString; - -/** Parses the pack stream into chunks, and 
indexes the chunks for lookup. */ -public class DhtPackParser extends PackParser { - private final DhtObjDatabase objdb; - - private final RepositoryKey repo; - - private final Database db; - - private final DhtInserterOptions options; - - private final MessageDigest chunkKeyDigest; - - /** Number of objects to write to the global index at once. */ - private final int linkBatchSize; - - private Boolean saveAsCachedPack; - - private WriteBuffer dbWriteBuffer; - - /** Chunk writers for the 4 major object types, keyed by object type code. */ - private ChunkFormatter[] openChunks; - - /** Edges for current chunks. */ - private Edges[] openEdges; - - /** Prior chunks that were written, keyed by object type code. */ - private List<ChunkKey>[] chunkByOrder; - - /** Information on chunks already written out. */ - private Map<ChunkKey, ChunkInfo> infoByKey; - - /** Information on chunks already written out. */ - private Map<ChunkKey, ChunkMeta> chunkMeta; - - /** ChunkMeta that needs to be written out again, as it was modified. */ - private Map<ChunkKey, ChunkMeta> dirtyMeta; - - private Map<ChunkKey, Edges> chunkEdges; - - // Correlated lists, sorted by object stream position. - private LongList objStreamPos; - - private LongList objChunkPtrs; - - /** Formatter handling the current object's data stream. */ - private ChunkFormatter currChunk; - - /** Current type of the object, if known. */ - private int currType; - - /** Position of the current object in the chunks we create. */ - private long currChunkPtr; - - /** If using OFS_DELTA, location of the base object in chunk space. */ - private long currBasePtr; - - /** Starting byte of the object data (aka end of the object header). */ - private int currDataPos; - - /** Total number of bytes in the object representation. */ - private long currPackedSize; - - /** Total number of bytes in the entire inflated object. */ - private long currInflatedSize; - - /** If the current object is fragmented, the list of chunks holding it. */ - private List<ChunkKey> currFragments; - - /** Previously written chunk that is being re-read during delta resolution. */ - private PackChunk dbChunk; - - /** Current read position in {@link #dbChunk}. */ - private int dbPtr; - - /** Recent chunks that were written, or recently read. */ - private LinkedHashMap<ChunkKey, PackChunk> chunkReadBackCache; - - /** Objects parsed from the stream, sorted by SHA-1. */ - private List<DhtInfo> objectListByName; - - /** Objects parsed from the stream, sorted by chunk (aka offset). */ - private List<DhtInfo> objectListByChunk; - - /** Iterators to write {@link #objectListByName} into the global index. */ - private ListIterator<DhtInfo>[] linkIterators; - - /** If the pack stream was self-contained, the cached pack info record key. */ - private CachedPackKey cachedPackKey; - - private CanonicalTreeParser treeParser; - - private final MutableObjectId idBuffer; - - private ObjectIdSubclassMap<DhtInfo> objectMap; - - DhtPackParser(DhtObjDatabase objdb, InputStream in) { - super(objdb, in); - - // Disable collision checking. DhtReader performs some magic to look - // only at old objects, so a colliding replacement will be ignored until - // its removed during garbage collection. 
- // - setCheckObjectCollisions(false); - - this.objdb = objdb; - this.repo = objdb.getRepository().getRepositoryKey(); - this.db = objdb.getDatabase(); - this.options = objdb.getInserterOptions(); - this.chunkKeyDigest = Constants.newMessageDigest(); - - dbWriteBuffer = db.newWriteBuffer(); - openChunks = new ChunkFormatter[5]; - openEdges = new Edges[5]; - chunkByOrder = newListArray(5); - infoByKey = new HashMap<ChunkKey, ChunkInfo>(); - dirtyMeta = new HashMap<ChunkKey, ChunkMeta>(); - chunkMeta = new HashMap<ChunkKey, ChunkMeta>(); - chunkEdges = new HashMap<ChunkKey, Edges>(); - treeParser = new CanonicalTreeParser(); - idBuffer = new MutableObjectId(); - objectMap = new ObjectIdSubclassMap<DhtInfo>(); - - final int max = options.getParserCacheSize(); - chunkReadBackCache = new LinkedHashMap<ChunkKey, PackChunk>(max, 0.75f, true) { - private static final long serialVersionUID = 1L; - - @Override - protected boolean removeEldestEntry(Entry<ChunkKey, PackChunk> e) { - return max < size(); - } - }; - - // The typical WriteBuffer flushes at 512 KiB increments, and - // the typical ObjectInfo record is around 180 bytes. Use these - // figures to come up with a rough estimate for how many links - // to construct in one region of the DHT before moving onto a - // different region in order to increase parallelism on large - // object imports. - // - linkBatchSize = 512 * 1024 / 180; - } - - @SuppressWarnings("unchecked") - private static <T> List<T>[] newListArray(int size) { - return new List[size]; - } - - /** @return if true, the pack stream is marked as a cached pack. */ - public boolean isSaveAsCachedPack() { - return saveAsCachedPack != null && saveAsCachedPack.booleanValue(); - } - - /** - * Enable saving the pack stream as a cached pack. - * - * @param save - * if true, the stream is saved. - */ - public void setSaveAsCachedPack(boolean save) { - saveAsCachedPack = Boolean.valueOf(save); - } - - @Override - public PackLock parse(ProgressMonitor receiving, ProgressMonitor resolving) - throws IOException { - boolean success = false; - try { - PackLock lock = super.parse(receiving, resolving); - - chunkReadBackCache = null; - openChunks = null; - openEdges = null; - treeParser = null; - - final int objCnt = getObjectCount(); - if (objCnt == 0) { - // If no objects were received, no chunks were created. Leaving - // success to false and doing a rollback is a good way to make - // sure this is true. 
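chunkReadBackCache above is the classic access-ordered LinkedHashMap LRU: the constructor's third argument enables access ordering, and removeEldestEntry evicts once the map holds more than the configured number of chunks. A self-contained sketch of that idiom with toy keys:

    import java.util.LinkedHashMap;
    import java.util.Map;

    /** Sketch of the access-ordered LinkedHashMap eviction used above. */
    public class LruSketch {
        static <K, V> Map<K, V> lru(final int max) {
            return new LinkedHashMap<K, V>(max, 0.75f, true) {
                private static final long serialVersionUID = 1L;

                @Override
                protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
                    return max < size();
                }
            };
        }

        public static void main(String[] args) {
            Map<String, String> cache = lru(2);
            cache.put("a", "1");
            cache.put("b", "2");
            cache.get("a");      // touch "a" so "b" becomes the eldest
            cache.put("c", "3"); // evicts "b"
            System.out.println(cache.keySet()); // [a, c]
        }
    }

The 512 KiB / 180-byte estimate a few lines above plays a similar sizing role for linkBatchSize, roughly 2,900 object-index links per batch.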
- // - return lock; - } - - createObjectLists(); - - if (isSaveAsCachedPack()) - putCachedPack(); - computeChunkEdges(); - putChunkIndexes(); - putDirtyMeta(); - - chunkMeta = null; - chunkEdges = null; - dirtyMeta = null; - objectMap = null; - objectListByChunk = null; - dbWriteBuffer.flush(); - - putGlobalIndex(resolving); - dbWriteBuffer.flush(); - - success = true; - return lock; - } finally { - openChunks = null; - openEdges = null; - objStreamPos = null; - objChunkPtrs = null; - currChunk = null; - currFragments = null; - dbChunk = null; - chunkReadBackCache = null; - infoByKey = null; - chunkMeta = null; - chunkEdges = null; - treeParser = null; - - if (!success) - rollback(); - - chunkByOrder = null; - objectListByName = null; - objectListByChunk = null; - linkIterators = null; - dbWriteBuffer = null; - } - } - - @SuppressWarnings("unchecked") - private void createObjectLists() { - List objs = getSortedObjectList(null /* by name */); - objectListByName = objs; - - int cnt = objectListByName.size(); - DhtInfo[] copy = objectListByName.toArray(new DhtInfo[cnt]); - Arrays.sort(copy, new Comparator<PackedObjectInfo>() { - public int compare(PackedObjectInfo o1, PackedObjectInfo o2) { - DhtInfo a = (DhtInfo) o1; - DhtInfo b = (DhtInfo) o2; - return Long.signum(a.chunkPtr - b.chunkPtr); - } - }); - objectListByChunk = Arrays.asList(copy); - } - - private void putCachedPack() throws DhtException { - CachedPackInfo.Builder info = CachedPackInfo.newBuilder(); - - for (DhtInfo obj : objectMap) { - if (!obj.isInPack()) - return; - - if (!obj.isReferenced()) - info.getTipListBuilder().addObjectName(obj.name()); - } - - MessageDigest version = Constants.newMessageDigest(); - addChunkList(info, version, chunkByOrder[OBJ_TAG]); - addChunkList(info, version, chunkByOrder[OBJ_COMMIT]); - addChunkList(info, version, chunkByOrder[OBJ_TREE]); - addChunkList(info, version, chunkByOrder[OBJ_BLOB]); - - info.setName(computePackName().name()); - info.setVersion(ObjectId.fromRaw(version.digest()).name()); - - cachedPackKey = CachedPackKey.fromInfo(info.build()); - for (List<ChunkKey> list : chunkByOrder) { - if (list == null) - continue; - for (ChunkKey key : list) { - ChunkInfo oldInfo = infoByKey.get(key); - GitStore.ChunkInfo.Builder b = - GitStore.ChunkInfo.newBuilder(oldInfo.getData()); - b.setCachedPackKey(cachedPackKey.asString()); - ChunkInfo newInfo = new ChunkInfo(key, b.build()); - infoByKey.put(key, newInfo); - - // A fragment was already put, and has to be re-put. - // Non-fragments will put later and do not put now. 
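putCachedPack above names the cached pack by hashing the object ids of everything it contains in name order (computePackName), and derives a version from the chunk hashes that addChunkList feeds into the version digest. A sketch of the name computation over two arbitrary example ids:

    import java.security.MessageDigest;
    import java.util.Arrays;
    import java.util.List;

    import org.eclipse.jgit.lib.Constants;
    import org.eclipse.jgit.lib.ObjectId;

    /** Sketch of computePackName: SHA-1 over raw object ids in name order. */
    public class PackNameSketch {
        public static void main(String[] args) {
            List<ObjectId> sortedByName = Arrays.asList(
                    ObjectId.fromString("0123456789012345678901234567890123456789"),
                    ObjectId.fromString("89abcdef8901234567890123456789abcdef0123"));

            byte[] buf = new byte[Constants.OBJECT_ID_LENGTH];
            MessageDigest md = Constants.newMessageDigest();
            for (ObjectId id : sortedByName) {
                id.copyRawTo(buf, 0);
                md.update(buf);
            }
            System.out.println(ObjectId.fromRaw(md.digest()).name());
        }
    }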
- if (newInfo.getData().getIsFragment()) - db.repository().put(repo, newInfo, dbWriteBuffer); - } - } - - db.repository().put(repo, info.build(), dbWriteBuffer); - } - - private void addChunkList(CachedPackInfo.Builder info, - MessageDigest version, List<ChunkKey> list) { - if (list == null) - return; - - long bytesTotal = info.getBytesTotal(); - long objectsTotal = info.getObjectsTotal(); - long objectsDelta = info.getObjectsDelta(); - - byte[] buf = new byte[Constants.OBJECT_ID_LENGTH]; - for (ChunkKey key : list) { - ChunkInfo chunkInfo = infoByKey.get(key); - GitStore.ChunkInfo c = chunkInfo.getData(); - int len = c.getChunkSize() - ChunkFormatter.TRAILER_SIZE; - bytesTotal += len; - objectsTotal += c.getObjectCounts().getTotal(); - objectsDelta += c.getObjectCounts().getOfsDelta(); - objectsDelta += c.getObjectCounts().getRefDelta(); - info.getChunkListBuilder().addChunkKey( - chunkInfo.getChunkKey().asString()); - chunkInfo.getChunkKey().getChunkHash().copyRawTo(buf, 0); - version.update(buf); - } - - info.setBytesTotal(bytesTotal); - info.setObjectsTotal(objectsTotal); - info.setObjectsDelta(objectsDelta); - } - - private ObjectId computePackName() { - byte[] buf = new byte[Constants.OBJECT_ID_LENGTH]; - MessageDigest md = Constants.newMessageDigest(); - for (DhtInfo otp : objectListByName) { - otp.copyRawTo(buf, 0); - md.update(buf); - } - return ObjectId.fromRaw(md.digest()); - } - - private void rollback() throws DhtException { - try { - dbWriteBuffer.abort(); - dbWriteBuffer = db.newWriteBuffer(); - - if (cachedPackKey != null) - db.repository().remove(repo, cachedPackKey, dbWriteBuffer); - - if (linkIterators != null) { - boolean removed = true; - while (removed) { - removed = false; - for (ListIterator<DhtInfo> itr : linkIterators) { - int cnt = 0; - while (itr.hasPrevious() && cnt < linkBatchSize) { - DhtInfo oe = itr.previous(); - db.objectIndex().remove( // - ObjectIndexKey.create(repo, oe), // - chunkOf(oe.chunkPtr), // - dbWriteBuffer); - cnt++; - } - if (0 < cnt) - removed = true; - } - } - } - - deleteChunks(chunkByOrder[OBJ_COMMIT]); - deleteChunks(chunkByOrder[OBJ_TREE]); - deleteChunks(chunkByOrder[OBJ_BLOB]); - deleteChunks(chunkByOrder[OBJ_TAG]); - - dbWriteBuffer.flush(); - } catch (Throwable err) { - throw new DhtException(DhtText.get().packParserRollbackFailed, err); - } - } - - private void deleteChunks(List<ChunkKey> list) throws DhtException { - if (list != null) { - for (ChunkKey key : list) { - db.chunk().remove(key, dbWriteBuffer); - db.repository().remove(repo, key, dbWriteBuffer); - } - } - } - - private void putGlobalIndex(ProgressMonitor pm) throws DhtException { - int objcnt = objectListByName.size(); - pm.beginTask(DhtText.get().recordingObjects, objcnt); - - int segments = Math.max(1, Math.min(objcnt / linkBatchSize, 32)); - linkIterators = newListIteratorArray(segments); - - int objsPerSegment = objcnt / segments; - int beginIdx = 0; - for (int i = 0; i < segments - 1; i++) { - int endIdx = Math.min(beginIdx + objsPerSegment, objcnt); - linkIterators[i] = objectListByName.subList(beginIdx, endIdx) - .listIterator(); - beginIdx = endIdx; - } - linkIterators[segments - 1] = objectListByName - .subList(beginIdx, objcnt).listIterator(); - - boolean inserted = true; - while (inserted) { - inserted = false; - for (ListIterator<DhtInfo> itr : linkIterators) { - int cnt = 0; - while (itr.hasNext() && cnt < linkBatchSize) { - DhtInfo oe = itr.next(); - db.objectIndex().add( // - ObjectIndexKey.create(repo, oe), // - oe.info(chunkOf(oe.chunkPtr)), // - 
dbWriteBuffer); - cnt++; - } - if (0 < cnt) { - pm.update(cnt); - inserted = true; - } - } - } - - pm.endTask(); - } - - @SuppressWarnings("unchecked") - private static ListIterator<DhtInfo>[] newListIteratorArray(int size) { - return new ListIterator[size]; - } - - private void computeChunkEdges() throws DhtException { - List<DhtInfo> objs = objectListByChunk; - int beginIdx = 0; - ChunkKey key = chunkOf(objs.get(0).chunkPtr); - int type = typeOf(objs.get(0).chunkPtr); - - int objIdx = 1; - for (; objIdx < objs.size(); objIdx++) { - DhtInfo oe = objs.get(objIdx); - ChunkKey oeKey = chunkOf(oe.chunkPtr); - if (!key.equals(oeKey)) { - computeEdges(objs.subList(beginIdx, objIdx), key, type); - beginIdx = objIdx; - - key = oeKey; - type = typeOf(oe.chunkPtr); - } - if (type != OBJ_MIXED && type != typeOf(oe.chunkPtr)) - type = OBJ_MIXED; - } - computeEdges(objs.subList(beginIdx, objs.size()), key, type); - } - - private void computeEdges(List<DhtInfo> objs, ChunkKey key, int type) - throws DhtException { - Edges edges = chunkEdges.get(key); - if (edges == null) - return; - - for (DhtInfo obj : objs) - edges.remove(obj); - - switch (type) { - case OBJ_COMMIT: - edges.commitEdges = toChunkList(edges.commitIds); - break; - case OBJ_TREE: - // TODO prefetch tree edges - break; - } - - edges.commitIds = null; - } - - private List<ChunkKey> toChunkList(Set<DhtInfo> objects) - throws DhtException { - if (objects == null || objects.isEmpty()) - return null; - - Map<ChunkKey, ChunkOrderingEntry> map = new HashMap<ChunkKey, ChunkOrderingEntry>(); - for (DhtInfo obj : objects) { - if (!obj.isInPack()) - continue; - - long chunkPtr = obj.chunkPtr; - ChunkKey key = chunkOf(chunkPtr); - ChunkOrderingEntry e = map.get(key); - if (e == null) { - e = new ChunkOrderingEntry(); - e.key = key; - e.order = chunkIdx(chunkPtr); - map.put(key, e); - } else { - e.order = Math.min(e.order, chunkIdx(chunkPtr)); - } - } - - ChunkOrderingEntry[] tmp = map.values().toArray( - new ChunkOrderingEntry[map.size()]); - Arrays.sort(tmp); - - ChunkKey[] out = new ChunkKey[tmp.length]; - for (int i = 0; i < tmp.length; i++) - out[i] = tmp[i].key; - return Arrays.asList(out); - } - - private static final class ChunkOrderingEntry implements - Comparable<ChunkOrderingEntry> { - ChunkKey key; - - int order; - - public int compareTo(ChunkOrderingEntry o) { - return order - o.order; - } - } - - private void putChunkIndexes() throws DhtException { - List<DhtInfo> objs = objectListByChunk; - int sIdx = 0; - DhtInfo oe = objs.get(0); - oe.setOffset(offsetOf(oe.chunkPtr)); - - ChunkKey key = chunkOf(oe.chunkPtr); - int type = typeOf(oe.chunkPtr); - - int objIdx = 1; - for (; objIdx < objs.size(); objIdx++) { - oe = objs.get(objIdx); - oe.setOffset(offsetOf(oe.chunkPtr)); - - ChunkKey oeKey = chunkOf(oe.chunkPtr); - if (!key.equals(oeKey)) { - putChunkIndex(objs.subList(sIdx, objIdx), key, type); - sIdx = objIdx; - - key = oeKey; - type = typeOf(oe.chunkPtr); - } - if (type != OBJ_MIXED && type != typeOf(oe.chunkPtr)) - type = OBJ_MIXED; - } - putChunkIndex(objs.subList(sIdx, objs.size()), key, type); - } - - private void putChunkIndex(List<DhtInfo> objectList, ChunkKey key, int type) - throws DhtException { - ChunkInfo oldInfo = infoByKey.get(key); - GitStore.ChunkInfo.Builder info - = GitStore.ChunkInfo.newBuilder(oldInfo.getData()); - - PackChunk.Members builder = new PackChunk.Members(); - builder.setChunkKey(key); - - byte[] index = ChunkIndex.create(objectList); - info.setIndexSize(index.length); - builder.setChunkIndex(index); - - 
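putGlobalIndex above splits the name-sorted object list into up to 32 segments and then round-robins batches of linkBatchSize links across them, so consecutive writes land in different regions of the DHT and can proceed in parallel. A minimal sketch of that interleaving over a plain integer list (the segment count and batch size here are toy values):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.ListIterator;

    /** Sketch of the segment plus round-robin batching in putGlobalIndex. */
    public class RoundRobinBatches {
        public static void main(String[] args) {
            List<Integer> items = new ArrayList<Integer>();
            for (int i = 0; i < 10; i++)
                items.add(Integer.valueOf(i));

            int segments = 2;
            int batchSize = 2;
            int per = items.size() / segments;

            List<ListIterator<Integer>> iters = new ArrayList<ListIterator<Integer>>();
            for (int s = 0; s < segments; s++) {
                int begin = s * per;
                int end = (s == segments - 1) ? items.size() : begin + per;
                iters.add(items.subList(begin, end).listIterator());
            }

            boolean inserted = true;
            while (inserted) {
                inserted = false;
                for (ListIterator<Integer> itr : iters) {
                    List<Integer> batch = new ArrayList<Integer>();
                    while (itr.hasNext() && batch.size() < batchSize)
                        batch.add(itr.next());
                    if (!batch.isEmpty()) {
                        System.out.println("write batch " + batch);
                        inserted = true;
                    }
                }
            }
        }
    }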
ChunkMeta meta = dirtyMeta.remove(key); - if (meta == null) - meta = chunkMeta.get(key); - - switch (type) { - case OBJ_COMMIT: { - Edges edges = chunkEdges.get(key); - List<ChunkKey> e = edges != null ? edges.commitEdges : null; - List<ChunkKey> s = sequentialHint(key, OBJ_COMMIT); - if (e == null) - e = Collections.emptyList(); - if (s == null) - s = Collections.emptyList(); - if (!e.isEmpty() || !s.isEmpty()) { - ChunkMeta.Builder m = edit(meta); - ChunkMeta.PrefetchHint.Builder h = m.getCommitPrefetchBuilder(); - for (ChunkKey k : e) - h.addEdge(k.asString()); - for (ChunkKey k : s) - h.addSequential(k.asString()); - meta = m.build(); - } - break; - } - case OBJ_TREE: { - List<ChunkKey> s = sequentialHint(key, OBJ_TREE); - if (s == null) - s = Collections.emptyList(); - if (!s.isEmpty()) { - ChunkMeta.Builder m = edit(meta); - ChunkMeta.PrefetchHint.Builder h = m.getTreePrefetchBuilder(); - for (ChunkKey k : s) - h.addSequential(k.asString()); - meta = m.build(); - } - break; - } - } - - if (meta != null) { - info.setMetaSize(meta.getSerializedSize()); - builder.setMeta(meta); - } - - ChunkInfo newInfo = new ChunkInfo(key, info.build()); - infoByKey.put(key, newInfo); - db.repository().put(repo, newInfo, dbWriteBuffer); - db.chunk().put(builder, dbWriteBuffer); - } - - private static ChunkMeta.Builder edit(ChunkMeta meta) { - if (meta != null) - return ChunkMeta.newBuilder(meta); - return ChunkMeta.newBuilder(); - } - - private List<ChunkKey> sequentialHint(ChunkKey key, int typeCode) { - List<ChunkKey> all = chunkByOrder[typeCode]; - if (all == null) - return null; - int idx = all.indexOf(key); - if (0 <= idx) { - int max = options.getPrefetchDepth(); - int end = Math.min(idx + 1 + max, all.size()); - return all.subList(idx + 1, end); - } - return null; - } - - private void putDirtyMeta() throws DhtException { - for (Map.Entry<ChunkKey, ChunkMeta> meta : dirtyMeta.entrySet()) { - PackChunk.Members builder = new PackChunk.Members(); - builder.setChunkKey(meta.getKey()); - builder.setMeta(meta.getValue()); - db.chunk().put(builder, dbWriteBuffer); - } - } - - @Override - protected PackedObjectInfo newInfo(AnyObjectId id, UnresolvedDelta delta, - ObjectId baseId) { - DhtInfo obj = objectMap.addIfAbsent(new DhtInfo(id)); - if (delta != null) { - DhtDelta d = (DhtDelta) delta; - obj.chunkPtr = d.chunkPtr; - obj.packedSize = d.packedSize; - obj.inflatedSize = d.inflatedSize; - obj.base = baseId; - obj.setType(d.getType()); - if (d.isFragmented()) - obj.setFragmented(); - } - return obj; - } - - @Override - protected void onPackHeader(long objCnt) throws IOException { - if (Integer.MAX_VALUE < objCnt) { - throw new DhtException(MessageFormat.format( - DhtText.get().tooManyObjectsInPack, Long.valueOf(objCnt))); - } - - objStreamPos = new LongList((int) objCnt); - objChunkPtrs = new LongList((int) objCnt); - - if (saveAsCachedPack == null) - setSaveAsCachedPack(1000 < objCnt); - } - - @Override - protected void onBeginWholeObject(long streamPosition, int type, - long inflatedSize) throws IOException { - ChunkFormatter w = begin(type); - if (!w.whole(type, inflatedSize)) { - endChunk(type); - w = begin(type); - if (!w.whole(type, inflatedSize)) - throw panicCannotInsert(); - } - - currType = type; - currDataPos = w.position(); - currPackedSize = 0; - currInflatedSize = inflatedSize; - objStreamPos.add(streamPosition); - } - - @Override - protected void onEndWholeObject(PackedObjectInfo info) throws IOException { - boolean fragmented = currFragments != null; - endOneObject(); - - DhtInfo oe = 
(DhtInfo) info; - oe.chunkPtr = currChunkPtr; - oe.packedSize = currPackedSize; - oe.inflatedSize = currInflatedSize; - oe.setType(currType); - if (fragmented) - oe.setFragmented(); - } - - private void endOneObject() throws DhtException { - if (currFragments != null) - endFragmentedObject(); - objChunkPtrs.add(currChunkPtr); - } - - @Override - protected void onBeginOfsDelta(long deltaPos, long basePos, - long inflatedSize) throws IOException { - long basePtr = objChunkPtrs.get(findStreamIndex(basePos)); - int type = typeOf(basePtr); - - currType = type; - currPackedSize = 0; - currInflatedSize = inflatedSize; - currBasePtr = basePtr; - objStreamPos.add(deltaPos); - - ChunkFormatter w = begin(type); - if (isInCurrentChunk(basePtr)) { - if (w.ofsDelta(inflatedSize, w.position() - offsetOf(basePtr))) { - currDataPos = w.position(); - return; - } - - endChunk(type); - w = begin(type); - } - - if (!longOfsDelta(w, inflatedSize, basePtr)) { - endChunk(type); - w = begin(type); - if (!longOfsDelta(w, inflatedSize, basePtr)) - throw panicCannotInsert(); - } - - currDataPos = w.position(); - } - - @Override - protected void onBeginRefDelta(long deltaPos, AnyObjectId baseId, - long inflatedSize) throws IOException { - // Try to get the base type, but only if it was seen before in this - // pack stream. If not assume worst-case of BLOB type. - // - int typeCode; - DhtInfo baseInfo = objectMap.get(baseId); - if (baseInfo != null && baseInfo.isInPack()) { - typeCode = baseInfo.getType(); - currType = typeCode; - } else { - typeCode = OBJ_BLOB; - currType = -1; - } - - ChunkFormatter w = begin(typeCode); - if (!w.refDelta(inflatedSize, baseId)) { - endChunk(typeCode); - w = begin(typeCode); - if (!w.refDelta(inflatedSize, baseId)) - throw panicCannotInsert(); - } - - currDataPos = w.position(); - currPackedSize = 0; - currInflatedSize = inflatedSize; - objStreamPos.add(deltaPos); - } - - @Override - protected DhtDelta onEndDelta() throws IOException { - boolean fragmented = currFragments != null; - endOneObject(); - - DhtDelta delta = new DhtDelta(); - delta.chunkPtr = currChunkPtr; - delta.packedSize = currPackedSize; - delta.inflatedSize = currInflatedSize; - if (0 < currType) - delta.setType(currType); - if (fragmented) - delta.setFragmented(); - return delta; - } - - @Override - protected void onObjectData(Source src, byte[] raw, int pos, int len) - throws IOException { - if (src != Source.INPUT) - return; - - if (currChunk.append(raw, pos, len)) { - currPackedSize += len; - return; - } - - if (currFragments == null && currChunk.getObjectCount() == 1) - currFragments = new LinkedList<ChunkKey>(); - if (currFragments != null) { - appendToFragment(raw, pos, len); - return; - } - - // Everything between dataPos and dataEnd must be saved. 
- // - final int dataPos = currDataPos; - final int dataEnd = currChunk.position(); - final int hdrPos = offsetOf(currChunkPtr); - final int hdrLen = dataPos - hdrPos; - final int type = typeOf(currChunkPtr); - byte[] dataOld = currChunk.getRawChunkDataArray(); - final int typeOld = currChunk.getCurrentObjectType(); - - currChunk.rollback(); - endChunk(type); - - final ChunkFormatter w = begin(type); - switch (typeOld) { - case OBJ_COMMIT: - case OBJ_BLOB: - case OBJ_TREE: - case OBJ_TAG: - case OBJ_REF_DELTA: - w.adjustObjectCount(1, typeOld); - if (!w.append(dataOld, hdrPos, hdrLen)) - throw panicCannotInsert(); - break; - - case OBJ_OFS_DELTA: - if (!longOfsDelta(w, currInflatedSize, currBasePtr)) - throw panicCannotInsert(); - break; - - default: - throw new DhtException("Internal programming error: " + typeOld); - } - - currDataPos = w.position(); - if (dataPos < dataEnd && !w.append(dataOld, dataPos, dataEnd - dataPos)) - throw panicCannotInsert(); - dataOld = null; - - if (w.append(raw, pos, len)) { - currPackedSize += len; - } else { - currFragments = new LinkedList<ChunkKey>(); - appendToFragment(raw, pos, len); - } - } - - private boolean longOfsDelta(ChunkFormatter w, long infSize, long basePtr) { - final int type = typeOf(basePtr); - final List<ChunkKey> infoList = chunkByOrder[type]; - final int baseIdx = chunkIdx(basePtr); - final ChunkInfo baseInfo = infoByKey.get(infoList.get(baseIdx)); - - // Go backwards to the start of the base's chunk. - long relativeChunkStart = 0; - for (int i = infoList.size() - 1; baseIdx <= i; i--) { - GitStore.ChunkInfo info = infoByKey.get(infoList.get(i)).getData(); - int packSize = info.getChunkSize() - ChunkFormatter.TRAILER_SIZE; - relativeChunkStart += packSize; - } - - // Offset to the base goes back to start of our chunk, then start of - // the base chunk, but slide forward the distance of the base within - // its own chunk. 
- // - long ofs = w.position() + relativeChunkStart - offsetOf(basePtr); - if (w.ofsDelta(infSize, ofs)) { - w.useBaseChunk(relativeChunkStart, baseInfo.getChunkKey()); - return true; - } - return false; - } - - private void appendToFragment(byte[] raw, int pos, int len) - throws DhtException { - while (0 < len) { - if (currChunk.free() == 0) { - int typeCode = typeOf(currChunkPtr); - currChunk.setFragment(); - currFragments.add(endChunk(typeCode)); - currChunk = openChunk(typeCode); - } - - int n = Math.min(len, currChunk.free()); - currChunk.append(raw, pos, n); - currPackedSize += n; - pos += n; - len -= n; - } - } - - private void endFragmentedObject() throws DhtException { - currChunk.setFragment(); - ChunkKey lastKey = endChunk(typeOf(currChunkPtr)); - if (lastKey != null) - currFragments.add(lastKey); - - ChunkMeta.Builder protoBuilder = ChunkMeta.newBuilder(); - for (ChunkKey key : currFragments) - protoBuilder.addFragment(key.asString()); - ChunkMeta protoMeta = protoBuilder.build(); - - for (ChunkKey key : currFragments) { - ChunkMeta oldMeta = chunkMeta.get(key); - if (oldMeta != null) { - ChunkMeta.Builder newMeta = ChunkMeta.newBuilder(oldMeta); - newMeta.clearFragment(); - newMeta.mergeFrom(protoMeta); - ChunkMeta meta = newMeta.build(); - dirtyMeta.put(key, meta); - chunkMeta.put(key, meta); - } else { - dirtyMeta.put(key, protoMeta); - chunkMeta.put(key, protoMeta); - } - } - currFragments = null; - } - - @Override - protected void onInflatedObjectData(PackedObjectInfo obj, int typeCode, - byte[] data) throws IOException { - DhtInfo info = (DhtInfo) obj; - info.inflatedSize = data.length; - info.setType(typeCode); - - switch (typeCode) { - case OBJ_COMMIT: - onCommit(info, data); - break; - - case OBJ_TREE: - onTree(data); - break; - - case OBJ_TAG: - onTag(data); - break; - } - } - - private void onCommit(DhtInfo obj, byte[] raw) throws DhtException { - Edges edges = edges(obj.chunkPtr); - edges.remove(obj); - - // TODO compute hints for trees. 
- if (isSaveAsCachedPack()) { - idBuffer.fromString(raw, 5); - lookupByName(idBuffer).setReferenced(); - } - - int ptr = 46; - while (raw[ptr] == 'p') { - idBuffer.fromString(raw, ptr + 7); - DhtInfo p = lookupByName(idBuffer); - p.setReferenced(); - edges.commit(p); - ptr += 48; - } - } - - private void onTree(byte[] data) { - if (isSaveAsCachedPack()) { - treeParser.reset(data); - while (!treeParser.eof()) { - idBuffer.fromRaw(treeParser.idBuffer(), treeParser.idOffset()); - lookupByName(idBuffer).setReferenced(); - treeParser.next(); - } - } - } - - private void onTag(byte[] data) { - if (isSaveAsCachedPack()) { - idBuffer.fromString(data, 7); // "object $sha1" - lookupByName(idBuffer).setReferenced(); - } - } - - private DhtInfo lookupByName(AnyObjectId obj) { - DhtInfo info = objectMap.get(obj); - if (info == null) { - info = new DhtInfo(obj); - objectMap.add(info); - } - return info; - } - - private Edges edges(long chunkPtr) throws DhtException { - if (isInCurrentChunk(chunkPtr)) { - int type = typeOf(chunkPtr); - Edges s = openEdges[type]; - if (s == null) { - s = new Edges(); - openEdges[type] = s; - } - return s; - } else { - ChunkKey key = chunkOf(chunkPtr); - Edges s = chunkEdges.get(key); - if (s == null) { - s = new Edges(); - chunkEdges.put(key, s); - } - return s; - } - } - - private static class Edges { - Set<DhtInfo> commitIds; - - List<ChunkKey> commitEdges; - - void commit(DhtInfo id) { - if (commitIds == null) - commitIds = new HashSet<DhtInfo>(); - commitIds.add(id); - } - - void remove(DhtInfo id) { - if (commitIds != null) - commitIds.remove(id); - } - } - - @Override - protected ObjectTypeAndSize seekDatabase(PackedObjectInfo obj, - ObjectTypeAndSize info) throws IOException { - return seekDatabase(((DhtInfo) obj).chunkPtr, info); - } - - @Override - protected ObjectTypeAndSize seekDatabase(UnresolvedDelta delta, - ObjectTypeAndSize info) throws IOException { - return seekDatabase(((DhtDelta) delta).chunkPtr, info); - } - - private ObjectTypeAndSize seekDatabase(long chunkPtr, ObjectTypeAndSize info) - throws DhtException { - seekChunk(chunkOf(chunkPtr), true); - dbPtr = dbChunk.readObjectTypeAndSize(offsetOf(chunkPtr), info); - return info; - } - - @Override - protected int readDatabase(byte[] dst, int pos, int cnt) throws IOException { - int n = dbChunk.read(dbPtr, dst, pos, cnt); - if (0 < n) { - dbPtr += n; - return n; - } - - // ChunkMeta for fragments is delayed writing, so it isn't available - // on the chunk if the chunk was read-back from the database. Use - // our copy of ChunkMeta instead of the PackChunk's copy. 
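The magic offsets in onCommit above come from the canonical commit encoding: "tree " (5 bytes) plus a 40-character hex id plus LF puts the first "parent " line at byte 46, and each parent line is 7 + 40 + 1 = 48 bytes long, with the parent id starting 7 bytes into the line. A standalone sketch with made-up object ids showing where those offsets land; nothing here is from the deleted file itself:

    // Illustrative only: offsets into a canonical commit header, matching the
    // constants used by onCommit (tree id at 5, first parent line at 46,
    // parent id at ptr + 7, next parent line at ptr + 48).
    public class CommitHeaderOffsets {
        public static void main(String[] args) {
            String tree = "8d2b8df4d2b59ec9a67bf26fe0700d1474587ebe";   // example id
            String parent = "c8a1589e7a1b7c4a8b0e4b7f4c2d9e6f01234567"; // example id
            String raw = "tree " + tree + "\n"
                    + "parent " + parent + "\n";

            // "tree " is 5 bytes, so the tree id begins at offset 5.
            System.out.println(raw.substring(5, 45));
            // 5 + 40 + 1 (LF) = 46: the first "parent " line starts here.
            int ptr = 46;
            System.out.println(raw.charAt(ptr));              // 'p'
            // "parent " is 7 bytes, so the parent id begins at ptr + 7.
            System.out.println(raw.substring(ptr + 7, ptr + 47));
            // 7 + 40 + 1 (LF) = 48: stride to the next parent line.
            System.out.println(ptr + 48);
        }
    }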
- - ChunkMeta meta = chunkMeta.get(dbChunk.getChunkKey()); - if (meta == null) - return 0; - - ChunkKey next = ChunkMetaUtil.getNextFragment(meta, dbChunk.getChunkKey()); - if (next == null) - return 0; - - seekChunk(next, false); - n = dbChunk.read(0, dst, pos, cnt); - dbPtr = n; - return n; - } - - private void seekChunk(ChunkKey key, boolean cache) throws DhtException, - DhtTimeoutException { - if (dbChunk == null || !dbChunk.getChunkKey().equals(key)) { - dbChunk = chunkReadBackCache.get(key); - if (dbChunk == null) { - dbWriteBuffer.flush(); - - Collection<PackChunk.Members> found; - Context opt = Context.READ_REPAIR; - Sync<Collection<PackChunk.Members>> sync = Sync.create(); - db.chunk().get(opt, Collections.singleton(key), sync); - try { - found = sync.get(objdb.getReaderOptions().getTimeout()); - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } catch (TimeoutException e) { - throw new DhtTimeoutException(e); - } - - if (found.isEmpty()) { - throw new DhtException(MessageFormat.format( - DhtText.get().missingChunk, key)); - } - - dbChunk = found.iterator().next().build(); - if (cache) - chunkReadBackCache.put(key, dbChunk); - } - } - } - - @Override - protected boolean onAppendBase(int typeCode, byte[] data, - PackedObjectInfo info) throws IOException { - return false; // This implementation does not copy base objects. - } - - @Override - protected void onEndThinPack() throws IOException { - // Do nothing, this event is not relevant. - } - - @Override - protected void onPackFooter(byte[] hash) throws IOException { - // TODO Combine together fractional chunks to reduce overhead. - // Fractional chunks are common for single-commit pushes since - // they are broken out by object type. - - // TODO Try to combine the chunk data and its index into a single - // put call for the last chunk of each type. This would break the - // read back we do in seekDatabase during delta resolution. - - // If there are deltas to be resolved the pending chunks - // will need to be reloaded later. Ensure they are stored. - // - endChunk(OBJ_COMMIT); - endChunk(OBJ_TREE); - endChunk(OBJ_BLOB); - endChunk(OBJ_TAG); - - // These are only necessary during initial parsing. Drop them now. - // - objStreamPos = null; - objChunkPtrs = null; - } - - @Override - protected void onObjectHeader(Source src, byte[] raw, int pos, int len) - throws IOException { - // Do nothing, the original stream headers are not used. - } - - @Override - protected void onStoreStream(byte[] raw, int pos, int len) - throws IOException { - // Do nothing, the stream is being sliced and cannot be stored as-is. - } - - @Override - protected boolean checkCRC(int oldCRC) { - return true; // Don't bother to check CRCs, assume the chunk is OK. 
- } - - private ChunkFormatter begin(int typeCode) throws DhtException { - ChunkFormatter w = openChunk(typeCode); - currChunk = w; - currChunkPtr = makeObjectPointer(w, typeCode); - return w; - } - - private ChunkFormatter openChunk(int typeCode) throws DhtException { - if (typeCode == 0) - throw new DhtException("Invalid internal typeCode 0"); - - ChunkFormatter w = openChunks[typeCode]; - if (w == null) { - w = new ChunkFormatter(repo, options); - w.setSource(GitStore.ChunkInfo.Source.RECEIVE); - w.setObjectType(typeCode); - openChunks[typeCode] = w; - } - return w; - } - - private ChunkKey endChunk(int typeCode) throws DhtException { - ChunkFormatter w = openChunks[typeCode]; - if (w == null) - return null; - - openChunks[typeCode] = null; - currChunk = null; - - if (w.isEmpty()) - return null; - - ChunkKey key = w.end(chunkKeyDigest); - ChunkInfo info = w.getChunkInfo(); - - if (chunkByOrder[typeCode] == null) - chunkByOrder[typeCode] = new ArrayList<ChunkKey>(); - chunkByOrder[typeCode].add(key); - infoByKey.put(key, info); - - if (w.getChunkMeta() != null) - chunkMeta.put(key, w.getChunkMeta()); - - Edges e = openEdges[typeCode]; - if (e != null) { - chunkEdges.put(key, e); - openEdges[typeCode] = null; - } - - if (currFragments == null) - chunkReadBackCache.put(key, w.getPackChunk()); - - w.unsafePut(db, dbWriteBuffer); - return key; - } - - private int findStreamIndex(long streamPosition) throws DhtException { - int high = objStreamPos.size(); - int low = 0; - do { - final int mid = (low + high) >>> 1; - final long pos = objStreamPos.get(mid); - if (streamPosition < pos) - high = mid; - else if (streamPosition == pos) - return mid; - else - low = mid + 1; - } while (low < high); - throw new DhtException(MessageFormat.format( - DhtText.get().noSavedTypeForBase, Long.valueOf(streamPosition))); - } - - private long makeObjectPointer(ChunkFormatter w, int typeCode) { - List<ChunkKey> list = chunkByOrder[typeCode]; - int idx = list == null ? 0 : list.size(); - int ptr = w.position(); - return (((long) typeCode) << 61) | (((long) idx) << 32) | ptr; - } - - private static int typeOf(long objectPtr) { - return (int) (objectPtr >>> 61); - } - - private static int chunkIdx(long objectPtr) { - return ((int) ((objectPtr << 3) >>> (32 + 3))); - } - - private static int offsetOf(long objectPtr) { - return (int) objectPtr; - } - - private boolean isInCurrentChunk(long objectPtr) { - List<ChunkKey> list = chunkByOrder[typeOf(objectPtr)]; - if (list == null) - return chunkIdx(objectPtr) == 0; - return chunkIdx(objectPtr) == list.size(); - } - - private ChunkKey chunkOf(long objectPtr) throws DhtException { - List<ChunkKey> list = chunkByOrder[typeOf(objectPtr)]; - int idx = chunkIdx(objectPtr); - if (list == null || list.size() <= idx) { - throw new DhtException(MessageFormat.format( - DhtText.get().packParserInvalidPointer, // - Constants.typeString(typeOf(objectPtr)), // - Integer.valueOf(idx), // - Integer.valueOf(offsetOf(objectPtr)))); - } - return list.get(idx); - } - - private static DhtException panicCannotInsert() { - // This exception should never happen. 
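makeObjectPointer, typeOf, chunkIdx and offsetOf above pack and unpack a temporary object pointer as 3 bits of object type, 29 bits of chunk index within chunkByOrder[type], and 32 bits of byte offset inside that chunk. A minimal round-trip sketch of that bit layout; the class name and sample values are invented for illustration, not taken from the deleted file:

    // Sketch of the 3/29/32-bit pointer layout used by the parser above.
    public class ObjectPointerDemo {
        static long make(int typeCode, int chunkIdx, int offset) {
            return (((long) typeCode) << 61) | (((long) chunkIdx) << 32) | offset;
        }

        static int typeOf(long ptr)   { return (int) (ptr >>> 61); }
        static int chunkIdx(long ptr) { return (int) ((ptr << 3) >>> (32 + 3)); }
        static int offsetOf(long ptr) { return (int) ptr; }

        public static void main(String[] args) {
            long ptr = make(1 /* OBJ_COMMIT */, 5, 8192);
            System.out.println(typeOf(ptr));   // 1
            System.out.println(chunkIdx(ptr)); // 5
            System.out.println(offsetOf(ptr)); // 8192
        }
    }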
- return new DhtException(DhtText.get().cannotInsertObject); - } - - static class DhtInfo extends PackedObjectInfo { - private static final int REFERENCED = 1 << 3; - - static final int FRAGMENTED = 1 << 4; - - long chunkPtr; - - long packedSize; - - long inflatedSize; - - ObjectId base; - - DhtInfo(AnyObjectId id) { - super(id); - } - - boolean isInPack() { - return chunkPtr != 0; - } - - boolean isReferenced() { - return (getCRC() & REFERENCED) != 0; - } - - void setReferenced() { - setCRC(getCRC() | REFERENCED); - } - - boolean isFragmented() { - return (getCRC() & FRAGMENTED) != 0; - } - - void setFragmented() { - setCRC(getCRC() | FRAGMENTED); - } - - int getType() { - return getCRC() & 7; - } - - void setType(int type) { - setCRC((getCRC() & ~7) | type); - } - - ObjectInfo info(ChunkKey chunkKey) { - GitStore.ObjectInfo.Builder b = GitStore.ObjectInfo.newBuilder(); - b.setObjectType(GitStore.ObjectInfo.ObjectType.valueOf(getType())); - b.setOffset(offsetOf(chunkPtr)); - b.setPackedSize(packedSize); - b.setInflatedSize(inflatedSize); - if (base != null) { - byte[] t = new byte[Constants.OBJECT_ID_LENGTH]; - base.copyRawTo(t, 0); - b.setDeltaBase(ByteString.copyFrom(t)); - } - if (isFragmented()) - b.setIsFragmented(true); - return new ObjectInfo(chunkKey, b.build()); - } - } - - static class DhtDelta extends UnresolvedDelta { - long chunkPtr; - - long packedSize; - - long inflatedSize; - - int getType() { - return getCRC() & 7; - } - - void setType(int type) { - setCRC((getCRC() & ~7) | type); - } - - boolean isFragmented() { - return (getCRC() & DhtInfo.FRAGMENTED) != 0; - } - - void setFragmented() { - setCRC(getCRC() | DhtInfo.FRAGMENTED); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java deleted file mode 100644 index 330b5c0734..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReader.java +++ /dev/null @@ -1,710 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT; -import static org.eclipse.jgit.lib.Constants.OBJ_TREE; - -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeoutException; -import java.util.zip.Inflater; - -import org.eclipse.jgit.errors.IncorrectObjectTypeException; -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.lib.AbbreviatedObjectId; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.AsyncObjectLoaderQueue; -import org.eclipse.jgit.lib.AsyncObjectSizeQueue; -import org.eclipse.jgit.lib.InflaterCache; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ObjectLoader; -import org.eclipse.jgit.lib.ObjectReader; -import org.eclipse.jgit.lib.ProgressMonitor; -import org.eclipse.jgit.revwalk.ObjectWalk; -import org.eclipse.jgit.revwalk.RevCommit; -import org.eclipse.jgit.revwalk.RevObject; -import org.eclipse.jgit.revwalk.RevWalk; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable; -import org.eclipse.jgit.storage.pack.CachedPack; -import org.eclipse.jgit.storage.pack.ObjectReuseAsIs; -import org.eclipse.jgit.storage.pack.ObjectToPack; -import org.eclipse.jgit.storage.pack.PackOutputStream; -import org.eclipse.jgit.storage.pack.PackWriter; - -/** - * ObjectReader implementation for DHT based repositories. - * <p> - * This class is public only to expose its unique statistics for runtime - * performance reporting. Applications should always prefer to use the more - * generic base class, {@link ObjectReader}. 
- */ -public class DhtReader extends ObjectReader implements ObjectReuseAsIs { - private final DhtRepository repository; - - private final RepositoryKey repo; - - private final Database db; - - private final DhtReaderOptions readerOptions; - - private final DhtInserterOptions inserterOptions; - - private final Statistics stats; - - private final RecentInfoCache recentInfo; - - private final RecentChunks recentChunks; - - private final DeltaBaseCache deltaBaseCache; - - private Collection<CachedPack> cachedPacks; - - private Inflater inflater; - - private Prefetcher prefetcher; - - DhtReader(DhtObjDatabase objdb) { - this.repository = objdb.getRepository(); - this.repo = objdb.getRepository().getRepositoryKey(); - this.db = objdb.getDatabase(); - this.readerOptions = objdb.getReaderOptions(); - this.inserterOptions = objdb.getInserterOptions(); - - this.stats = new Statistics(); - this.recentInfo = new RecentInfoCache(getOptions()); - this.recentChunks = new RecentChunks(this); - this.deltaBaseCache = new DeltaBaseCache(this); - } - - /** @return describes how this DhtReader has performed. */ - public Statistics getStatistics() { - return stats; - } - - Database getDatabase() { - return db; - } - - RepositoryKey getRepositoryKey() { - return repo; - } - - DhtReaderOptions getOptions() { - return readerOptions; - } - - DhtInserterOptions getInserterOptions() { - return inserterOptions; - } - - RecentInfoCache getRecentInfoCache() { - return recentInfo; - } - - RecentChunks getRecentChunks() { - return recentChunks; - } - - DeltaBaseCache getDeltaBaseCache() { - return deltaBaseCache; - } - - Inflater inflater() { - if (inflater == null) - inflater = InflaterCache.get(); - else - inflater.reset(); - return inflater; - } - - @Override - public void release() { - recentChunks.clear(); - endPrefetch(); - - InflaterCache.release(inflater); - inflater = null; - - super.release(); - } - - @Override - public ObjectReader newReader() { - return new DhtReader(repository.getObjectDatabase()); - } - - @Override - public boolean has(AnyObjectId objId, int typeHint) throws IOException { - if (objId instanceof RefDataUtil.IdWithChunk) - return true; - - if (recentChunks.has(repo, objId)) - return true; - - if (repository.getRefDatabase().findChunk(objId) != null) - return true; - - return !find(objId).isEmpty(); - } - - @Override - public ObjectLoader open(AnyObjectId objId, int typeHint) - throws MissingObjectException, IncorrectObjectTypeException, - IOException { - ObjectLoader ldr = recentChunks.open(repo, objId, typeHint); - if (ldr != null) - return ldr; - - ChunkAndOffset p = getChunk(objId, typeHint, false); - ldr = PackChunk.read(p.chunk, p.offset, this, typeHint); - recentChunk(p.chunk); - return ldr; - } - - @Override - public <T extends ObjectId> AsyncObjectLoaderQueue<T> open( - Iterable<T> objectIds, boolean reportMissing) { - return new OpenQueue<T>(this, objectIds, reportMissing); - } - - @Override - public long getObjectSize(AnyObjectId objectId, int typeHint) - throws MissingObjectException, IncorrectObjectTypeException, - IOException { - for (ObjectInfo info : find(objectId)) - return info.getSize(); - throw missing(objectId, typeHint); - } - - @Override - public <T extends ObjectId> AsyncObjectSizeQueue<T> getObjectSize( - Iterable<T> objectIds, boolean reportMissing) { - return new SizeQueue<T>(this, objectIds, reportMissing); - } - - @Override - public void walkAdviceBeginCommits(RevWalk rw, Collection<RevCommit> roots) - throws IOException { - endPrefetch(); - - // Don't assign the 
prefetcher right away. Delay until its - // configured as push might invoke our own methods that may - // try to call back into the active prefetcher. - // - Prefetcher p = prefetch(OBJ_COMMIT, readerOptions.getWalkCommitsPrefetchRatio()); - p.push(this, roots); - prefetcher = p; - } - - @Override - public void walkAdviceBeginTrees(ObjectWalk ow, RevCommit min, RevCommit max) - throws IOException { - endPrefetch(); - - // Don't assign the prefetcher right away. Delay until its - // configured as push might invoke our own methods that may - // try to call back into the active prefetcher. - // - Prefetcher p = prefetch(OBJ_TREE, readerOptions.getWalkTreesPrefetchRatio()); - p.push(this, min.getTree(), max.getTree()); - prefetcher = p; - } - - @Override - public void walkAdviceEnd() { - endPrefetch(); - } - - void recentChunk(PackChunk chunk) { - recentChunks.put(chunk); - } - - ChunkAndOffset getChunkGently(AnyObjectId objId) { - return recentChunks.find(repo, objId); - } - - ChunkAndOffset getChunk(AnyObjectId objId, int typeHint, boolean checkRecent) - throws DhtException, MissingObjectException { - if (checkRecent) { - ChunkAndOffset r = recentChunks.find(repo, objId); - if (r != null) - return r; - } - - ChunkKey key; - if (objId instanceof RefDataUtil.IdWithChunk) - key = ((RefDataUtil.IdWithChunk) objId).getChunkKey(); - else - key = repository.getRefDatabase().findChunk(objId); - - if (key != null) { - PackChunk chunk = load(key); - if (chunk != null && chunk.hasIndex()) { - int pos = chunk.findOffset(repo, objId); - if (0 <= pos) - return new ChunkAndOffset(chunk, pos); - } - - // The hint above is stale. Fall through and do a - // more exhaustive lookup to find the object. - } - - if (prefetcher != null) { - ChunkAndOffset r = prefetcher.find(repo, objId); - if (r != null) - return r; - } - - for (ObjectInfo link : find(objId)) { - PackChunk chunk; - - if (prefetcher != null) { - chunk = prefetcher.get(link.getChunkKey()); - if (chunk == null) { - chunk = load(link.getChunkKey()); - if (chunk == null) - continue; - if (prefetcher.isType(typeHint)) - prefetcher.push(chunk.getMeta()); - } - } else { - chunk = load(link.getChunkKey()); - if (chunk == null) - continue; - } - - return new ChunkAndOffset(chunk, link.getOffset()); - } - - throw missing(objId, typeHint); - } - - ChunkKey findChunk(AnyObjectId objId) throws DhtException { - if (objId instanceof RefDataUtil.IdWithChunk) - return ((RefDataUtil.IdWithChunk) objId).getChunkKey(); - - ChunkKey key = repository.getRefDatabase().findChunk(objId); - if (key != null) - return key; - - ChunkAndOffset r = recentChunks.find(repo, objId); - if (r != null) - return r.chunk.getChunkKey(); - - for (ObjectInfo link : find(objId)) - return link.getChunkKey(); - - return null; - } - - static MissingObjectException missing(AnyObjectId objId, int typeHint) { - ObjectId id = objId.copy(); - if (typeHint != OBJ_ANY) - return new MissingObjectException(id, typeHint); - return new MissingObjectException(id, DhtText.get().objectTypeUnknown); - } - - PackChunk getChunk(ChunkKey key) throws DhtException { - PackChunk chunk = recentChunks.get(key); - if (chunk != null) - return chunk; - - chunk = load(key); - if (chunk != null) - return chunk; - - throw new DhtMissingChunkException(key); - } - - @Override - public Collection<ObjectId> resolve(AbbreviatedObjectId id) - throws IOException { - // Because ObjectIndexKey requires at least 4 leading digits - // don't resolve anything that is shorter than 4 digits. 
- // - if (id.length() < 4) - return Collections.emptySet(); - - throw new DhtException.TODO("resolve abbreviations"); - } - - public DhtObjectToPack newObjectToPack(RevObject obj) { - return new DhtObjectToPack(obj); - } - - @SuppressWarnings("unchecked") - public void selectObjectRepresentation(PackWriter packer, - ProgressMonitor monitor, Iterable<ObjectToPack> objects) - throws IOException, MissingObjectException { - Iterable itr = objects; - new RepresentationSelector(packer, this, monitor).select(itr); - } - - private Prefetcher prefetch(final int type, final int ratio) { - int limit = readerOptions.getChunkLimit(); - int prefetchLimit = (int) (limit * (ratio / 100.0)); - recentChunks.setMaxBytes(limit - prefetchLimit); - return new Prefetcher(this, type, prefetchLimit); - } - - private void endPrefetch() { - recentChunks.setMaxBytes(getOptions().getChunkLimit()); - prefetcher = null; - } - - @SuppressWarnings("unchecked") - public void writeObjects(PackOutputStream out, List<ObjectToPack> objects) - throws IOException { - prefetcher = prefetch(0, readerOptions.getWriteObjectsPrefetchRatio()); - try { - List itr = objects; - new ObjectWriter(this, prefetcher).plan(itr); - for (ObjectToPack otp : objects) - out.writeObject(otp); - } finally { - endPrefetch(); - } - } - - public void copyObjectAsIs(PackOutputStream out, ObjectToPack otp, - boolean validate) throws IOException, - StoredObjectRepresentationNotAvailableException { - DhtObjectToPack obj = (DhtObjectToPack) otp; - try { - PackChunk chunk = recentChunks.get(obj.chunk); - if (chunk == null) { - chunk = prefetcher.get(obj.chunk); - if (chunk == null) { - // This should never happen during packing, it implies - // the fetch plan was incorrect. Unfortunately that can - // occur if objects need to be recompressed on the fly. 
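prefetch(type, ratio) above divides the reader's chunkLimit between the Prefetcher and the recent-chunk cache: ratio percent of the limit goes to prefetching and the remainder to recentChunks. A small worked sketch using the defaults set in DhtReaderOptions (5 MiB limit, 20% for commit walks, 90% for writeObjects); the class name here is invented for illustration:

    // Worked example of the chunkLimit split performed by prefetch() above.
    public class PrefetchBudget {
        public static void main(String[] args) {
            int chunkLimit = 5 * 1024 * 1024; // DhtReaderOptions default: 5 MiB

            // Commit walk: 20% prefetch ratio.
            int walkPrefetch = (int) (chunkLimit * (20 / 100.0)); // 1 MiB
            int walkRecent = chunkLimit - walkPrefetch;           // 4 MiB

            // writeObjects: 90% prefetch ratio.
            int writePrefetch = (int) (chunkLimit * (90 / 100.0)); // 4.5 MiB
            int writeRecent = chunkLimit - writePrefetch;          // 0.5 MiB

            System.out.println(walkPrefetch + " / " + walkRecent);
            System.out.println(writePrefetch + " / " + writeRecent);
        }
    }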
- // - stats.access(obj.chunk).cntCopyObjectAsIs_PrefetchMiss++; - chunk = getChunk(obj.chunk); - } - if (!chunk.isFragment()) - recentChunk(chunk); - } - chunk.copyObjectAsIs(out, obj, validate, this); - } catch (DhtMissingChunkException missingChunk) { - stats.access(missingChunk.getChunkKey()).cntCopyObjectAsIs_InvalidChunk++; - throw new StoredObjectRepresentationNotAvailableException(otp); - } - } - - public Collection<CachedPack> getCachedPacks() throws IOException { - if (cachedPacks == null) { - Collection<CachedPackInfo> info; - Collection<CachedPack> packs; - - try { - info = db.repository().getCachedPacks(repo); - } catch (TimeoutException e) { - throw new DhtTimeoutException(e); - } - - packs = new ArrayList<CachedPack>(info.size()); - for (CachedPackInfo i : info) - packs.add(new DhtCachedPack(i)); - cachedPacks = packs; - } - return cachedPacks; - } - - public void copyPackAsIs(PackOutputStream out, CachedPack pack, - boolean validate) throws IOException { - ((DhtCachedPack) pack).copyAsIs(out, validate, this); - } - - private List<ObjectInfo> find(AnyObjectId obj) throws DhtException { - List<ObjectInfo> info = recentInfo.get(obj); - if (info != null) - return info; - - stats.cntObjectIndex_Load++; - ObjectIndexKey idxKey = ObjectIndexKey.create(repo, obj); - Context opt = Context.READ_REPAIR; - Sync<Map<ObjectIndexKey, Collection<ObjectInfo>>> sync = Sync.create(); - db.objectIndex().get(opt, Collections.singleton(idxKey), sync); - try { - Collection<ObjectInfo> m; - - m = sync.get(getOptions().getTimeout()).get(idxKey); - if (m == null || m.isEmpty()) - return Collections.emptyList(); - - info = new ArrayList<ObjectInfo>(m); - ObjectInfo.sort(info); - recentInfo.put(obj, info); - return info; - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } catch (TimeoutException e) { - throw new DhtTimeoutException(e); - } - } - - private PackChunk load(ChunkKey chunkKey) throws DhtException { - if (0 == stats.access(chunkKey).cntReader_Load++ - && readerOptions.isTrackFirstChunkLoad()) - stats.access(chunkKey).locReader_Load = new Throwable("first"); - Context opt = Context.READ_REPAIR; - Sync<Collection<PackChunk.Members>> sync = Sync.create(); - db.chunk().get(opt, Collections.singleton(chunkKey), sync); - try { - Collection<PackChunk.Members> c = sync.get(getOptions() - .getTimeout()); - if (c.isEmpty()) - return null; - if (c instanceof List) - return ((List<PackChunk.Members>) c).get(0).build(); - return c.iterator().next().build(); - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } catch (TimeoutException e) { - throw new DhtTimeoutException(e); - } - } - - static class ChunkAndOffset { - final PackChunk chunk; - - final int offset; - - ChunkAndOffset(PackChunk chunk, int offset) { - this.chunk = chunk; - this.offset = offset; - } - } - - /** How this DhtReader has performed since creation. */ - public static class Statistics { - private final Map<ChunkKey, ChunkAccess> chunkAccess = new LinkedHashMap<ChunkKey, ChunkAccess>(); - - ChunkAccess access(ChunkKey chunkKey) { - ChunkAccess ca = chunkAccess.get(chunkKey); - if (ca == null) { - ca = new ChunkAccess(chunkKey); - chunkAccess.put(chunkKey, ca); - } - return ca; - } - - /** - * Number of sequential {@link ObjectIndexTable} lookups made by the - * reader. These were made without the support of batch lookups. - */ - public int cntObjectIndex_Load; - - /** Cycles detected in delta chains during OBJ_REF_DELTA reads. 
*/ - public int deltaChainCycles; - - int recentChunks_Hits; - - int recentChunks_Miss; - - int deltaBaseCache_Hits; - - int deltaBaseCache_Miss; - - /** @return ratio of recent chunk hits, [0.00,1.00]. */ - public double getRecentChunksHitRatio() { - int total = recentChunks_Hits + recentChunks_Miss; - return ((double) recentChunks_Hits) / total; - } - - /** @return ratio of delta base cache hits, [0.00,1.00]. */ - public double getDeltaBaseCacheHitRatio() { - int total = deltaBaseCache_Hits + deltaBaseCache_Miss; - return ((double) deltaBaseCache_Hits) / total; - } - - /** - * @return collection of chunk accesses made by the application code - * against this reader. The collection's iterator has no - * relevant order. - */ - public Collection<ChunkAccess> getChunkAccess() { - return chunkAccess.values(); - } - - @Override - public String toString() { - StringBuilder b = new StringBuilder(); - b.append("DhtReader.Statistics:\n"); - b.append(" "); - if (recentChunks_Hits != 0 || recentChunks_Miss != 0) - ratio(b, "recentChunks", getRecentChunksHitRatio()); - if (deltaBaseCache_Hits != 0 || deltaBaseCache_Miss != 0) - ratio(b, "deltaBaseCache", getDeltaBaseCacheHitRatio()); - appendFields(this, b); - b.append("\n"); - for (ChunkAccess ca : getChunkAccess()) { - b.append(" "); - b.append(ca.toString()); - b.append("\n"); - } - return b.toString(); - } - - @SuppressWarnings("boxing") - static void ratio(StringBuilder b, String name, double value) { - b.append(String.format(" %s=%.2f%%", name, value * 100.0)); - } - - static void appendFields(Object obj, StringBuilder b) { - try { - for (Field field : obj.getClass().getDeclaredFields()) { - String n = field.getName(); - - if (field.getType() == Integer.TYPE - && (field.getModifiers() & Modifier.PUBLIC) != 0) { - int v = field.getInt(obj); - if (0 < v) - b.append(' ').append(n).append('=').append(v); - } - } - } catch (IllegalArgumentException e) { - throw new RuntimeException(e); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); - } - } - - /** Summary describing how a chunk was accessed. */ - public static final class ChunkAccess { - /** Chunk this access block describes. */ - public final ChunkKey chunkKey; - - /** - * Number of times chunk was loaded sequentially. Incremented when - * the reader had to load the chunk on demand with no cache or - * prefetcher support. - */ - public int cntReader_Load; - - Throwable locReader_Load; - - /** - * Number of times the prefetcher loaded from the database. - * Incremented each time the prefetcher asked for the chunk from the - * underlying database (which might have its own distributed cache, - * or not). - */ - public int cntPrefetcher_Load; - - /** - * Number of times the prefetcher ordering was wrong. Incremented if - * a reader wants a chunk but the prefetcher didn't have it ready at - * the time of request. This indicates a bad prefetching plan as the - * chunk should have been listed earlier in the prefetcher's list. - */ - public int cntPrefetcher_OutOfOrder; - - /** - * Number of times the reader had to stall to wait for a chunk that - * is currently being prefetched to finish loading and become ready. - * This indicates the prefetcher may have fetched other chunks first - * (had the wrong order), or does not have a deep enough window to - * hide these loads from the application. - */ - public int cntPrefetcher_WaitedForLoad; - - /** - * Number of times the reader asked the prefetcher for the same - * chunk after it was already consumed from the prefetcher. 
This - * indicates the reader has walked back on itself and revisited a - * chunk again. - */ - public int cntPrefetcher_Revisited; - - /** - * Number of times the reader needed this chunk to copy an object - * as-is into a pack stream, but the prefetcher didn't have it - * ready. This correlates with {@link #cntPrefetcher_OutOfOrder} or - * {@link #cntPrefetcher_Revisited}. - */ - public int cntCopyObjectAsIs_PrefetchMiss; - - /** - * Number of times the reader tried to copy an object from this - * chunk, but discovered the chunk was corrupt or did not contain - * the object as expected. - */ - public int cntCopyObjectAsIs_InvalidChunk; - - ChunkAccess(ChunkKey key) { - chunkKey = key; - } - - @Override - public String toString() { - StringBuilder b = new StringBuilder(); - b.append(chunkKey).append('['); - appendFields(this, b); - b.append(" ]"); - if (locReader_Load != null) { - StringWriter sw = new StringWriter(); - locReader_Load.printStackTrace(new PrintWriter(sw)); - b.append(sw); - } - return b.toString(); - } - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java deleted file mode 100644 index db3f51028f..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtReaderOptions.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.lib.Config; - -/** Options controlling how objects are read from a DHT stored repository. 
*/ -public class DhtReaderOptions { - /** 1024 (number of bytes in one kibibyte/kilobyte) */ - public static final int KiB = 1024; - - /** 1024 {@link #KiB} (number of bytes in one mebibyte/megabyte) */ - public static final int MiB = 1024 * KiB; - - private Timeout timeout; - - private boolean prefetchFollowEdgeHints; - - private int chunkLimit; - - private int openQueuePrefetchRatio; - - private int walkCommitsPrefetchRatio; - - private int walkTreesPrefetchRatio; - - private int writeObjectsPrefetchRatio; - - private int objectIndexConcurrentBatches; - - private int objectIndexBatchSize; - - private int deltaBaseCacheSize; - - private int deltaBaseCacheLimit; - - private int recentInfoCacheSize; - - private boolean trackFirstChunkLoad; - - /** Create a default reader configuration. */ - public DhtReaderOptions() { - setTimeout(Timeout.seconds(5)); - setPrefetchFollowEdgeHints(true); - - setChunkLimit(5 * MiB); - setOpenQueuePrefetchRatio(20 /* percent */); - setWalkCommitsPrefetchRatio(20 /* percent */); - setWalkTreesPrefetchRatio(20 /* percent */); - setWriteObjectsPrefetchRatio(90 /* percent */); - - setObjectIndexConcurrentBatches(2); - setObjectIndexBatchSize(512); - - setDeltaBaseCacheSize(1024); - setDeltaBaseCacheLimit(10 * MiB); - - setRecentInfoCacheSize(4096); - } - - /** @return default timeout to wait on long operations before aborting. */ - public Timeout getTimeout() { - return timeout; - } - - /** - * Set the default timeout to wait on long operations. - * - * @param maxWaitTime - * new wait time. - * @return {@code this} - */ - public DhtReaderOptions setTimeout(Timeout maxWaitTime) { - if (maxWaitTime == null || maxWaitTime.getTime() < 0) - throw new IllegalArgumentException(); - timeout = maxWaitTime; - return this; - } - - /** @return if the prefetcher should follow edge hints (experimental) */ - public boolean isPrefetchFollowEdgeHints() { - return prefetchFollowEdgeHints; - } - - /** - * Enable (or disable) the experimental edge following feature. - * - * @param follow - * true to follow the edge hints. - * @return {@code this} - */ - public DhtReaderOptions setPrefetchFollowEdgeHints(boolean follow) { - prefetchFollowEdgeHints = follow; - return this; - } - - /** @return number of bytes to hold within a DhtReader. */ - public int getChunkLimit() { - return chunkLimit; - } - - /** - * Set the number of bytes hold within a DhtReader. - * - * @param maxBytes - * @return {@code this} - */ - public DhtReaderOptions setChunkLimit(int maxBytes) { - chunkLimit = Math.max(1024, maxBytes); - return this; - } - - /** @return percentage of {@link #getChunkLimit()} used for prefetch, 0..100. */ - public int getOpenQueuePrefetchRatio() { - return openQueuePrefetchRatio; - } - - /** - * Set the prefetch ratio used by the open object queue. - * - * @param ratio 0..100. - * @return {@code this} - */ - public DhtReaderOptions setOpenQueuePrefetchRatio(int ratio) { - openQueuePrefetchRatio = Math.max(0, Math.min(ratio, 100)); - return this; - } - - /** @return percentage of {@link #getChunkLimit()} used for prefetch, 0..100. */ - public int getWalkCommitsPrefetchRatio() { - return walkCommitsPrefetchRatio; - } - - /** - * Set the prefetch ratio used by the open object queue. - * - * @param ratio 0..100. - * @return {@code this} - */ - public DhtReaderOptions setWalkCommitsPrefetchRatio(int ratio) { - walkCommitsPrefetchRatio = Math.max(0, Math.min(ratio, 100)); - return this; - } - - /** @return percentage of {@link #getChunkLimit()} used for prefetch, 0..100. 
*/ - public int getWalkTreesPrefetchRatio() { - return walkTreesPrefetchRatio; - } - - /** - * Set the prefetch ratio used by the open object queue. - * - * @param ratio 0..100. - * @return {@code this} - */ - public DhtReaderOptions setWalkTreesPrefetchRatio(int ratio) { - walkTreesPrefetchRatio = Math.max(0, Math.min(ratio, 100)); - return this; - } - - /** @return percentage of {@link #getChunkLimit()} used for prefetch, 0..100. */ - public int getWriteObjectsPrefetchRatio() { - return writeObjectsPrefetchRatio; - } - - /** - * Set the prefetch ratio used by the open object queue. - * - * @param ratio 0..100. - * @return {@code this} - */ - public DhtReaderOptions setWriteObjectsPrefetchRatio(int ratio) { - writeObjectsPrefetchRatio = Math.max(0, Math.min(ratio, 100)); - return this; - } - - /** @return number of concurrent reads against ObjectIndexTable. */ - public int getObjectIndexConcurrentBatches() { - return objectIndexConcurrentBatches; - } - - /** - * Set the number of concurrent readers on ObjectIndexTable. - * - * @param batches - * number of batches. - * @return {@code this} - */ - public DhtReaderOptions setObjectIndexConcurrentBatches(int batches) { - objectIndexConcurrentBatches = Math.max(1, batches); - return this; - } - - /** @return number of objects to lookup in one batch. */ - public int getObjectIndexBatchSize() { - return objectIndexBatchSize; - } - - /** - * Set the number of objects to lookup at once. - * - * @param objectCnt - * the number of objects in a lookup batch. - * @return {@code this} - */ - public DhtReaderOptions setObjectIndexBatchSize(int objectCnt) { - objectIndexBatchSize = Math.max(1, objectCnt); - return this; - } - - /** @return size of the delta base cache hash table, in object entries. */ - public int getDeltaBaseCacheSize() { - return deltaBaseCacheSize; - } - - /** - * Set the size of the delta base cache hash table. - * - * @param slotCnt - * number of slots in the hash table. - * @return {@code this} - */ - public DhtReaderOptions setDeltaBaseCacheSize(int slotCnt) { - deltaBaseCacheSize = Math.max(1, slotCnt); - return this; - } - - /** @return maximum number of bytes to hold in per-reader DeltaBaseCache. */ - public int getDeltaBaseCacheLimit() { - return deltaBaseCacheLimit; - } - - /** - * Set the maximum number of bytes in the DeltaBaseCache. - * - * @param maxBytes - * the new limit. - * @return {@code this} - */ - public DhtReaderOptions setDeltaBaseCacheLimit(int maxBytes) { - deltaBaseCacheLimit = Math.max(0, maxBytes); - return this; - } - - /** @return number of objects to cache information on. */ - public int getRecentInfoCacheSize() { - return recentInfoCacheSize; - } - - /** - * Set the number of objects to cache information on. - * - * @param objectCnt - * the number of objects to cache. - * @return {@code this} - */ - public DhtReaderOptions setRecentInfoCacheSize(int objectCnt) { - recentInfoCacheSize = Math.max(0, objectCnt); - return this; - } - - /** - * @return true if {@link DhtReader.Statistics} includes the stack trace for - * the first time a chunk is loaded. Supports debugging DHT code. - */ - public boolean isTrackFirstChunkLoad() { - return trackFirstChunkLoad; - } - - /** - * Set whether or not the initial load of each chunk should be tracked. - * - * @param track - * true to track the stack trace of the first load. - * @return {@code this}. 
- */ - public DhtReaderOptions setTrackFirstChunkLoad(boolean track) { - trackFirstChunkLoad = track; - return this; - } - - /** - * Update properties by setting fields from the configuration. - * <p> - * If a property is not defined in the configuration, then it is left - * unmodified. - * - * @param rc - * configuration to read properties from. - * @return {@code this} - */ - public DhtReaderOptions fromConfig(Config rc) { - setTimeout(Timeout.getTimeout(rc, "core", "dht", "timeout", getTimeout())); - setPrefetchFollowEdgeHints(rc.getBoolean("core", "dht", "prefetchFollowEdgeHints", isPrefetchFollowEdgeHints())); - setChunkLimit(rc.getInt("core", "dht", "chunkLimit", getChunkLimit())); - setOpenQueuePrefetchRatio(rc.getInt("core", "dht", "openQueuePrefetchRatio", getOpenQueuePrefetchRatio())); - setWalkCommitsPrefetchRatio(rc.getInt("core", "dht", "walkCommitsPrefetchRatio", getWalkCommitsPrefetchRatio())); - setWalkTreesPrefetchRatio(rc.getInt("core", "dht", "walkTreesPrefetchRatio", getWalkTreesPrefetchRatio())); - setWriteObjectsPrefetchRatio(rc.getInt("core", "dht", "writeObjectsPrefetchRatio", getWriteObjectsPrefetchRatio())); - - setObjectIndexConcurrentBatches(rc.getInt("core", "dht", "objectIndexConcurrentBatches", getObjectIndexConcurrentBatches())); - setObjectIndexBatchSize(rc.getInt("core", "dht", "objectIndexBatchSize", getObjectIndexBatchSize())); - - setDeltaBaseCacheSize(rc.getInt("core", "dht", "deltaBaseCacheSize", getDeltaBaseCacheSize())); - setDeltaBaseCacheLimit(rc.getInt("core", "dht", "deltaBaseCacheLimit", getDeltaBaseCacheLimit())); - - setRecentInfoCacheSize(rc.getInt("core", "dht", "recentInfoCacheSize", getRecentInfoCacheSize())); - - setTrackFirstChunkLoad(rc.getBoolean("core", "dht", "debugTrackFirstChunkLoad", isTrackFirstChunkLoad())); - return this; - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java deleted file mode 100644 index b4394494a9..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefDatabase.java +++ /dev/null @@ -1,524 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
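DhtReaderOptions.fromConfig above maps every option to a key under core.dht. A hedged usage sketch, assuming JGit's usual [section "subsection"] config syntax; the key names are taken from fromConfig, the values are arbitrary examples, and DhtReaderOptions refers to the class removed by this change:

    import org.eclipse.jgit.errors.ConfigInvalidException;
    import org.eclipse.jgit.lib.Config;
    import org.eclipse.jgit.storage.dht.DhtReaderOptions;

    // Sketch: building reader options from configuration text.
    public class ReaderOptionsFromConfig {
        public static void main(String[] args) throws ConfigInvalidException {
            Config rc = new Config();
            rc.fromText("[core \"dht\"]\n"
                    + "  chunkLimit = 10485760\n"         // 10 MiB reader budget
                    + "  walkCommitsPrefetchRatio = 25\n" // percent of chunkLimit
                    + "  objectIndexBatchSize = 1024\n"
                    + "  debugTrackFirstChunkLoad = true\n");

            DhtReaderOptions opts = new DhtReaderOptions().fromConfig(rc);
            System.out.println(opts.getChunkLimit());     // 10485760
        }
    }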
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Ref.Storage.LOOSE; -import static org.eclipse.jgit.lib.Ref.Storage.NEW; -import static org.eclipse.jgit.storage.dht.RefDataUtil.NONE; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; - -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.RefData; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ObjectIdSubclassMap; -import org.eclipse.jgit.lib.Ref; -import org.eclipse.jgit.lib.RefDatabase; -import org.eclipse.jgit.lib.RefRename; -import org.eclipse.jgit.lib.SymbolicRef; -import org.eclipse.jgit.revwalk.RevObject; -import org.eclipse.jgit.revwalk.RevTag; -import org.eclipse.jgit.revwalk.RevWalk; -import org.eclipse.jgit.storage.dht.RefDataUtil.IdWithChunk; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.util.RefList; -import org.eclipse.jgit.util.RefMap; - -/** Repository references stored on top of a DHT database. 
*/ -public class DhtRefDatabase extends RefDatabase { - private final DhtRepository repository; - - private final Database db; - - private final AtomicReference<RefCache> cache; - - DhtRefDatabase(DhtRepository repository, Database db) { - this.repository = repository; - this.db = db; - this.cache = new AtomicReference<RefCache>(); - } - - DhtRepository getRepository() { - return repository; - } - - ChunkKey findChunk(AnyObjectId id) { - RefCache c = cache.get(); - if (c != null) { - IdWithChunk i = c.hints.get(id); - if (i != null) - return i.getChunkKey(); - } - return null; - } - - @Override - public Ref getRef(String needle) throws IOException { - RefCache curr = readRefs(); - for (String prefix : SEARCH_PATH) { - DhtRef ref = curr.ids.get(prefix + needle); - if (ref != null) { - ref = resolve(ref, 0, curr.ids); - return ref; - } - } - return null; - } - - private DhtRef getOneRef(String refName) throws IOException { - RefCache curr = readRefs(); - DhtRef ref = curr.ids.get(refName); - if (ref != null) - return resolve(ref, 0, curr.ids); - return ref; - } - - @Override - public List<Ref> getAdditionalRefs() { - return Collections.emptyList(); - } - - @Override - public Map<String, Ref> getRefs(String prefix) throws IOException { - RefCache curr = readRefs(); - RefList<DhtRef> packed = RefList.emptyList(); - RefList<DhtRef> loose = curr.ids; - RefList.Builder<DhtRef> sym = new RefList.Builder<DhtRef>(curr.sym.size()); - - for (int idx = 0; idx < curr.sym.size(); idx++) { - DhtRef ref = curr.sym.get(idx); - String name = ref.getName(); - ref = resolve(ref, 0, loose); - if (ref != null && ref.getObjectId() != null) { - sym.add(ref); - } else { - // A broken symbolic reference, we have to drop it from the - // collections the client is about to receive. Should be a - // rare occurrence so pay a copy penalty. 
- int toRemove = loose.find(name); - if (0 <= toRemove) - loose = loose.remove(toRemove); - } - } - - return new RefMap(prefix, packed, loose, sym.toRefList()); - } - - private DhtRef resolve(DhtRef ref, int depth, RefList<DhtRef> loose) - throws IOException { - if (!ref.isSymbolic()) - return ref; - - DhtRef dst = (DhtRef) ref.getTarget(); - - if (MAX_SYMBOLIC_REF_DEPTH <= depth) - return null; // claim it doesn't exist - - dst = loose.get(dst.getName()); - if (dst == null) - return ref; - - dst = resolve(dst, depth + 1, loose); - if (dst == null) - return null; - - return new DhtSymbolicRef( - ref.getName(), - dst, - ((DhtSymbolicRef) ref).getRefData()); - } - - @Override - public Ref peel(Ref ref) throws IOException { - final Ref oldLeaf = ref.getLeaf(); - if (oldLeaf.isPeeled() || oldLeaf.getObjectId() == null) - return ref; - - DhtRef newLeaf = doPeel(oldLeaf); - - RefCache cur = readRefs(); - int idx = cur.ids.find(oldLeaf.getName()); - if (0 <= idx && cur.ids.get(idx) == oldLeaf) { - RefList<DhtRef> newList = cur.ids.set(idx, newLeaf); - if (cache.compareAndSet(cur, new RefCache(newList, cur))) - cachePeeledState(oldLeaf, newLeaf); - } - - return recreate(ref, newLeaf); - } - - private void cachePeeledState(Ref oldLeaf, Ref newLeaf) { - // TODO(spearce) Use an ExecutorService here - try { - RepositoryKey repo = repository.getRepositoryKey(); - RefKey key = RefKey.create(repo, newLeaf.getName()); - RefData oldData = ((DhtRef) oldLeaf).getRefData(); - RefData newData = ((DhtRef) newLeaf).getRefData(); - db.ref().compareAndPut(key, oldData, newData); - } catch (TimeoutException e) { - // Ignore a timeout here, we were only trying to update - // a cached value to save peeling costs in the future. - - } catch (DhtException e) { - // Ignore a database error, this was only an attempt to - // fix a value that could be cached to save time later. 
- } - } - - private DhtRef doPeel(final Ref leaf) throws MissingObjectException, - IOException { - RevWalk rw = new RevWalk(getRepository()); - try { - DhtReader ctx = (DhtReader) rw.getObjectReader(); - RevObject obj = rw.parseAny(leaf.getObjectId()); - RefData.Builder d = RefData.newBuilder(((DhtRef) leaf).getRefData()); - - ChunkKey oKey = ctx.findChunk(leaf.getObjectId()); - if (oKey != null) - d.getTargetBuilder().setChunkKey(oKey.asString()); - else - d.getTargetBuilder().clearChunkKey(); - - if (obj instanceof RevTag) { - ObjectId pId = rw.peel(obj); - d.getPeeledBuilder().setObjectName(pId.name()); - - ChunkKey pKey = ctx.findChunk(pId); - if (pKey != null) - d.getPeeledBuilder().setChunkKey(pKey.asString()); - else - d.getPeeledBuilder().clearChunkKey(); - } else { - d.clearPeeled(); - } - - d.setIsPeeled(true); - d.setSequence(d.getSequence() + 1); - return new DhtObjectIdRef(leaf.getName(), d.build()); - } finally { - rw.release(); - } - } - - private static Ref recreate(final Ref old, final Ref leaf) { - if (old.isSymbolic()) { - Ref dst = recreate(old.getTarget(), leaf); - return new SymbolicRef(old.getName(), dst); - } - return leaf; - } - - @Override - public DhtRefUpdate newUpdate(String refName, boolean detach) - throws IOException { - boolean detachingSymbolicRef = false; - DhtRef ref = getOneRef(refName); - if (ref == null) - ref = new DhtObjectIdRef(refName, NONE); - else - detachingSymbolicRef = detach && ref.isSymbolic(); - - if (detachingSymbolicRef) { - RefData src = ((DhtRef) ref.getLeaf()).getRefData(); - RefData.Builder b = RefData.newBuilder(ref.getRefData()); - b.clearSymref(); - b.setTarget(src.getTarget()); - ref = new DhtObjectIdRef(refName, b.build()); - } - - RepositoryKey repo = repository.getRepositoryKey(); - DhtRefUpdate update = new DhtRefUpdate(this, repo, db, ref); - if (detachingSymbolicRef) - update.setDetachingSymbolicRef(); - return update; - } - - @Override - public RefRename newRename(String fromName, String toName) - throws IOException { - DhtRefUpdate src = newUpdate(fromName, true); - DhtRefUpdate dst = newUpdate(toName, true); - return new DhtRefRename(src, dst); - } - - @Override - public boolean isNameConflicting(String refName) throws IOException { - RefList<DhtRef> all = readRefs().ids; - - // Cannot be nested within an existing reference. - int lastSlash = refName.lastIndexOf('/'); - while (0 < lastSlash) { - String needle = refName.substring(0, lastSlash); - if (all.contains(needle)) - return true; - lastSlash = refName.lastIndexOf('/', lastSlash - 1); - } - - // Cannot be the container of an existing reference. - String prefix = refName + '/'; - int idx = -(all.find(prefix) + 1); - if (idx < all.size() && all.get(idx).getName().startsWith(prefix)) - return true; - return false; - } - - @Override - public void create() { - // Nothing to do. 
- } - - @Override - public void close() { - clearCache(); - } - - void clearCache() { - cache.set(null); - } - - void stored(String refName, RefData newData) { - DhtRef ref = fromData(refName, newData); - RefCache oldCache, newCache; - do { - oldCache = cache.get(); - if (oldCache == null) - return; - - RefList<DhtRef> ids = oldCache.ids.put(ref); - RefList<DhtRef> sym = oldCache.sym; - - if (ref.isSymbolic()) { - sym = sym.put(ref); - } else { - int p = sym.find(refName); - if (0 <= p) - sym = sym.remove(p); - } - - newCache = new RefCache(ids, sym, oldCache.hints); - } while (!cache.compareAndSet(oldCache, newCache)); - } - - void removed(String refName) { - RefCache oldCache, newCache; - do { - oldCache = cache.get(); - if (oldCache == null) - return; - - int p; - - RefList<DhtRef> ids = oldCache.ids; - p = ids.find(refName); - if (0 <= p) - ids = ids.remove(p); - - RefList<DhtRef> sym = oldCache.sym; - p = sym.find(refName); - if (0 <= p) - sym = sym.remove(p); - - newCache = new RefCache(ids, sym, oldCache.hints); - } while (!cache.compareAndSet(oldCache, newCache)); - } - - private RefCache readRefs() throws DhtException { - RefCache c = cache.get(); - if (c == null) { - try { - c = read(); - } catch (TimeoutException e) { - throw new DhtTimeoutException(e); - } - cache.set(c); - } - return c; - } - - private RefCache read() throws DhtException, TimeoutException { - RefList.Builder<DhtRef> id = new RefList.Builder<DhtRef>(); - RefList.Builder<DhtRef> sym = new RefList.Builder<DhtRef>(); - ObjectIdSubclassMap<IdWithChunk> hints = new ObjectIdSubclassMap<IdWithChunk>(); - - for (Map.Entry<RefKey, RefData> e : scan()) { - DhtRef ref = fromData(e.getKey().getName(), e.getValue()); - - if (ref.isSymbolic()) - sym.add(ref); - id.add(ref); - - if (ref.getObjectId() instanceof IdWithChunk - && !hints.contains(ref.getObjectId())) - hints.add((IdWithChunk) ref.getObjectId()); - if (ref.getPeeledObjectId() instanceof IdWithChunk - && !hints.contains(ref.getPeeledObjectId())) - hints.add((IdWithChunk) ref.getPeeledObjectId()); - } - - id.sort(); - sym.sort(); - - return new RefCache(id.toRefList(), sym.toRefList(), hints); - } - - static DhtRef fromData(String name, RefData data) { - if (data.hasSymref()) - return new DhtSymbolicRef(name, data); - else - return new DhtObjectIdRef(name, data); - } - - private static ObjectId idFrom(RefData.Id src) { - ObjectId id = ObjectId.fromString(src.getObjectName()); - if (!src.hasChunkKey()) - return id; - return new IdWithChunk(id, ChunkKey.fromString(src.getChunkKey())); - } - - private Set<Map.Entry<RefKey, RefData>> scan() throws DhtException, - TimeoutException { - // TODO(spearce) Do we need to perform READ_REPAIR here? 
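stored() and removed() above update the ref cache without locking: they copy the current immutable RefCache, apply the change, and publish the result with compareAndSet, looping if another thread swapped the cache first. A minimal sketch of that copy-on-write pattern on an AtomicReference; the Cache type and its entry list are invented stand-ins for RefCache:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;

    // Copy-on-write update loop in the style of stored()/removed() above.
    public class CasCacheUpdate {
        static final class Cache {
            final List<String> entries;
            Cache(List<String> entries) {
                this.entries = Collections.unmodifiableList(entries);
            }
        }

        private final AtomicReference<Cache> cache =
                new AtomicReference<Cache>(new Cache(new ArrayList<String>()));

        void stored(String refName) {
            Cache oldCache, newCache;
            do {
                oldCache = cache.get();
                List<String> copy = new ArrayList<String>(oldCache.entries);
                if (!copy.contains(refName))
                    copy.add(refName);
                newCache = new Cache(copy);
            } while (!cache.compareAndSet(oldCache, newCache)); // retry on races
        }

        public static void main(String[] args) {
            CasCacheUpdate c = new CasCacheUpdate();
            c.stored("refs/heads/master");
            System.out.println(c.cache.get().entries);
        }
    }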
- RepositoryKey repo = repository.getRepositoryKey(); - return db.ref().getAll(Context.LOCAL, repo).entrySet(); - } - - private static class RefCache { - final RefList<DhtRef> ids; - - final RefList<DhtRef> sym; - - final ObjectIdSubclassMap<IdWithChunk> hints; - - RefCache(RefList<DhtRef> ids, RefList<DhtRef> sym, - ObjectIdSubclassMap<IdWithChunk> hints) { - this.ids = ids; - this.sym = sym; - this.hints = hints; - } - - RefCache(RefList<DhtRef> ids, RefCache old) { - this(ids, old.sym, old.hints); - } - } - - static interface DhtRef extends Ref { - RefData getRefData(); - } - - private static class DhtSymbolicRef extends SymbolicRef implements DhtRef { - private final RefData data; - - DhtSymbolicRef(String refName,RefData data) { - super(refName, new DhtObjectIdRef(data.getSymref(), NONE)); - this.data = data; - } - - DhtSymbolicRef(String refName, Ref target, RefData data) { - super(refName, target); - this.data = data; - } - - public RefData getRefData() { - return data; - } - } - - private static class DhtObjectIdRef implements DhtRef { - private final String name; - private final RefData data; - private final ObjectId objectId; - private final ObjectId peeledId; - - DhtObjectIdRef(String name, RefData data) { - this.name = name; - this.data = data; - this.objectId = data.hasTarget() ? idFrom(data.getTarget()) : null; - this.peeledId = data.hasPeeled() ? idFrom(data.getPeeled()) : null; - } - - public String getName() { - return name; - } - - public boolean isSymbolic() { - return false; - } - - public Ref getLeaf() { - return this; - } - - public Ref getTarget() { - return this; - } - - public ObjectId getObjectId() { - return objectId; - } - - public Ref.Storage getStorage() { - return data.hasTarget() ? LOOSE : NEW; - } - - public boolean isPeeled() { - return data.getIsPeeled(); - } - - public ObjectId getPeeledObjectId() { - return peeledId; - } - - public RefData getRefData() { - return data; - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java deleted file mode 100644 index 4df3bde787..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefRename.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; - -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.RefRename; -import org.eclipse.jgit.lib.RefUpdate.Result; - -class DhtRefRename extends RefRename { - DhtRefRename(DhtRefUpdate src, DhtRefUpdate dst) { - super(src, dst); - } - - @Override - protected Result doRename() throws IOException { - // TODO(spearce) Correctly handle renameing foo/bar to foo. - - destination.setExpectedOldObjectId(ObjectId.zeroId()); - destination.setNewObjectId(source.getRef().getObjectId()); - switch (destination.update()) { - case NEW: - source.delete(); - return Result.RENAMED; - - default: - return destination.getResult(); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java deleted file mode 100644 index cb363d0376..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRefUpdate.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
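DhtRefRename above performs a rename as "create the destination, then delete the source": the destination update expects the zero id, so it only succeeds when the target reference does not already exist. Application code normally reaches this through JGit's public API; a hedged usage sketch, assuming an already-open Repository named repo with an existing refs/heads/topic branch:

import java.io.IOException;

import org.eclipse.jgit.lib.RefRename;
import org.eclipse.jgit.lib.RefUpdate;
import org.eclipse.jgit.lib.Repository;

class RenameSketch {
    static RefUpdate.Result rename(Repository repo) throws IOException {
        RefRename rename = repo.renameRef("refs/heads/topic", "refs/heads/topic2");
        return rename.rename(); // RENAMED on success, otherwise the failing Result
    }
}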
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.RefData; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.Ref; -import org.eclipse.jgit.lib.RefUpdate; -import org.eclipse.jgit.revwalk.RevObject; -import org.eclipse.jgit.revwalk.RevTag; -import org.eclipse.jgit.revwalk.RevWalk; -import org.eclipse.jgit.storage.dht.DhtRefDatabase.DhtRef; -import org.eclipse.jgit.storage.dht.spi.Database; - -class DhtRefUpdate extends RefUpdate { - private final DhtRefDatabase refdb; - - private final RepositoryKey repo; - - private final Database db; - - private RefKey refKey; - - private RefData oldData; - - private RefData newData; - - private Ref dstRef; - - private RevWalk rw; - - DhtRefUpdate(DhtRefDatabase refdb, RepositoryKey repo, Database db, Ref ref) { - super(ref); - this.refdb = refdb; - this.repo = repo; - this.db = db; - } - - @Override - protected DhtRefDatabase getRefDatabase() { - return refdb; - } - - @Override - protected DhtRepository getRepository() { - return refdb.getRepository(); - } - - @Override - public Result update(RevWalk walk) throws IOException { - try { - rw = walk; - return super.update(walk); - } finally { - rw = null; - } - } - - @Override - protected boolean tryLock(boolean deref) throws IOException { - dstRef = getRef(); - if (deref) - dstRef = dstRef.getLeaf(); - - refKey = RefKey.create(repo, dstRef.getName()); - oldData = ((DhtRef) dstRef).getRefData(); - - if (dstRef.isSymbolic()) - setOldObjectId(null); - else - setOldObjectId(dstRef.getObjectId()); - - return true; - } - - @Override - protected void unlock() { - // No state is held while "locked". 
- } - - @Override - protected Result doUpdate(Result desiredResult) throws IOException { - try { - newData = newData(); - boolean r = db.ref().compareAndPut(refKey, oldData, newData); - if (r) { - getRefDatabase().stored(dstRef.getName(), newData); - return desiredResult; - } else { - getRefDatabase().clearCache(); - return Result.LOCK_FAILURE; - } - } catch (TimeoutException e) { - return Result.IO_FAILURE; - } - } - - @Override - protected Result doDelete(Result desiredResult) throws IOException { - try { - boolean r = db.ref().compareAndRemove(refKey, oldData); - if (r) { - getRefDatabase().removed(dstRef.getName()); - return desiredResult; - } else { - getRefDatabase().clearCache(); - return Result.LOCK_FAILURE; - } - } catch (TimeoutException e) { - return Result.IO_FAILURE; - } - } - - @Override - protected Result doLink(String target) throws IOException { - try { - RefData.Builder d = RefData.newBuilder(oldData); - clearRefData(d); - updateSequence(d); - d.setSymref(target); - newData = d.build(); - boolean r = db.ref().compareAndPut(refKey, oldData, newData); - if (r) { - getRefDatabase().stored(dstRef.getName(), newData); - if (getRef().getStorage() == Ref.Storage.NEW) - return Result.NEW; - return Result.FORCED; - } else { - getRefDatabase().clearCache(); - return Result.LOCK_FAILURE; - } - } catch (TimeoutException e) { - return Result.IO_FAILURE; - } - } - - private RefData newData() throws IOException { - RefData.Builder d = RefData.newBuilder(oldData); - clearRefData(d); - updateSequence(d); - - ObjectId newId = getNewObjectId(); - d.getTargetBuilder().setObjectName(newId.name()); - try { - DhtReader ctx = (DhtReader) rw.getObjectReader(); - RevObject obj = rw.parseAny(newId); - - ChunkKey oKey = ctx.findChunk(newId); - if (oKey != null) - d.getTargetBuilder().setChunkKey(oKey.asString()); - - if (obj instanceof RevTag) { - ObjectId pId = rw.peel(obj); - ChunkKey pKey = ctx.findChunk(pId); - if (pKey != null) - d.getPeeledBuilder().setChunkKey(pKey.asString()); - d.getPeeledBuilder().setObjectName(pId.name()); - } - } catch (MissingObjectException e) { - // Automatic peeling failed. Ignore the problem and deal with it - // during reading later, this is the classical Git behavior on disk. - } - return d.build(); - } - - private static void clearRefData(RefData.Builder d) { - // Clear fields individually rather than discarding the RefData. - // This way implementation specific extensions are carried - // through from the old version to the new version. - d.clearSymref(); - d.clearTarget(); - d.clearPeeled(); - d.clearIsPeeled(); - } - - private static void updateSequence(RefData.Builder d) { - d.setSequence(d.getSequence() + 1); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java deleted file mode 100644 index faff469e90..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepository.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
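doUpdate() and doDelete() above compress a reference transition into a single compareAndPut or compareAndRemove against the ref table, answering LOCK_FAILURE when another writer raced ahead. Callers see this through the standard RefUpdate API; a hedged usage sketch, assuming an open Repository named repo and a commit id newId to point the branch at:

import java.io.IOException;

import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.RefUpdate;
import org.eclipse.jgit.lib.Repository;

class RefUpdateSketch {
    static RefUpdate.Result advanceMaster(Repository repo, ObjectId newId)
            throws IOException {
        RefUpdate u = repo.updateRef("refs/heads/master");
        u.setNewObjectId(newId);
        return u.update(); // e.g. NEW, FAST_FORWARD, or LOCK_FAILURE on a race
    }
}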
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; -import java.text.MessageFormat; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.lib.RefUpdate; -import org.eclipse.jgit.lib.Repository; -import org.eclipse.jgit.lib.StoredConfig; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.file.ReflogReader; - -/** - * A Git repository storing its objects and references in a DHT. - * <p> - * With the exception of repository creation, this class is thread-safe, but - * readers created from it are not. When creating a new repository using the - * {@link #create(boolean)} method, the newly constructed repository object does - * not ensure the assigned {@link #getRepositoryKey()} will be visible to all - * threads. Applications are encouraged to use their own synchronization when - * sharing a Repository instance that was used to create a new repository. - */ -public class DhtRepository extends Repository { - private final RepositoryName name; - - private final Database db; - - private final DhtRefDatabase refdb; - - private final DhtObjDatabase objdb; - - private final DhtConfig config; - - private RepositoryKey key; - - /** - * Initialize an in-memory representation of a DHT backed repository. - * - * @param builder - * description of the repository and its data storage. - */ - public DhtRepository(DhtRepositoryBuilder builder) { - super(builder); - this.name = RepositoryName.create(builder.getRepositoryName()); - this.key = builder.getRepositoryKey(); - this.db = builder.getDatabase(); - - this.refdb = new DhtRefDatabase(this, db); - this.objdb = new DhtObjDatabase(this, builder); - this.config = new DhtConfig(); - } - - /** @return database cluster that houses this repository (among others). */ - public Database getDatabase() { - return db; - } - - /** @return human readable name used to open this repository. 
*/ - public RepositoryName getRepositoryName() { - return name; - } - - /** @return unique identity of the repository in the {@link #getDatabase()}. */ - public RepositoryKey getRepositoryKey() { - return key; - } - - @Override - public StoredConfig getConfig() { - return config; - } - - @Override - public DhtRefDatabase getRefDatabase() { - return refdb; - } - - @Override - public DhtObjDatabase getObjectDatabase() { - return objdb; - } - - @Override - public void create(boolean bare) throws IOException { - if (!bare) - throw new IllegalArgumentException( - DhtText.get().repositoryMustBeBare); - - if (getObjectDatabase().exists()) - throw new DhtException(MessageFormat.format( - DhtText.get().repositoryAlreadyExists, name.asString())); - - try { - key = db.repository().nextKey(); - db.repositoryIndex().putUnique(name, key); - } catch (TimeoutException err) { - throw new DhtTimeoutException(MessageFormat.format( - DhtText.get().timeoutLocatingRepository, name), err); - } - - String master = Constants.R_HEADS + Constants.MASTER; - RefUpdate.Result result = updateRef(Constants.HEAD, true).link(master); - if (result != RefUpdate.Result.NEW) - throw new IOException(result.name()); - } - - @Override - public void scanForRepoChanges() { - refdb.clearCache(); - } - - @Override - public void notifyIndexChanged() { - // we do not support non-bare repositories yet - } - - @Override - public String toString() { - return "DhtRepostitory[" + key + " / " + name + "]"; - } - - // TODO This method should be removed from the JGit API. - @Override - public ReflogReader getReflogReader(String refName) { - throw new UnsupportedOperationException(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java deleted file mode 100644 index a02b313cf1..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtRepositoryBuilder.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.File; -import java.text.MessageFormat; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.errors.RepositoryNotFoundException; -import org.eclipse.jgit.lib.BaseRepositoryBuilder; -import org.eclipse.jgit.storage.dht.spi.Database; - -/** - * Constructs a {@link DhtRepository}. - * - * @param <B> - * type of builder used by the DHT system. - * @param <R> - * type of repository used by the DHT system. - * @param <D> - * type of database used by the DHT system. - */ -public class DhtRepositoryBuilder<B extends DhtRepositoryBuilder, R extends DhtRepository, D extends Database> - extends BaseRepositoryBuilder<B, R> { - private D database; - - private DhtReaderOptions readerOptions; - - private DhtInserterOptions inserterOptions; - - private String name; - - private RepositoryKey key; - - /** Initializes an empty builder with no values set. */ - public DhtRepositoryBuilder() { - setBare(); - setMustExist(true); - } - - /** @return the database that stores the repositories. */ - public D getDatabase() { - return database; - } - - /** - * Set the cluster used to store the repositories. - * - * @param database - * the database supplier. - * @return {@code this} - */ - public B setDatabase(D database) { - this.database = database; - return self(); - } - - /** @return options used by readers accessing the repository. */ - public DhtReaderOptions getReaderOptions() { - return readerOptions; - } - - /** - * Set the reader options. - * - * @param opt - * new reader options object. - * @return {@code this} - */ - public B setReaderOptions(DhtReaderOptions opt) { - readerOptions = opt; - return self(); - } - - /** @return options used by writers accessing the repository. */ - public DhtInserterOptions getInserterOptions() { - return inserterOptions; - } - - /** - * Set the inserter options. - * - * @param opt - * new inserter options object. - * @return {@code this} - */ - public B setInserterOptions(DhtInserterOptions opt) { - inserterOptions = opt; - return self(); - } - - /** @return name of the repository in the DHT. */ - public String getRepositoryName() { - return name; - } - - /** - * Set the name of the repository to open. - * - * @param name - * the name. - * @return {@code this}. - */ - public B setRepositoryName(String name) { - this.name = name; - return self(); - } - - /** @return the repository's key. 
*/ - public RepositoryKey getRepositoryKey() { - return key; - } - - /** - * @param key - * @return {@code this} - */ - public B setRepositoryKey(RepositoryKey key) { - this.key = key; - return self(); - } - - @Override - public B setup() throws IllegalArgumentException, DhtException, - RepositoryNotFoundException { - if (getDatabase() == null) - throw new IllegalArgumentException(DhtText.get().databaseRequired); - - if (getReaderOptions() == null) - setReaderOptions(new DhtReaderOptions()); - if (getInserterOptions() == null) - setInserterOptions(new DhtInserterOptions()); - - if (getRepositoryKey() == null) { - if (getRepositoryName() == null) - throw new IllegalArgumentException(DhtText.get().nameRequired); - - RepositoryKey r; - try { - r = getDatabase().repositoryIndex().get( - RepositoryName.create(name)); - } catch (TimeoutException e) { - throw new DhtTimeoutException(MessageFormat.format( - DhtText.get().timeoutLocatingRepository, name), e); - } - if (isMustExist() && r == null) - throw new RepositoryNotFoundException(getRepositoryName()); - if (r != null) - setRepositoryKey(r); - } - return self(); - } - - @Override - @SuppressWarnings("unchecked") - public R build() throws IllegalArgumentException, DhtException, - RepositoryNotFoundException { - return (R) new DhtRepository(setup()); - } - - // We don't support local file IO and thus shouldn't permit these to set. - - @Override - public B setGitDir(File gitDir) { - if (gitDir != null) - throw new IllegalArgumentException(); - return self(); - } - - @Override - public B setObjectDirectory(File objectDirectory) { - if (objectDirectory != null) - throw new IllegalArgumentException(); - return self(); - } - - @Override - public B addAlternateObjectDirectory(File other) { - throw new UnsupportedOperationException("Alternates not supported"); - } - - @Override - public B setWorkTree(File workTree) { - if (workTree != null) - throw new IllegalArgumentException(); - return self(); - } - - @Override - public B setIndexFile(File indexFile) { - if (indexFile != null) - throw new IllegalArgumentException(); - return self(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java deleted file mode 100644 index 4fb520be15..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtText.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
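Taken together, DhtRepositoryBuilder.setup() and build() above locate (or prepare to create) a repository, and DhtRepository.create(true) earlier in this diff allocates its key, registers the name, and links HEAD to refs/heads/master. A hedged sketch of typical usage; MyDatabase is a hypothetical implementation of the spi.Database interface and the repository name is invented:

static DhtRepository createExample() throws Exception {
    DhtRepositoryBuilder builder = new DhtRepositoryBuilder();
    builder.setDatabase(new MyDatabase());       // hypothetical Database implementation
    builder.setRepositoryName("projects/example.git");
    builder.setMustExist(false);                 // allow creating a brand new repository
    DhtRepository repo = builder.build();
    repo.create(true);                           // must be bare; links HEAD to master
    return repo;
}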
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.nls.NLS; -import org.eclipse.jgit.nls.TranslationBundle; - -/** Translation bundle for the DHT storage provider. */ -public class DhtText extends TranslationBundle { - /** @return an instance of this translation bundle. */ - public static DhtText get() { - return NLS.getBundleFor(DhtText.class); - } - - /***/ public String cannotInsertObject; - /***/ public String corruptChunk; - /***/ public String corruptCompressedObject; - /***/ public String cycleInDeltaChain; - /***/ public String databaseRequired; - /***/ public String expectedObjectSizeDuringCopyAsIs; - /***/ public String invalidCachedPackInfo; - /***/ public String invalidChunkKey; - /***/ public String invalidChunkMeta; - /***/ public String invalidObjectIndexKey; - /***/ public String invalidObjectInfo; - /***/ public String invalidRefData; - /***/ public String missingChunk; - /***/ public String missingLongOffsetBase; - /***/ public String nameRequired; - /***/ public String noSavedTypeForBase; - /***/ public String notTimeUnit; - /***/ public String objectListSelectingName; - /***/ public String objectListCountingFrom; - /***/ public String objectTypeUnknown; - /***/ public String packParserInvalidPointer; - /***/ public String packParserRollbackFailed; - /***/ public String recordingObjects; - /***/ public String repositoryAlreadyExists; - /***/ public String repositoryMustBeBare; - /***/ public String shortCompressedObject; - /***/ public String timeoutChunkMeta; - /***/ public String timeoutLocatingRepository; - /***/ public String tooManyObjectsInPack; - /***/ public String unsupportedChunkIndex; - /***/ public String unsupportedObjectTypeInChunk; - /***/ public String wrongChunkPositionInCachedPack; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java deleted file mode 100644 index 32d52f0a99..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/DhtTimeoutException.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
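DhtText above is a standard JGit TranslationBundle: the NLS framework fills each public field from a locale-specific properties file, and callers feed the template through MessageFormat, as the repository and builder code earlier in this diff does. A small usage sketch with an invented repository name, assuming the bundle is on the classpath:

String msg = java.text.MessageFormat.format(
        DhtText.get().timeoutLocatingRepository, "projects/example.git");
// typically wrapped in a DhtTimeoutException or written to a log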
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.storage.dht.spi.Database; - -/** Any error caused by a {@link Database} operation. */ -public class DhtTimeoutException extends DhtException { - private static final long serialVersionUID = 1L; - - /** - * @param message - */ - public DhtTimeoutException(String message) { - super(message); - } - - /** - * @param message - * @param cause - */ - public DhtTimeoutException(String message, TimeoutException cause) { - super(message); - initCause(cause); - } - - /** - * @param cause - */ - public DhtTimeoutException(TimeoutException cause) { - super(cause.getMessage()); - initCause(cause); - } - - /** - * @param cause - */ - public DhtTimeoutException(InterruptedException cause) { - super(cause.getMessage()); - initCause(cause); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java deleted file mode 100644 index 6608a388e1..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/KeyUtils.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.util.RawParseUtils; - -final class KeyUtils { - static short parse16(byte[] src, int pos) { - return (short) RawParseUtils.parseHexInt16(src, pos); - } - - static int parse32(byte[] src, int pos) { - return RawParseUtils.parseHexInt32(src, pos); - } - - static void format16(byte[] dst, int p, short w) { - int o = p + 3; - while (o >= p && w != 0) { - dst[o--] = hexbyte[w & 0xf]; - w >>>= 4; - } - while (o >= p) - dst[o--] = '0'; - } - - static void format32(byte[] dst, int p, int w) { - int o = p + 7; - while (o >= p && w != 0) { - dst[o--] = hexbyte[w & 0xf]; - w >>>= 4; - } - while (o >= p) - dst[o--] = '0'; - } - - private static final byte[] hexbyte = { '0', '1', '2', '3', '4', '5', '6', - '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; - - private KeyUtils() { - // Do not create instances of this class. - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java deleted file mode 100644 index e6afd731fe..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/LargeNonDeltaObject.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
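format16() and format32() above write a value as fixed-width, zero-padded lowercase hex into a byte buffer; parse16() and parse32() reverse the encoding via RawParseUtils. A self-contained check of the layout, reproducing format32() for a buffer offset of zero:

import java.nio.charset.StandardCharsets;

class HexFormatCheck {
    public static void main(String[] args) {
        byte[] hexbyte = { '0', '1', '2', '3', '4', '5', '6', '7',
                '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
        byte[] dst = new byte[8];
        int w = 0x1234;
        int o = 7;
        while (o >= 0 && w != 0) {
            dst[o--] = hexbyte[w & 0xf];
            w >>>= 4;
        }
        while (o >= 0)
            dst[o--] = '0';
        // prints "00001234": eight digits, zero padded, most significant first
        System.out.println(new String(dst, StandardCharsets.US_ASCII));
    }
}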
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.BufferedInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.zip.InflaterInputStream; - -import org.eclipse.jgit.errors.LargeObjectException; -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.lib.ObjectLoader; -import org.eclipse.jgit.lib.ObjectStream; - -/** Loader for a large non-delta object. */ -class LargeNonDeltaObject extends ObjectLoader { - private final int type; - - private final long sz; - - private final int pos; - - private final DhtReader ctx; - - private final ChunkMeta meta; - - private PackChunk firstChunk; - - LargeNonDeltaObject(int type, long sz, PackChunk pc, int pos, DhtReader ctx) { - this.type = type; - this.sz = sz; - this.pos = pos; - this.ctx = ctx; - this.meta = pc.getMeta(); - firstChunk = pc; - } - - @Override - public boolean isLarge() { - return true; - } - - @Override - public byte[] getCachedBytes() throws LargeObjectException { - throw new LargeObjectException.ExceedsByteArrayLimit(); - } - - @Override - public int getType() { - return type; - } - - @Override - public long getSize() { - return sz; - } - - @Override - public ObjectStream openStream() throws MissingObjectException, IOException { - PackChunk pc = firstChunk; - if (pc != null) - firstChunk = null; - else - pc = ctx.getChunk(ChunkKey.fromString(meta.getFragment(0))); - - InputStream in = new ChunkInputStream(meta, ctx, pos, pc); - in = new BufferedInputStream(new InflaterInputStream(in), 8192); - return new ObjectStream.Filter(type, sz, in); - } - - private static class ChunkInputStream extends InputStream { - private final ChunkMeta meta; - - private final DhtReader ctx; - - private int ptr; - - private PackChunk pc; - - private int fragment; - - ChunkInputStream(ChunkMeta meta, DhtReader ctx, int pos, PackChunk pc) { - this.ctx = ctx; - this.meta = meta; - this.ptr = pos; - this.pc = pc; - } - - @Override - public int read(byte[] dstbuf, int dstptr, int dstlen) - throws IOException { - if (0 == dstlen) - return 0; - - int n = pc.read(ptr, dstbuf, dstptr, dstlen); - if (n == 0) { - if (fragment == meta.getFragmentCount()) - return -1; - - pc = ctx.getChunk(ChunkKey.fromString( - meta.getFragment(++fragment))); - ptr = 0; - n = pc.read(ptr, dstbuf, dstptr, dstlen); - if (n == 0) - return -1; - } - ptr += n; - return n; - } - - @Override - public int read() throws IOException { - byte[] tmp = new byte[1]; - int n = read(tmp, 0, 1); - return n 
== 1 ? tmp[0] & 0xff : -1; - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java deleted file mode 100644 index ab8f8352b0..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectIndexKey.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.storage.dht.KeyUtils.format32; -import static org.eclipse.jgit.storage.dht.KeyUtils.parse32; -import static org.eclipse.jgit.util.RawParseUtils.decode; - -import java.text.MessageFormat; - -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.lib.ObjectId; - -/** Identifies an ObjectId in the DHT. 
*/ -public final class ObjectIndexKey extends ObjectId implements RowKey { - private static final int KEYLEN = 49; - - /** - * @param repo - * @param objId - * @return the key - */ - public static ObjectIndexKey create(RepositoryKey repo, AnyObjectId objId) { - return new ObjectIndexKey(repo.asInt(), objId); - } - - /** - * @param key - * @return the key - */ - public static ObjectIndexKey fromBytes(byte[] key) { - if (key.length != KEYLEN) - throw new IllegalArgumentException(MessageFormat.format( - DhtText.get().invalidChunkKey, decode(key))); - - int repo = parse32(key, 0); - ObjectId id = ObjectId.fromString(key, 9); - return new ObjectIndexKey(repo, id); - } - - /** - * @param key - * @return the key - */ - public static ObjectIndexKey fromString(String key) { - return fromBytes(Constants.encodeASCII(key)); - } - - private final int repo; - - ObjectIndexKey(int repo, AnyObjectId objId) { - super(objId); - this.repo = repo; - } - - /** @return the repository that contains the object. */ - public RepositoryKey getRepositoryKey() { - return RepositoryKey.fromInt(repo); - } - - int getRepositoryId() { - return repo; - } - - public byte[] asBytes() { - byte[] r = new byte[KEYLEN]; - format32(r, 0, repo); - r[8] = '.'; - copyTo(r, 9); - return r; - } - - public String asString() { - return decode(asBytes()); - } - - @Override - public String toString() { - return "object-index:" + asString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java deleted file mode 100644 index 9123a8be8f..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectInfo.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
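asBytes() above fixes every ObjectIndexKey row key at 49 bytes: eight zero-padded hex digits of the repository id, an ASCII '.', then the 40-digit object name. A worked example with invented values:

class RowKeyExample {
    public static void main(String[] args) {
        String repoId = "0000002a";                                     // format32(42)
        String objectName = "7c23a5ef4c5a1b0f9c2d3e4f5a6b7c8d9e0f1a2b"; // hypothetical SHA-1
        String rowKey = repoId + "." + objectName;
        System.out.println(rowKey);          // 0000002a.7c23a5ef...
        System.out.println(rowKey.length()); // 49
    }
}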
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.List; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore; -import org.eclipse.jgit.lib.ObjectId; - -/** Connects an object to the chunk it is stored in. */ -public class ObjectInfo { - /** Orders ObjectInfo by their time member, oldest first. */ - public static final Comparator<ObjectInfo> BY_TIME = new Comparator<ObjectInfo>() { - public int compare(ObjectInfo a, ObjectInfo b) { - return Long.signum(a.getTime() - b.getTime()); - } - }; - - /** - * Sort the info list according to time, oldest member first. - * - * @param toSort - * list to sort. - */ - public static void sort(List<ObjectInfo> toSort) { - Collections.sort(toSort, BY_TIME); - } - - private final ChunkKey chunk; - - private final long time; - - private final GitStore.ObjectInfo data; - - /** - * Wrap an ObjectInfo from the storage system. - * - * @param chunkKey - * the chunk the object points to. - * @param data - * the data of the ObjectInfo. - */ - public ObjectInfo(ChunkKey chunkKey, GitStore.ObjectInfo data) { - this.chunk = chunkKey; - this.time = 0; - this.data = data; - } - - /** - * Wrap an ObjectInfo from the storage system. - * - * @param chunkKey - * the chunk the object points to. - * @param time - * timestamp of the ObjectInfo. - * @param data - * the data of the ObjectInfo. - */ - public ObjectInfo(ChunkKey chunkKey, long time, GitStore.ObjectInfo data) { - this.chunk = chunkKey; - this.time = time < 0 ? 0 : time; - this.data = data; - } - - /** @return the chunk this link points to. */ - public ChunkKey getChunkKey() { - return chunk; - } - - /** @return approximate time the object was created, in milliseconds. */ - public long getTime() { - return time; - } - - /** @return GitStore.ObjectInfo to embed in the database. */ - public GitStore.ObjectInfo getData() { - return data; - } - - /** @return type of the object, in OBJ_* constants. */ - public int getType() { - return data.getObjectType().getNumber(); - } - - /** @return size of the object when fully inflated. */ - public long getSize() { - return data.getInflatedSize(); - } - - /** @return true if the object storage uses delta compression. */ - public boolean isDelta() { - return data.hasDeltaBase(); - } - - /** @return true if the object has been fragmented across chunks. 
*/ - public boolean isFragmented() { - return data.getIsFragmented(); - } - - int getOffset() { - return data.getOffset(); - } - - long getPackedSize() { - return data.getPackedSize(); - } - - ObjectId getDeltaBase() { - if (data.hasDeltaBase()) - return ObjectId.fromRaw(data.getDeltaBase().toByteArray(), 0); - return null; - } - - @Override - public String toString() { - StringBuilder b = new StringBuilder(); - b.append("ObjectInfo:"); - b.append(chunk); - if (0 < time) - b.append(" @ ").append(new Date(time)); - b.append("\n"); - b.append(data.toString()); - return b.toString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java deleted file mode 100644 index d36b03bdb5..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/ObjectWriter.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicReference; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.util.BlockList; - -/** - * Re-orders objects destined for a pack stream by chunk locality. 
- * <p> - * By re-ordering objects according to chunk locality, and then the original - * order the PackWriter intended to use, objects can be copied quickly from - * chunks, and each chunk is visited at most once. A {@link Prefetcher} for the - * {@link DhtReader} is used to fetch chunks in the order they will be used, - * improving throughput by reducing the number of round-trips required to the - * storage system. - */ -final class ObjectWriter { - private final DhtReader ctx; - - private final Prefetcher prefetch; - - private final int batchSize; - - private final Semaphore metaBatches; - - private final AtomicReference<DhtException> metaError; - - private final LinkedHashMap<ChunkKey, Integer> allVisits; - - private final Map<ChunkKey, ChunkMeta> allMeta; - - private final Set<ChunkKey> metaMissing; - - private Set<ChunkKey> metaToRead; - - private int curVisit; - - ObjectWriter(DhtReader ctx, Prefetcher prefetch) { - this.ctx = ctx; - this.prefetch = prefetch; - - batchSize = ctx.getOptions().getObjectIndexBatchSize(); - metaBatches = new Semaphore(batchSize); - metaError = new AtomicReference<DhtException>(); - - allVisits = new LinkedHashMap<ChunkKey, Integer>(); - allMeta = new HashMap<ChunkKey, ChunkMeta>(); - metaMissing = new HashSet<ChunkKey>(); - metaToRead = new HashSet<ChunkKey>(); - curVisit = 1; - } - - void plan(List<DhtObjectToPack> list) throws DhtException { - try { - for (DhtObjectToPack obj : list) - visit(obj); - - if (!metaToRead.isEmpty()) - startBatch(Context.FAST_MISSING_OK); - awaitPendingBatches(); - - synchronized (metaMissing) { - if (!metaMissing.isEmpty()) { - metaBatches.release(batchSize); - resolveMissing(); - awaitPendingBatches(); - } - } - } catch (InterruptedException err) { - throw new DhtTimeoutException(err); - } - - Iterable<ChunkKey> order; - synchronized (allMeta) { - if (allMeta.isEmpty()) { - order = allVisits.keySet(); - } else { - BlockList<ChunkKey> keys = new BlockList<ChunkKey>(); - for (ChunkKey key : allVisits.keySet()) { - keys.add(key); - - ChunkMeta meta = allMeta.remove(key); - if (meta != null) { - for (int i = 1; i < meta.getFragmentCount(); i++) - keys.add(ChunkKey.fromString(meta.getFragment(i))); - } - } - order = keys; - } - } - prefetch.push(order); - - Collections.sort(list, new Comparator<DhtObjectToPack>() { - public int compare(DhtObjectToPack a, DhtObjectToPack b) { - return a.visitOrder - b.visitOrder; - } - }); - } - - private void visit(DhtObjectToPack obj) throws InterruptedException, - DhtTimeoutException { - // Plan the visit to the delta base before the object. This - // ensures the base is in the stream first, and OFS_DELTA can - // be used for the delta. - // - DhtObjectToPack base = (DhtObjectToPack) obj.getDeltaBase(); - if (base != null && base.visitOrder == 0) { - // Use the current visit, even if its wrong. This will - // prevent infinite recursion when there is a cycle in the - // delta chain. Cycles are broken during writing, not in - // the earlier planning phases. 
- // - obj.visitOrder = curVisit; - visit(base); - } - - ChunkKey key = obj.chunk; - if (key != null) { - Integer i = allVisits.get(key); - if (i == null) { - i = Integer.valueOf(1 + allVisits.size()); - allVisits.put(key, i); - } - curVisit = i.intValue(); - } - - if (obj.isFragmented()) { - metaToRead.add(key); - if (metaToRead.size() == batchSize) - startBatch(Context.FAST_MISSING_OK); - } - obj.visitOrder = curVisit; - } - - private void resolveMissing() throws DhtTimeoutException, - InterruptedException { - metaToRead = new HashSet<ChunkKey>(); - for (ChunkKey key : metaMissing) { - metaToRead.add(key); - if (metaToRead.size() == batchSize) - startBatch(Context.LOCAL); - } - if (!metaToRead.isEmpty()) - startBatch(Context.LOCAL); - } - - private void startBatch(Context context) throws InterruptedException, - DhtTimeoutException { - Timeout to = ctx.getOptions().getTimeout(); - if (!metaBatches.tryAcquire(1, to.getTime(), to.getUnit())) - throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta); - - Set<ChunkKey> keys = metaToRead; - ctx.getDatabase().chunk().getMeta( - context, - keys, - new MetaLoader(context, keys)); - metaToRead = new HashSet<ChunkKey>(); - } - - private void awaitPendingBatches() throws InterruptedException, - DhtTimeoutException, DhtException { - Timeout to = ctx.getOptions().getTimeout(); - if (!metaBatches.tryAcquire(batchSize, to.getTime(), to.getUnit())) - throw new DhtTimeoutException(DhtText.get().timeoutChunkMeta); - if (metaError.get() != null) - throw metaError.get(); - } - - private class MetaLoader implements AsyncCallback<Map<ChunkKey, ChunkMeta>> { - private final Context context; - - private final Set<ChunkKey> keys; - - MetaLoader(Context context, Set<ChunkKey> keys) { - this.context = context; - this.keys = keys; - } - - public void onSuccess(Map<ChunkKey, ChunkMeta> result) { - try { - synchronized (allMeta) { - allMeta.putAll(result); - keys.removeAll(result.keySet()); - } - if (context == Context.FAST_MISSING_OK && !keys.isEmpty()) { - synchronized (metaMissing) { - metaMissing.addAll(keys); - } - } - } finally { - metaBatches.release(1); - } - } - - public void onFailure(DhtException error) { - metaError.compareAndSet(null, error); - metaBatches.release(1); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java deleted file mode 100644 index 32b22340d1..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/OpenQueue.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. 
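startBatch() and awaitPendingBatches() above bound the number of in-flight metadata reads with a Semaphore: each batch takes one permit, the asynchronous callback returns it, and reclaiming the whole budget waits for every outstanding batch. A self-contained sketch of that pattern, with a thread pool standing in for the DHT's asynchronous chunk-metadata reads:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

class BatchBudgetSketch {
    public static void main(String[] args) throws InterruptedException {
        final int batchSize = 4;
        final Semaphore batches = new Semaphore(batchSize);
        ExecutorService pool = Executors.newFixedThreadPool(2);

        for (int i = 0; i < 10; i++) {
            // One permit per in-flight batch; blocks when the budget is spent.
            if (!batches.tryAcquire(1, 5, TimeUnit.SECONDS))
                throw new IllegalStateException("timeout starting batch");
            pool.submit(() -> {
                try {
                    // pretend to load one batch of chunk metadata here
                } finally {
                    batches.release(1); // what the async callback does above
                }
            });
        }

        // Waiting for all outstanding batches means reclaiming the full budget.
        if (!batches.tryAcquire(batchSize, 5, TimeUnit.SECONDS))
            throw new IllegalStateException("timeout awaiting batches");
        pool.shutdown();
    }
}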
- * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.Map; - -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.lib.AsyncObjectLoaderQueue; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ObjectLoader; -import org.eclipse.jgit.lib.ObjectReader; - -/** - * Locates objects in large batches, then opens them clustered by chunk. - * <p> - * To simplify the implementation this method performs lookups for the - * {@link ObjectInfo} in large batches, clusters those by ChunkKey, and loads - * the chunks with a {@link Prefetcher}. - * <p> - * The lookup queue is completely spun out during the first invocation of - * {@link #next()}, ensuring all chunks are known before any single chunk is - * accessed. This is necessary to improve access locality and prevent thrashing - * of the local ChunkCache. It also causes {@link MissingObjectException} to be - * thrown at the start of traversal, until the lookup queue is exhausted. - * - * @param <T> - * type of object to associate with the loader. 
- */ -final class OpenQueue<T extends ObjectId> extends QueueObjectLookup<T> - implements AsyncObjectLoaderQueue<T> { - private Map<ChunkKey, Collection<ObjectWithInfo<T>>> byChunk; - - private Iterator<Collection<ObjectWithInfo<T>>> chunkItr; - - private Iterator<ObjectWithInfo<T>> objectItr; - - private Prefetcher prefetcher; - - private ObjectWithInfo<T> current; - - private PackChunk currChunk; - - OpenQueue(DhtReader reader, Iterable<T> objectIds, boolean reportMissing) { - super(reader, reportMissing); - setCacheLoadedInfo(true); - setNeedChunkOnly(true); - init(objectIds); - - byChunk = new LinkedHashMap<ChunkKey, Collection<ObjectWithInfo<T>>>(); - objectItr = Collections.<ObjectWithInfo<T>> emptyList().iterator(); - } - - public boolean next() throws MissingObjectException, IOException { - if (chunkItr == null) - init(); - - if (!objectItr.hasNext()) { - currChunk = null; - if (!chunkItr.hasNext()) { - release(); - return false; - } - objectItr = chunkItr.next().iterator(); - } - - current = objectItr.next(); - return true; - } - - public T getCurrent() { - return current.object; - } - - public ObjectId getObjectId() { - return getCurrent(); - } - - public ObjectLoader open() throws IOException { - ChunkKey chunkKey = current.chunkKey; - - // Objects returned by the queue are clustered by chunk. This object - // is either in the current chunk, or are the next chunk ready on the - // prefetcher. Anything else is a programming error. - // - PackChunk chunk; - if (currChunk != null && chunkKey.equals(currChunk.getChunkKey())) - chunk = currChunk; - else { - chunk = prefetcher.get(chunkKey); - if (chunk == null) - throw new DhtMissingChunkException(chunkKey); - currChunk = chunk; - reader.recentChunk(chunk); - } - - if (current.info != null) { - int ptr = current.info.getOffset(); - int type = current.info.getType(); - return PackChunk.read(chunk, ptr, reader, type); - } else { - int ptr = chunk.findOffset(repo, current.object); - if (ptr < 0) - throw DhtReader.missing(current.object, ObjectReader.OBJ_ANY); - return PackChunk.read(chunk, ptr, reader, ObjectReader.OBJ_ANY); - } - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - release(); - return true; - } - - @Override - public void release() { - reader.getRecentChunks().setMaxBytes(reader.getOptions().getChunkLimit()); - prefetcher = null; - currChunk = null; - } - - private void init() throws IOException { - ObjectWithInfo<T> c; - - while ((c = nextObjectWithInfo()) != null) { - ChunkKey chunkKey = c.chunkKey; - Collection<ObjectWithInfo<T>> list = byChunk.get(chunkKey); - if (list == null) { - list = new ArrayList<ObjectWithInfo<T>>(); - byChunk.put(chunkKey, list); - - if (prefetcher == null) { - int limit = reader.getOptions().getChunkLimit(); - int ratio = reader.getOptions().getOpenQueuePrefetchRatio(); - int prefetchLimit = (int) (limit * (ratio / 100.0)); - reader.getRecentChunks().setMaxBytes(limit - prefetchLimit); - prefetcher = new Prefetcher(reader, 0, prefetchLimit); - } - prefetcher.push(chunkKey); - } - list.add(c); - } - - chunkItr = byChunk.values().iterator(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java deleted file mode 100644 index 57d357e4cf..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/PackChunk.java +++ /dev/null @@ -1,795 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. 
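Given the next()/getCurrent()/open()/release() surface shown above, a caller drains the queue with a simple loop; because results are clustered by chunk, consecutive open() calls tend to hit the same prefetched chunk. The QueueLike interface below is a deliberately minimal stand-in for the AsyncObjectLoaderQueue contract (a sketch, not JGit's API):

import java.io.IOException;

final class DrainQueueSketch {
    interface QueueLike<T> {
        boolean next() throws IOException;
        T getCurrent();
        byte[] open() throws IOException; // stands in for ObjectLoader
        void release();
    }

    static long drain(QueueLike<?> queue) throws IOException {
        long totalBytes = 0;
        try {
            while (queue.next()) {
                byte[] data = queue.open(); // objects arrive clustered by chunk
                totalBytes += data.length;
            }
        } finally {
            queue.release();
        }
        return totalBytes;
    }
}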
- * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Constants.OBJ_BAD; -import static org.eclipse.jgit.lib.Constants.OBJ_BLOB; -import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT; -import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA; -import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA; -import static org.eclipse.jgit.lib.Constants.OBJ_TAG; -import static org.eclipse.jgit.lib.Constants.OBJ_TREE; -import static org.eclipse.jgit.lib.Constants.newMessageDigest; -import static org.eclipse.jgit.storage.dht.ChunkFormatter.TRAILER_SIZE; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.text.MessageFormat; -import java.util.zip.DataFormatException; -import java.util.zip.Inflater; - -import org.eclipse.jgit.errors.CorruptObjectException; -import org.eclipse.jgit.errors.LargeObjectException; -import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.lib.ObjectLoader; -import org.eclipse.jgit.storage.pack.BinaryDelta; -import org.eclipse.jgit.storage.pack.PackOutputStream; -import org.eclipse.jgit.transport.PackParser; - -/** - * Chunk of object data, stored under a {@link ChunkKey}. - * <p> - * A chunk typically contains thousands of objects, compressed in the Git native - * pack file format. Its associated {@link ChunkIndex} provides offsets for each - * object's header and compressed data. 
- * <p> - * Chunks (and their indexes) are opaque binary blobs meant only to be read by - * the Git implementation. - */ -public final class PackChunk { - /** Constructs a {@link PackChunk} while reading from the DHT. */ - public static class Members { - private ChunkKey chunkKey; - - private byte[] dataBuf; - - private int dataPtr; - - private int dataLen; - - private byte[] indexBuf; - - private int indexPtr; - - private int indexLen; - - private ChunkMeta meta; - - /** @return the chunk key. Never null. */ - public ChunkKey getChunkKey() { - return chunkKey; - } - - /** - * @param key - * @return {@code this} - */ - public Members setChunkKey(ChunkKey key) { - this.chunkKey = key; - return this; - } - - /** @return true if there is chunk data present. */ - public boolean hasChunkData() { - return dataBuf != null; - } - - /** @return the chunk data, or null if not available. */ - public byte[] getChunkData() { - return asArray(dataBuf, dataPtr, dataLen); - } - - /** @return the chunk data, or null if not available. */ - public ByteBuffer getChunkDataAsByteBuffer() { - return asByteBuffer(dataBuf, dataPtr, dataLen); - } - - private static byte[] asArray(byte[] buf, int ptr, int len) { - if (buf == null) - return null; - if (ptr == 0 && buf.length == len) - return buf; - byte[] r = new byte[len]; - System.arraycopy(buf, ptr, r, 0, len); - return r; - } - - private static ByteBuffer asByteBuffer(byte[] buf, int ptr, int len) { - return buf != null ? ByteBuffer.wrap(buf, ptr, len) : null; - } - - /** - * @param chunkData - * @return {@code this} - */ - public Members setChunkData(byte[] chunkData) { - return setChunkData(chunkData, 0, chunkData.length); - } - - /** - * @param chunkData - * @param ptr - * @param len - * @return {@code this} - */ - public Members setChunkData(byte[] chunkData, int ptr, int len) { - this.dataBuf = chunkData; - this.dataPtr = ptr; - this.dataLen = len; - return this; - } - - /** @return true if there is a chunk index present. */ - public boolean hasChunkIndex() { - return indexBuf != null; - } - - /** @return the chunk index, or null if not available. */ - public byte[] getChunkIndex() { - return asArray(indexBuf, indexPtr, indexLen); - } - - /** @return the chunk index, or null if not available. */ - public ByteBuffer getChunkIndexAsByteBuffer() { - return asByteBuffer(indexBuf, indexPtr, indexLen); - } - - /** - * @param chunkIndex - * @return {@code this} - */ - public Members setChunkIndex(byte[] chunkIndex) { - return setChunkIndex(chunkIndex, 0, chunkIndex.length); - } - - /** - * @param chunkIndex - * @param ptr - * @param len - * @return {@code this} - */ - public Members setChunkIndex(byte[] chunkIndex, int ptr, int len) { - this.indexBuf = chunkIndex; - this.indexPtr = ptr; - this.indexLen = len; - return this; - } - - /** @return true if there is meta information present. */ - public boolean hasMeta() { - return meta != null; - } - - /** @return the inline meta data, or null if not available. */ - public ChunkMeta getMeta() { - return meta; - } - - /** - * @param meta - * @return {@code this} - */ - public Members setMeta(ChunkMeta meta) { - this.meta = meta; - return this; - } - - /** - * @return the PackChunk instance. - * @throws DhtException - * if early validation indicates the chunk data is corrupt - * or not recognized by this version of the library. 
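The Members class above is a plain builder, so reconstructing a chunk from a database row amounts to copying each column into its setter and calling build(). A sketch of that usage against the setters shown here, assuming the row columns are already available as byte arrays:

static PackChunk fromRow(ChunkKey key, byte[] data, byte[] index, ChunkMeta meta)
        throws DhtException {
    PackChunk.Members m = new PackChunk.Members();
    m.setChunkKey(key);
    if (data != null)
        m.setChunkData(data);
    if (index != null)
        m.setChunkIndex(index);
    if (meta != null)
        m.setMeta(meta);
    return m.build(); // parses the index bytes, then constructs the chunk
}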
- */ - public PackChunk build() throws DhtException { - ChunkIndex i; - if (indexBuf != null) - i = ChunkIndex.fromBytes(chunkKey, indexBuf, indexPtr, indexLen); - else - i = null; - - return new PackChunk(chunkKey, dataBuf, dataPtr, dataLen, i, meta); - } - } - - private static final int INFLATE_STRIDE = 512; - - private final ChunkKey key; - - private final byte[] dataBuf; - - private final int dataPtr; - - private final int dataLen; - - private final ChunkIndex index; - - private final ChunkMeta meta; - - private volatile Boolean valid; - - PackChunk(ChunkKey key, byte[] dataBuf, int dataPtr, int dataLen, - ChunkIndex index, ChunkMeta meta) { - this.key = key; - this.dataBuf = dataBuf; - this.dataPtr = dataPtr; - this.dataLen = dataLen; - this.index = index; - this.meta = meta; - } - - /** @return unique name of this chunk in the database. */ - public ChunkKey getChunkKey() { - return key; - } - - /** @return index describing the objects stored within this chunk. */ - public ChunkIndex getIndex() { - return index; - } - - /** @return inline meta information, or null if no data was necessary. */ - public ChunkMeta getMeta() { - return meta; - } - - @Override - public String toString() { - return "PackChunk[" + getChunkKey() + "]"; - } - - boolean hasIndex() { - return index != null; - } - - boolean isFragment() { - return meta != null && 0 < meta.getFragmentCount(); - } - - int findOffset(RepositoryKey repo, AnyObjectId objId) { - if (key.getRepositoryId() == repo.asInt() && index != null) - return index.findOffset(objId); - return -1; - } - - boolean contains(RepositoryKey repo, AnyObjectId objId) { - return 0 <= findOffset(repo, objId); - } - - static ObjectLoader read(PackChunk pc, int pos, final DhtReader ctx, - final int typeHint) throws IOException { - try { - return read1(pc, pos, ctx, typeHint, true /* use recentChunks */); - } catch (DeltaChainCycleException cycleFound) { - // A cycle can occur if recentChunks cache was used by the reader - // to satisfy an OBJ_REF_DELTA, but the chunk that was chosen has - // a reverse delta back onto an object already being read during - // this invocation. Its not as uncommon as it sounds, as the Git - // wire protocol can sometimes copy an object the repository already - // has when dealing with reverts or cherry-picks. - // - // Work around the cycle by disabling the recentChunks cache for - // this resolution only. This will force the DhtReader to re-read - // OBJECT_INDEX and consider only the oldest chunk for any given - // object. There cannot be a cycle if the method only walks along - // the oldest chunks. 
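The retry-without-cache recovery described in that comment reduces to a small wrapper: attempt resolution with the recent-chunk cache, and if a delta-chain cycle is detected, retry once with the cache disabled so only the oldest copy of each object is walked. A sketch, with the resolver passed in as a functional interface rather than JGit's read1():

final class CycleRetrySketch {
    static class CycleException extends Exception {
        private static final long serialVersionUID = 1L;
    }

    interface Resolver<R> {
        R read(boolean useRecentChunks) throws CycleException;
    }

    static <R> R resolve(Resolver<R> resolver) throws CycleException {
        try {
            return resolver.read(true); // fast path: recent chunks allowed
        } catch (CycleException firstCycle) {
            // Second attempt walks only the oldest chunks; a cycle here is fatal.
            return resolver.read(false);
        }
    }
}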
- try { - ctx.getStatistics().deltaChainCycles++; - return read1(pc, pos, ctx, typeHint, false /* no recentChunks */); - } catch (DeltaChainCycleException cannotRecover) { - throw new DhtException(MessageFormat.format( - DhtText.get().cycleInDeltaChain, pc.getChunkKey(), - Integer.valueOf(pos))); - } - } - } - - @SuppressWarnings("null") - private static ObjectLoader read1(PackChunk pc, int pos, - final DhtReader ctx, final int typeHint, final boolean recent) - throws IOException, DeltaChainCycleException { - try { - Delta delta = null; - byte[] data = null; - int type = OBJ_BAD; - boolean cached = false; - - SEARCH: for (;;) { - final byte[] dataBuf = pc.dataBuf; - final int dataPtr = pc.dataPtr; - final int posPtr = dataPtr + pos; - int c = dataBuf[posPtr] & 0xff; - int typeCode = (c >> 4) & 7; - long sz = c & 15; - int shift = 4; - int p = 1; - while ((c & 0x80) != 0) { - c = dataBuf[posPtr + p++] & 0xff; - sz += ((long) (c & 0x7f)) << shift; - shift += 7; - } - - switch (typeCode) { - case OBJ_COMMIT: - case OBJ_TREE: - case OBJ_BLOB: - case OBJ_TAG: { - if (delta != null) { - data = inflate(sz, pc, pos + p, ctx); - type = typeCode; - break SEARCH; - } - - if (sz < Integer.MAX_VALUE && !pc.isFragment()) { - try { - data = pc.inflateOne(sz, pos + p, ctx); - return new ObjectLoader.SmallObject(typeCode, data); - } catch (LargeObjectException tooBig) { - // Fall through and stream. - } - } - - return new LargeNonDeltaObject(typeCode, sz, pc, pos + p, ctx); - } - - case OBJ_OFS_DELTA: { - c = dataBuf[posPtr + p++] & 0xff; - long base = c & 127; - while ((c & 128) != 0) { - base += 1; - c = dataBuf[posPtr + p++] & 0xff; - base <<= 7; - base += (c & 127); - } - - ChunkKey baseChunkKey; - int basePosInChunk; - - if (base <= pos) { - // Base occurs in the same chunk, just earlier. - baseChunkKey = pc.getChunkKey(); - basePosInChunk = pos - (int) base; - } else { - // Long offset delta, base occurs in another chunk. - // Adjust distance to be from our chunk start. - base = base - pos; - - ChunkMeta.BaseChunk baseChunk; - baseChunk = ChunkMetaUtil.getBaseChunk( - pc.key, - pc.meta, - base); - baseChunkKey = ChunkKey.fromString(baseChunk.getChunkKey()); - basePosInChunk = (int) (baseChunk.getRelativeStart() - base); - } - - delta = new Delta(delta, // - pc.key, pos, (int) sz, p, // - baseChunkKey, basePosInChunk); - if (sz != delta.deltaSize) - break SEARCH; - - DeltaBaseCache.Entry e = delta.getBase(ctx); - if (e != null) { - type = e.type; - data = e.data; - cached = true; - break SEARCH; - } - if (baseChunkKey != pc.getChunkKey()) - pc = ctx.getChunk(baseChunkKey); - pos = basePosInChunk; - continue SEARCH; - } - - case OBJ_REF_DELTA: { - ObjectId id = ObjectId.fromRaw(dataBuf, posPtr + p); - PackChunk nc = pc; - int base = pc.index.findOffset(id); - if (base < 0) { - DhtReader.ChunkAndOffset n; - n = ctx.getChunk(id, typeHint, recent); - nc = n.chunk; - base = n.offset; - } - checkCycle(delta, pc.key, pos); - delta = new Delta(delta, // - pc.key, pos, (int) sz, p + 20, // - nc.getChunkKey(), base); - if (sz != delta.deltaSize) - break SEARCH; - - DeltaBaseCache.Entry e = delta.getBase(ctx); - if (e != null) { - type = e.type; - data = e.data; - cached = true; - break SEARCH; - } - pc = nc; - pos = base; - continue SEARCH; - } - - default: - throw new DhtException(MessageFormat.format( - DhtText.get().unsupportedObjectTypeInChunk, // - Integer.valueOf(typeCode), // - pc.getChunkKey(), // - Integer.valueOf(pos))); - } - } - - // At this point there is at least one delta to apply to data. 
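Every branch of the SEARCH loop starts from the same pack entry header encoding: the first byte carries a 3-bit type and the low 4 bits of the inflated size, and while the high bit is set each following byte contributes 7 more size bits. A standalone sketch of that decoding (a generic pack-format helper, not a method of this class):

final class PackHeaderSketch {
    // Returns { type, inflatedSize, offsetOfCompressedData }.
    static long[] decodeHeader(byte[] buf, int ptr) {
        int c = buf[ptr++] & 0xff;
        int type = (c >> 4) & 7;
        long size = c & 15;
        int shift = 4;
        while ((c & 0x80) != 0) {
            c = buf[ptr++] & 0xff;
            size += ((long) (c & 0x7f)) << shift;
            shift += 7;
        }
        return new long[] { type, size, ptr };
    }
}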
- // (Whole objects with no deltas to apply return early above.) - - do { - if (!delta.deltaChunk.equals(pc.getChunkKey())) - pc = ctx.getChunk(delta.deltaChunk); - pos = delta.deltaPos; - - // Cache only the base immediately before desired object. - if (cached) - cached = false; - else if (delta.next == null) - delta.putBase(ctx, type, data); - - final byte[] cmds = delta.decompress(pc, ctx); - final long sz = BinaryDelta.getResultSize(cmds); - final byte[] result = newResult(sz); - BinaryDelta.apply(data, cmds, result); - data = result; - delta = delta.next; - } while (delta != null); - - return new ObjectLoader.SmallObject(type, data); - - } catch (DataFormatException dfe) { - CorruptObjectException coe = new CorruptObjectException( - MessageFormat.format(DhtText.get().corruptCompressedObject, - pc.getChunkKey(), Integer.valueOf(pos))); - coe.initCause(dfe); - throw coe; - } - } - - private static byte[] inflate(long sz, PackChunk pc, int pos, - DhtReader reader) throws DataFormatException, DhtException { - if (pc.isFragment()) - return inflateFragment(sz, pc, pos, reader); - return pc.inflateOne(sz, pos, reader); - } - - private byte[] inflateOne(long sz, int pos, DhtReader reader) - throws DataFormatException { - // Because the chunk ends in a 4 byte CRC, there is always - // more data available for input than the inflater needs. - // This also helps with an optimization in libz where it - // wants at least 1 extra byte of input beyond the end. - - final byte[] dstbuf = newResult(sz); - final Inflater inf = reader.inflater(); - final int offset = pos; - int dstoff = 0; - - int bs = Math.min(dataLen - pos, INFLATE_STRIDE); - inf.setInput(dataBuf, dataPtr + pos, bs); - pos += bs; - - while (dstoff < dstbuf.length) { - int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff); - if (n == 0) { - if (inf.needsInput()) { - bs = Math.min(dataLen - pos, INFLATE_STRIDE); - inf.setInput(dataBuf, dataPtr + pos, bs); - pos += bs; - continue; - } - break; - } - dstoff += n; - } - - if (dstoff != sz) { - throw new DataFormatException(MessageFormat.format( - DhtText.get().shortCompressedObject, - getChunkKey(), - Integer.valueOf(offset))); - } - return dstbuf; - } - - private static byte[] inflateFragment(long sz, PackChunk pc, final int pos, - DhtReader reader) throws DataFormatException, DhtException { - byte[] dstbuf = newResult(sz); - int dstoff = 0; - - final Inflater inf = reader.inflater(); - final ChunkMeta meta = pc.meta; - int nextChunk = 1; - - int bs = pc.dataLen - pos - TRAILER_SIZE; - inf.setInput(pc.dataBuf, pc.dataPtr + pos, bs); - - while (dstoff < dstbuf.length) { - int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff); - if (n == 0) { - if (inf.needsInput()) { - if (meta.getFragmentCount() <= nextChunk) - break; - pc = reader.getChunk(ChunkKey.fromString( - meta.getFragment(nextChunk++))); - if (meta.getFragmentCount() == nextChunk) - bs = pc.dataLen; // Include trailer on last chunk. 
- else - bs = pc.dataLen - TRAILER_SIZE; - inf.setInput(pc.dataBuf, pc.dataPtr, bs); - continue; - } - break; - } - dstoff += n; - } - - if (dstoff != sz) { - throw new DataFormatException(MessageFormat.format( - DhtText.get().shortCompressedObject, - ChunkKey.fromString(meta.getFragment(0)), - Integer.valueOf(pos))); - } - return dstbuf; - } - - private static byte[] newResult(long sz) { - if (Integer.MAX_VALUE < sz) - throw new LargeObjectException.ExceedsByteArrayLimit(); - try { - return new byte[(int) sz]; - } catch (OutOfMemoryError noMemory) { - throw new LargeObjectException.OutOfMemory(noMemory); - } - } - - int readObjectTypeAndSize(int ptr, PackParser.ObjectTypeAndSize info) { - ptr += dataPtr; - - int c = dataBuf[ptr++] & 0xff; - int typeCode = (c >> 4) & 7; - long sz = c & 15; - int shift = 4; - while ((c & 0x80) != 0) { - c = dataBuf[ptr++] & 0xff; - sz += ((long) (c & 0x7f)) << shift; - shift += 7; - } - - switch (typeCode) { - case OBJ_OFS_DELTA: - c = dataBuf[ptr++] & 0xff; - while ((c & 128) != 0) - c = dataBuf[ptr++] & 0xff; - break; - - case OBJ_REF_DELTA: - ptr += 20; - break; - } - - info.type = typeCode; - info.size = sz; - return ptr - dataPtr; - } - - int read(int ptr, byte[] dst, int dstPos, int cnt) { - // Do not allow readers to read the CRC-32 from the tail. - int n = Math.min(cnt, (dataLen - TRAILER_SIZE) - ptr); - System.arraycopy(dataBuf, dataPtr + ptr, dst, dstPos, n); - return n; - } - - void copyObjectAsIs(PackOutputStream out, DhtObjectToPack obj, - boolean validate, DhtReader ctx) throws IOException, - StoredObjectRepresentationNotAvailableException { - if (validate && !isValid()) { - StoredObjectRepresentationNotAvailableException gone; - - gone = new StoredObjectRepresentationNotAvailableException(obj); - gone.initCause(new DhtException(MessageFormat.format( - DhtText.get().corruptChunk, getChunkKey()))); - throw gone; - } - - int ptr = dataPtr + obj.offset; - int c = dataBuf[ptr++] & 0xff; - int typeCode = (c >> 4) & 7; - long inflatedSize = c & 15; - int shift = 4; - while ((c & 0x80) != 0) { - c = dataBuf[ptr++] & 0xff; - inflatedSize += ((long) (c & 0x7f)) << shift; - shift += 7; - } - - switch (typeCode) { - case OBJ_OFS_DELTA: - do { - c = dataBuf[ptr++] & 0xff; - } while ((c & 128) != 0); - break; - - case OBJ_REF_DELTA: - ptr += 20; - break; - } - - // If the size is positive, its accurate. If its -1, this is a - // fragmented object that will need more handling below, - // so copy all of the chunk, minus the trailer. - - final int maxAvail = (dataLen - TRAILER_SIZE) - (ptr - dataPtr); - final int copyLen; - if (0 < obj.size) - copyLen = Math.min(obj.size, maxAvail); - else if (-1 == obj.size) - copyLen = maxAvail; - else - throw new DhtException(MessageFormat.format( - DhtText.get().expectedObjectSizeDuringCopyAsIs, obj)); - out.writeHeader(obj, inflatedSize); - out.write(dataBuf, ptr, copyLen); - - // If the object was fragmented, send all of the other fragments. 
- if (isFragment()) { - int cnt = meta.getFragmentCount(); - for (int fragId = 1; fragId < cnt; fragId++) { - PackChunk pc = ctx.getChunk(ChunkKey.fromString( - meta.getFragment(fragId))); - pc.copyEntireChunkAsIs(out, obj, validate); - } - } - } - - void copyEntireChunkAsIs(PackOutputStream out, DhtObjectToPack obj, - boolean validate) throws IOException { - if (validate && !isValid()) { - if (obj != null) - throw new CorruptObjectException(obj, MessageFormat.format( - DhtText.get().corruptChunk, getChunkKey())); - else - throw new DhtException(MessageFormat.format( - DhtText.get().corruptChunk, getChunkKey())); - } - - // Do not copy the trailer onto the output stream. - out.write(dataBuf, dataPtr, dataLen - TRAILER_SIZE); - } - - @SuppressWarnings("boxing") - private boolean isValid() { - Boolean v = valid; - if (v == null) { - MessageDigest m = newMessageDigest(); - m.update(dataBuf, dataPtr, dataLen); - v = key.getChunkHash().compareTo(m.digest(), 0) == 0; - valid = v; - } - return v.booleanValue(); - } - - /** @return the complete size of this chunk, in memory. */ - int getTotalSize() { - // Assume the index is part of the buffer, and report its total size.. - if (dataPtr != 0 || dataLen != dataBuf.length) - return dataBuf.length; - - int sz = dataLen; - if (index != null) - sz += index.getIndexSize(); - return sz; - } - - private static class Delta { - /** Child that applies onto this object. */ - final Delta next; - - /** The chunk the delta is stored in. */ - final ChunkKey deltaChunk; - - /** Offset of the delta object. */ - final int deltaPos; - - /** Size of the inflated delta stream. */ - final int deltaSize; - - /** Total size of the delta's pack entry header (including base). */ - final int hdrLen; - - /** The chunk the base is stored in. */ - final ChunkKey baseChunk; - - /** Offset of the base object. */ - final int basePos; - - Delta(Delta next, ChunkKey dc, int ofs, int sz, int hdrLen, - ChunkKey bc, int bp) { - this.next = next; - this.deltaChunk = dc; - this.deltaPos = ofs; - this.deltaSize = sz; - this.hdrLen = hdrLen; - this.baseChunk = bc; - this.basePos = bp; - } - - byte[] decompress(PackChunk chunk, DhtReader reader) - throws DataFormatException, DhtException { - return inflate(deltaSize, chunk, deltaPos + hdrLen, reader); - } - - DeltaBaseCache.Entry getBase(DhtReader ctx) { - return ctx.getDeltaBaseCache().get(baseChunk, basePos); - } - - void putBase(DhtReader ctx, int type, byte[] data) { - ctx.getDeltaBaseCache().put(baseChunk, basePos, type, data); - } - } - - private static void checkCycle(Delta delta, ChunkKey key, int ofs) - throws DeltaChainCycleException { - for (; delta != null; delta = delta.next) { - if (delta.deltaPos == ofs && delta.deltaChunk.equals(key)) - throw DeltaChainCycleException.INSTANCE; - } - } - - private static class DeltaChainCycleException extends Exception { - private static final long serialVersionUID = 1L; - - static final DeltaChainCycleException INSTANCE = new DeltaChainCycleException(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java deleted file mode 100644 index fef2b4f29d..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Prefetcher.java +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. 
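inflateOne() above feeds the Inflater in small strides instead of handing it the whole chunk, stopping once the expected number of bytes has been produced. The same loop, lifted out as a self-contained sketch (the 512-byte stride mirrors INFLATE_STRIDE; the caller supplies a fresh or reset Inflater):

import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

final class InflateStrideSketch {
    static byte[] inflate(Inflater inf, byte[] src, int off, int len, int expectedSize)
            throws DataFormatException {
        final int STRIDE = 512;
        byte[] dst = new byte[expectedSize];
        int srcPos = 0;
        int dstPos = 0;

        int bs = Math.min(len, STRIDE);
        inf.setInput(src, off, bs);
        srcPos += bs;

        while (dstPos < dst.length) {
            int n = inf.inflate(dst, dstPos, dst.length - dstPos);
            if (n == 0) {
                if (inf.needsInput() && srcPos < len) {
                    bs = Math.min(len - srcPos, STRIDE);
                    inf.setInput(src, off + srcPos, bs);
                    srcPos += bs;
                    continue;
                }
                break; // input exhausted or stream finished early
            }
            dstPos += n;
        }

        if (dstPos != expectedSize)
            throw new DataFormatException("short compressed object");
        return dst;
    }
}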
- * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT; -import static org.eclipse.jgit.lib.Constants.OBJ_TREE; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.revwalk.RevCommit; -import org.eclipse.jgit.revwalk.RevTree; -import org.eclipse.jgit.storage.dht.DhtReader.ChunkAndOffset; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.Database; - -class Prefetcher implements StreamingCallback<Collection<PackChunk.Members>> { - private static enum Status { - ON_QUEUE, LOADING, WAITING, READY, DONE; - } - - private final Database db; - - private final DhtReader.Statistics stats; - - private final int objectType; - - private final HashMap<ChunkKey, PackChunk> ready; - - private final HashMap<ChunkKey, Status> status; - - private final LinkedList<ChunkKey> queue; - - private final boolean followEdgeHints; - - private final int averageChunkSize; - - private final int highWaterMark; - - private final int lowWaterMark; - - private boolean first = true; - - private boolean automaticallyPushHints = true; - - private ChunkKey stopAt; - - private int bytesReady; - - private int bytesLoading; - - private DhtException error; - - Prefetcher(DhtReader reader, int objectType, int prefetchLimitInBytes) { - this.db = reader.getDatabase(); - this.stats = reader.getStatistics(); - this.objectType = objectType; - this.ready = new HashMap<ChunkKey, PackChunk>(); - this.status = new HashMap<ChunkKey, Status>(); - this.queue = new LinkedList<ChunkKey>(); - this.followEdgeHints = reader.getOptions().isPrefetchFollowEdgeHints(); - this.averageChunkSize = reader.getInserterOptions().getChunkSize(); - this.highWaterMark = prefetchLimitInBytes; - - int lwm = (highWaterMark / averageChunkSize) - 4; - if (lwm <= 0) - lwm = (highWaterMark / averageChunkSize) / 2; - lowWaterMark = lwm * averageChunkSize; - } - - boolean isType(int type) { - return objectType == type; - } - - void push(DhtReader ctx, Collection<RevCommit> roots) { - // Approximate walk by using hints from the most recent commit. - // Since the commits were recently parsed by the reader, we can - // ask the reader for their chunk locations and most likely get - // cache hits. - - int time = -1; - PackChunk chunk = null; - - for (RevCommit cmit : roots) { - if (time < cmit.getCommitTime()) { - ChunkAndOffset p = ctx.getChunkGently(cmit); - if (p != null && p.chunk.getMeta() != null) { - time = cmit.getCommitTime(); - chunk = p.chunk; - } - } - } - - if (chunk != null) { - synchronized (this) { - status.put(chunk.getChunkKey(), Status.DONE); - push(chunk.getMeta()); - } - } - } - - void push(DhtReader ctx, RevTree start, RevTree end) throws DhtException, - MissingObjectException { - // Unlike commits, trees aren't likely to be loaded when they - // are pushed into the prefetcher. Find the tree and load it - // as necessary to get the prefetch meta established. 
- // - Sync<Map<ObjectIndexKey, Collection<ObjectInfo>>> sync = Sync.create(); - Set<ObjectIndexKey> toFind = new HashSet<ObjectIndexKey>(); - toFind.add(ObjectIndexKey.create(ctx.getRepositoryKey(), start)); - toFind.add(ObjectIndexKey.create(ctx.getRepositoryKey(), end)); - db.objectIndex().get(Context.READ_REPAIR, toFind, sync); - - Map<ObjectIndexKey, Collection<ObjectInfo>> trees; - try { - trees = sync.get(ctx.getOptions().getTimeout()); - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } catch (TimeoutException e) { - throw new DhtTimeoutException(e); - } - - ChunkKey startKey = chunk(trees.get(start)); - if (startKey == null) - throw DhtReader.missing(start, OBJ_TREE); - - ChunkKey endKey = chunk(trees.get(end)); - if (endKey == null) - throw DhtReader.missing(end, OBJ_TREE); - - synchronized (this) { - stopAt = endKey; - push(startKey); - maybeStartGet(); - } - } - - private static ChunkKey chunk(Collection<ObjectInfo> info) { - if (info == null || info.isEmpty()) - return null; - - List<ObjectInfo> infoList = new ArrayList<ObjectInfo>(info); - ObjectInfo.sort(infoList); - return infoList.get(0).getChunkKey(); - } - - void push(ChunkKey key) { - push(Collections.singleton(key)); - } - - void push(ChunkMeta meta) { - if (meta == null) - return; - - ChunkMeta.PrefetchHint hint; - switch (objectType) { - case OBJ_COMMIT: - hint = meta.getCommitPrefetch(); - break; - case OBJ_TREE: - hint = meta.getTreePrefetch(); - break; - default: - return; - } - - if (hint != null) { - synchronized (this) { - if (followEdgeHints && 0 < hint.getEdgeCount()) - push(hint.getEdgeList()); - else - push(hint.getSequentialList()); - } - } - } - - private void push(List<String> list) { - List<ChunkKey> keys = new ArrayList<ChunkKey>(list.size()); - for (String keyString : list) - keys.add(ChunkKey.fromString(keyString)); - push(keys); - } - - void push(Iterable<ChunkKey> list) { - synchronized (this) { - for (ChunkKey key : list) { - if (status.containsKey(key)) - continue; - - status.put(key, Status.ON_QUEUE); - queue.add(key); - - if (key.equals(stopAt)) { - automaticallyPushHints = false; - break; - } - } - - if (!first) - maybeStartGet(); - } - } - - synchronized ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) { - for (PackChunk c : ready.values()) { - int p = c.findOffset(repo, objId); - if (0 <= p) - return new ChunkAndOffset(useReadyChunk(c.getChunkKey()), p); - } - return null; - } - - synchronized PackChunk get(ChunkKey key) throws DhtException { - GET: for (;;) { - if (error != null) - throw error; - - Status chunkStatus = status.get(key); - if (chunkStatus == null) - return null; - - switch (chunkStatus) { - case ON_QUEUE: - if (queue.isEmpty()) { - // Should never happen, but let the caller load. - status.put(key, Status.DONE); - return null; - - } else if (bytesReady + bytesLoading < highWaterMark) { - // Make sure its first in the queue, start, and wait. - if (!queue.getFirst().equals(key)) { - int idx = queue.indexOf(key); - if (first && objectType == OBJ_COMMIT) { - // If the prefetcher has not started yet, skip all - // chunks up to this first request. Assume this - // initial out-of-order get occurred because the - // RevWalk has already parsed all of the commits - // up to this point and does not need them again. 
- // - for (; 0 < idx; idx--) - status.put(queue.removeFirst(), Status.DONE); - forceStartGet(); - continue GET; - } - - stats.access(key).cntPrefetcher_OutOfOrder++; - queue.remove(idx); - queue.addFirst(key); - } - forceStartGet(); - continue GET; - - } else { - // It cannot be moved up to the front of the queue - // without violating the prefetch size. Let the - // caller load the chunk out of order. - stats.access(key).cntPrefetcher_OutOfOrder++; - status.put(key, Status.DONE); - return null; - } - - case LOADING: // Wait for a prefetch that is already started. - status.put(key, Status.WAITING); - //$FALL-THROUGH$ - case WAITING: - stats.access(key).cntPrefetcher_WaitedForLoad++; - try { - wait(); - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } - continue GET; - - case READY: - return useReadyChunk(key); - - case DONE: - stats.access(key).cntPrefetcher_Revisited++; - return null; - - default: - throw new IllegalStateException(key + " " + chunkStatus); - } - } - } - - private PackChunk useReadyChunk(ChunkKey key) { - PackChunk chunk = ready.remove(key); - - status.put(chunk.getChunkKey(), Status.DONE); - bytesReady -= chunk.getTotalSize(); - - if (automaticallyPushHints) { - push(chunk.getMeta()); - maybeStartGet(); - } - - return chunk; - } - - private void maybeStartGet() { - if (!queue.isEmpty() && bytesReady + bytesLoading <= lowWaterMark) - forceStartGet(); - } - - private void forceStartGet() { - // Use a LinkedHashSet so insertion order is iteration order. - // This may help a provider that loads sequentially in the - // set's iterator order to load in the order we want data. - // - LinkedHashSet<ChunkKey> toLoad = new LinkedHashSet<ChunkKey>(); - - while (bytesReady + bytesLoading < highWaterMark && !queue.isEmpty()) { - ChunkKey key = queue.removeFirst(); - - stats.access(key).cntPrefetcher_Load++; - toLoad.add(key); - status.put(key, Status.LOADING); - bytesLoading += averageChunkSize; - - // For the first chunk, start immediately to reduce the - // startup latency associated with additional chunks. - if (first) - break; - } - - if (!toLoad.isEmpty() && error == null) - db.chunk().get(Context.LOCAL, toLoad, this); - - if (first) { - first = false; - maybeStartGet(); - } - } - - public synchronized void onPartialResult(Collection<PackChunk.Members> res) { - try { - bytesLoading -= averageChunkSize * res.size(); - for (PackChunk.Members builder : res) - chunkIsReady(builder.build()); - } catch (DhtException loadError) { - onError(loadError); - } - } - - private void chunkIsReady(PackChunk chunk) { - ChunkKey key = chunk.getChunkKey(); - ready.put(key, chunk); - bytesReady += chunk.getTotalSize(); - - if (status.put(key, Status.READY) == Status.WAITING) - notifyAll(); - } - - public synchronized void onSuccess(Collection<PackChunk.Members> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - } - - public synchronized void onFailure(DhtException asyncError) { - onError(asyncError); - } - - private void onError(DhtException asyncError) { - if (error == null) { - error = asyncError; - notifyAll(); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java deleted file mode 100644 index 9cf513d265..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/QueueObjectLookup.java +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. 
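The Prefetcher above throttles itself with two byte-denominated water marks derived from the configured prefetch limit and the average chunk size: loads are issued until ready-plus-loading bytes reach the high mark, and refilling only restarts once they fall to the low mark. The arithmetic, isolated as a sketch:

final class WaterMarkSketch {
    final int averageChunkSize;
    final int highWaterMark;
    final int lowWaterMark;

    WaterMarkSketch(int prefetchLimitInBytes, int averageChunkSize) {
        this.averageChunkSize = averageChunkSize;
        this.highWaterMark = prefetchLimitInBytes;

        // Aim to restart loading roughly four chunks before the buffer drains;
        // fall back to half the buffer when the limit is only a few chunks.
        int lwm = (highWaterMark / averageChunkSize) - 4;
        if (lwm <= 0)
            lwm = (highWaterMark / averageChunkSize) / 2;
        this.lowWaterMark = lwm * averageChunkSize;
    }

    boolean shouldRefill(int bytesReady, int bytesLoading, boolean queueEmpty) {
        return !queueEmpty && bytesReady + bytesLoading <= lowWaterMark;
    }

    boolean mayIssueAnotherLoad(int bytesReady, int bytesLoading) {
        return bytesReady + bytesLoading < highWaterMark;
    }
}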
- * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.lib.AsyncOperation; -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.Database; - -class QueueObjectLookup<T extends ObjectId> implements AsyncOperation { - protected final RepositoryKey repo; - - protected final Database db; - - protected final DhtReader reader; - - private final DhtReaderOptions options; - - private final boolean reportMissing; - - private final ArrayList<ObjectInfo> tmp; - - private final int concurrentBatches; - - private int runningBatches; - - private Context context; - - private Iterator<T> toFind; - - private List<T> toRetry; - - private ObjectWithInfo<T> nextResult; - - private DhtException error; - - private boolean needChunkOnly; - - private boolean cacheLoadedInfo; - - QueueObjectLookup(DhtReader reader, boolean reportMissing) { - this.repo = reader.getRepositoryKey(); - this.db = reader.getDatabase(); - this.reader = reader; - this.options = reader.getOptions(); - this.reportMissing = reportMissing; - this.tmp = new ArrayList<ObjectInfo>(4); - this.context = Context.FAST_MISSING_OK; - this.toRetry = new ArrayList<T>(); - - this.concurrentBatches = options.getObjectIndexConcurrentBatches(); - } - - void setCacheLoadedInfo(boolean on) { - cacheLoadedInfo = on; - } - - void setNeedChunkOnly(boolean on) { - needChunkOnly = on; - } - - void init(Iterable<T> objectIds) { - toFind = lookInCache(objectIds).iterator(); - } - - private Iterable<T> lookInCache(Iterable<T> objects) { - RecentInfoCache infoCache = reader.getRecentInfoCache(); - List<T> missing = null; - for (T obj : objects) { - if (needChunkOnly && obj instanceof RefDataUtil.IdWithChunk) { - push(obj, ((RefDataUtil.IdWithChunk) obj).getChunkKey()); - continue; - } - - List<ObjectInfo> info = infoCache.get(obj); - if (info != null && !info.isEmpty()) { - push(obj, info.get(0)); - } else { - if (missing == null) { - if (objects instanceof List<?>) - missing = new ArrayList<T>(((List<?>) objects).size()); - else - missing = new ArrayList<T>(); - } - missing.add(obj); - } - } - if (missing != null) - return missing; - return Collections.emptyList(); - } - - synchronized ObjectWithInfo<T> nextObjectWithInfo() - throws MissingObjectException, IOException { - for (;;) { - if (error != null) - throw error; - - // Consider starting another batch before popping a result. - // This ensures lookup is running while results are being - // consumed by the calling application. 
- // - while (runningBatches < concurrentBatches) { - if (!toFind.hasNext() // reached end of original input - && runningBatches == 0 // all batches finished - && toRetry != null // haven't yet retried - && !toRetry.isEmpty()) { - toFind = toRetry.iterator(); - toRetry = null; - context = Context.READ_REPAIR; - } - - if (toFind.hasNext()) - startBatch(context); - else - break; - } - - ObjectWithInfo<T> c = pop(); - if (c != null) { - if (c.chunkKey != null) - return c; - else - throw missing(c.object); - - } else if (!toFind.hasNext() && runningBatches == 0) - return null; - - try { - wait(); - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } - } - } - - private synchronized void startBatch(final Context ctx) { - final int batchSize = options.getObjectIndexBatchSize(); - final Map<ObjectIndexKey, T> batch = new HashMap<ObjectIndexKey, T>(); - while (toFind.hasNext() && batch.size() < batchSize) { - T obj = toFind.next(); - batch.put(ObjectIndexKey.create(repo, obj), obj); - } - - final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> cb; - - cb = new AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>>() { - public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> r) { - processResults(ctx, batch, r); - } - - public void onFailure(DhtException e) { - processFailure(e); - } - }; - db.objectIndex().get(ctx, batch.keySet(), cb); - runningBatches++; - } - - private synchronized void processResults(Context ctx, - Map<ObjectIndexKey, T> batch, - Map<ObjectIndexKey, Collection<ObjectInfo>> objects) { - for (T obj : batch.values()) { - Collection<ObjectInfo> matches = objects.get(obj); - - if (matches == null || matches.isEmpty()) { - if (ctx == Context.FAST_MISSING_OK) - toRetry.add(obj); - else if (reportMissing) - push(obj, (ChunkKey) null); - continue; - } - - tmp.clear(); - tmp.addAll(matches); - ObjectInfo.sort(tmp); - if (cacheLoadedInfo) - reader.getRecentInfoCache().put(obj, tmp); - - push(obj, tmp.get(0)); - } - - runningBatches--; - notify(); - } - - private synchronized void processFailure(DhtException e) { - runningBatches--; - error = e; - notify(); - } - - private void push(T obj, ChunkKey chunkKey) { - nextResult = new ObjectWithInfo<T>(obj, chunkKey, nextResult); - } - - private void push(T obj, ObjectInfo info) { - nextResult = new ObjectWithInfo<T>(obj, info, nextResult); - } - - private ObjectWithInfo<T> pop() { - ObjectWithInfo<T> r = nextResult; - if (r == null) - return null; - nextResult = r.next; - return r; - } - - public boolean cancel(boolean mayInterruptIfRunning) { - return true; - } - - public void release() { - // Do nothing, there is nothing to abort or discard. 
- } - - private static <T extends ObjectId> MissingObjectException missing(T id) { - return new MissingObjectException(id, DhtText.get().objectTypeUnknown); - } - - static class ObjectWithInfo<T extends ObjectId> { - final T object; - - final ObjectInfo info; - - final ChunkKey chunkKey; - - final ObjectWithInfo<T> next; - - ObjectWithInfo(T object, ObjectInfo info, ObjectWithInfo<T> next) { - this.object = object; - this.info = info; - this.chunkKey = info.getChunkKey(); - this.next = next; - } - - ObjectWithInfo(T object, ChunkKey chunkKey, ObjectWithInfo<T> next) { - this.object = object; - this.info = null; - this.chunkKey = chunkKey; - this.next = next; - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java deleted file mode 100644 index 22608ee1b3..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentChunks.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
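QueueObjectLookup above overlaps index lookups with consumption: it keeps a bounded number of asynchronous batches running, parks the consumer with wait() when nothing is ready, and wakes it from the completion callbacks. A reduced sketch of that shape, with an Executor and Supplier standing in for the Database.objectIndex() calls (both are assumptions made for illustration):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.function.Supplier;

final class BatchPipelineSketch<T> {
    private final Executor pool;
    private final int maxBatches;
    private final Deque<T> ready = new ArrayDeque<>();
    private int running;
    private boolean moreInput = true;

    BatchPipelineSketch(Executor pool, int maxBatches) {
        this.pool = pool;
        this.maxBatches = maxBatches;
    }

    synchronized boolean canStartAnother() {
        return running < maxBatches;
    }

    synchronized void offerBatch(Supplier<List<T>> lookup) {
        running++;
        pool.execute(() -> {
            List<T> r = lookup.get();
            synchronized (this) {
                ready.addAll(r);
                running--;
                notifyAll();
            }
        });
    }

    synchronized void endOfInput() {
        moreInput = false;
        notifyAll();
    }

    // Returns the next result, or null when all batches and input are exhausted.
    synchronized T next() throws InterruptedException {
        for (;;) {
            T r = ready.poll();
            if (r != null)
                return r;
            if (!moreInput && running == 0)
                return null;
            wait();
        }
    }
}

A caller keeps submitting with offerBatch() while canStartAnother() holds and input remains, then drains results with next().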
- */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; -import java.util.HashMap; - -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.ObjectLoader; -import org.eclipse.jgit.storage.dht.DhtReader.ChunkAndOffset; -import org.eclipse.jgit.storage.dht.RefDataUtil.IdWithChunk; - -final class RecentChunks { - private final DhtReader reader; - - private final DhtReader.Statistics stats; - - private final HashMap<ChunkKey, Node> byKey; - - private int maxBytes; - - private int curBytes; - - private Node lruHead; - - private Node lruTail; - - RecentChunks(DhtReader reader) { - this.reader = reader; - this.stats = reader.getStatistics(); - this.byKey = new HashMap<ChunkKey, Node>(); - this.maxBytes = reader.getOptions().getChunkLimit(); - } - - void setMaxBytes(int newMax) { - maxBytes = Math.max(0, newMax); - if (0 < maxBytes) - prune(); - else - clear(); - } - - PackChunk get(ChunkKey key) { - Node n = byKey.get(key); - if (n != null) { - hit(n); - stats.recentChunks_Hits++; - return n.chunk; - } - stats.recentChunks_Miss++; - return null; - } - - void put(PackChunk chunk) { - Node n = byKey.get(chunk.getChunkKey()); - if (n != null && n.chunk == chunk) { - hit(n); - return; - } - - curBytes += chunk.getTotalSize(); - prune(); - - n = new Node(); - n.chunk = chunk; - byKey.put(chunk.getChunkKey(), n); - first(n); - } - - private void prune() { - while (maxBytes < curBytes) { - Node n = lruTail; - if (n == null) - break; - - PackChunk c = n.chunk; - curBytes -= c.getTotalSize(); - byKey.remove(c.getChunkKey()); - remove(n); - } - } - - ObjectLoader open(RepositoryKey repo, AnyObjectId objId, int typeHint) - throws IOException { - if (objId instanceof IdWithChunk) { - PackChunk chunk = get(((IdWithChunk) objId).getChunkKey()); - if (chunk != null) { - int pos = chunk.findOffset(repo, objId); - if (0 <= pos) - return PackChunk.read(chunk, pos, reader, typeHint); - } - - // IdWithChunk is only a hint, and can be wrong. Locally - // searching is faster than looking in the Database. - } - - for (Node n = lruHead; n != null; n = n.next) { - int pos = n.chunk.findOffset(repo, objId); - if (0 <= pos) { - hit(n); - stats.recentChunks_Hits++; - return PackChunk.read(n.chunk, pos, reader, typeHint); - } - } - - return null; - } - - ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) { - if (objId instanceof IdWithChunk) { - PackChunk chunk = get(((IdWithChunk) objId).getChunkKey()); - if (chunk != null) { - int pos = chunk.findOffset(repo, objId); - if (0 <= pos) - return new ChunkAndOffset(chunk, pos); - } - - // IdWithChunk is only a hint, and can be wrong. Locally - // searching is faster than looking in the Database. 
- } - - for (Node n = lruHead; n != null; n = n.next) { - int pos = n.chunk.findOffset(repo, objId); - if (0 <= pos) { - hit(n); - stats.recentChunks_Hits++; - return new ChunkAndOffset(n.chunk, pos); - } - } - - return null; - } - - boolean has(RepositoryKey repo, AnyObjectId objId) { - for (Node n = lruHead; n != null; n = n.next) { - int pos = n.chunk.findOffset(repo, objId); - if (0 <= pos) { - hit(n); - stats.recentChunks_Hits++; - return true; - } - } - return false; - } - - void clear() { - curBytes = 0; - lruHead = null; - lruTail = null; - byKey.clear(); - } - - private void hit(Node n) { - if (lruHead != n) { - remove(n); - first(n); - } - } - - private void remove(Node node) { - Node p = node.prev; - Node n = node.next; - - if (p != null) - p.next = n; - if (n != null) - n.prev = p; - - if (lruHead == node) - lruHead = n; - if (lruTail == node) - lruTail = p; - } - - private void first(Node node) { - Node h = lruHead; - - node.prev = null; - node.next = h; - - if (h != null) - h.prev = node; - else - lruTail = node; - - lruHead = node; - } - - private static class Node { - PackChunk chunk; - - Node prev; - - Node next; - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java deleted file mode 100644 index cb5882af12..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RecentInfoCache.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
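RecentChunks above is a byte-budgeted LRU built on a hand-rolled doubly linked list, which also lets lookups scan from most- to least-recently used chunk. Setting that scan aside, the eviction policy alone can be expressed with an access-ordered LinkedHashMap; a sketch of that equivalent formulation (not the JGit implementation):

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;

final class ByteBudgetLruSketch<K> {
    private final Map<K, byte[]> map =
            new LinkedHashMap<>(16, 0.75f, true /* access order */);
    private final int maxBytes;
    private int curBytes;

    ByteBudgetLruSketch(int maxBytes) {
        this.maxBytes = maxBytes;
    }

    byte[] get(K key) {
        return map.get(key); // a hit refreshes the entry's recency
    }

    void put(K key, byte[] value) {
        byte[] old = map.put(key, value);
        if (old != null)
            curBytes -= old.length;
        curBytes += value.length;
        prune();
    }

    private void prune() {
        // Iteration order runs least- to most-recently used; evict from the
        // front until the byte budget is met.
        Iterator<Map.Entry<K, byte[]>> it = map.entrySet().iterator();
        while (maxBytes < curBytes && it.hasNext()) {
            curBytes -= it.next().getValue().length;
            it.remove();
        }
    }
}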
- */ - -package org.eclipse.jgit.storage.dht; - -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.ObjectId; - -final class RecentInfoCache { - private final Map<ObjectId, List<ObjectInfo>> infoCache; - - RecentInfoCache(DhtReaderOptions options) { - final int sz = options.getRecentInfoCacheSize(); - infoCache = new LinkedHashMap<ObjectId, List<ObjectInfo>>(sz, 0.75f, true) { - private static final long serialVersionUID = 1L; - - @Override - protected boolean removeEldestEntry(Entry<ObjectId, List<ObjectInfo>> e) { - return sz < size(); - } - }; - } - - List<ObjectInfo> get(AnyObjectId id) { - return infoCache.get(id); - } - - void put(AnyObjectId id, List<ObjectInfo> info) { - infoCache.put(id.copy(), copyList(info)); - } - - private static List<ObjectInfo> copyList(List<ObjectInfo> info) { - int cnt = info.size(); - if (cnt == 1) - return Collections.singletonList(info.get(0)); - - ObjectInfo[] tmp = info.toArray(new ObjectInfo[cnt]); - return Collections.unmodifiableList(Arrays.asList(tmp)); - } - - void clear() { - infoCache.clear(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefDataUtil.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefDataUtil.java deleted file mode 100644 index b0d4a68e52..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefDataUtil.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.RefData; -import org.eclipse.jgit.lib.AnyObjectId; -import org.eclipse.jgit.lib.ObjectId; - -/** Tools to work with {@link RefData}. */ -public class RefDataUtil { - /** Magic constant meaning does not exist. */ - public static final RefData NONE = RefData.newBuilder().buildPartial(); - - static class IdWithChunk extends ObjectId { - private final ChunkKey chunkKey; - - IdWithChunk(AnyObjectId id, ChunkKey key) { - super(id); - this.chunkKey = key; - } - - ChunkKey getChunkKey() { - return chunkKey; - } - - @Override - public String toString() { - return name() + "->" + chunkKey; - } - } - - private RefDataUtil() { - // Utility class, do not create instances. - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java deleted file mode 100644 index b4d378f81a..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RefKey.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Constants.encode; -import static org.eclipse.jgit.storage.dht.KeyUtils.format32; -import static org.eclipse.jgit.storage.dht.KeyUtils.parse32; -import static org.eclipse.jgit.util.RawParseUtils.decode; - -import org.eclipse.jgit.lib.Constants; - -/** Unique identifier of a reference in the DHT. 
*/ -public final class RefKey implements RowKey { - /** - * @param repo - * @param name - * @return the key - */ - public static RefKey create(RepositoryKey repo, String name) { - return new RefKey(repo.asInt(), name); - } - - /** - * @param key - * @return the key - */ - public static RefKey fromBytes(byte[] key) { - int repo = parse32(key, 0); - String name = decode(key, 9, key.length); - return new RefKey(repo, name); - } - - /** - * @param key - * @return the key - */ - public static RefKey fromString(String key) { - int c = key.indexOf(':'); - int repo = parse32(Constants.encodeASCII(key.substring(0, c)), 0); - String name = key.substring(c + 1); - return new RefKey(repo, name); - } - - private final int repo; - - private final String name; - - RefKey(int repo, String name) { - this.repo = repo; - this.name = name; - } - - /** @return the repository this reference lives within. */ - public RepositoryKey getRepositoryKey() { - return RepositoryKey.fromInt(repo); - } - - /** @return the name of the reference. */ - public String getName() { - return name; - } - - public byte[] asBytes() { - byte[] nameRaw = encode(name); - byte[] r = new byte[9 + nameRaw.length]; - format32(r, 0, repo); - r[8] = ':'; - System.arraycopy(nameRaw, 0, r, 9, nameRaw.length); - return r; - } - - public String asString() { - return getRepositoryKey().asString() + ":" + name; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof RefKey) { - RefKey thisRef = this; - RefKey otherRef = (RefKey) other; - return thisRef.repo == otherRef.repo - && thisRef.name.equals(otherRef.name); - } - return false; - } - - @Override - public String toString() { - return "ref:" + asString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java deleted file mode 100644 index 2835d62507..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryKey.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
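Aside: the row layout asBytes() produces in the RefKey class above is eight formatted repository bytes, a ':' separator, then the reference name, and fromBytes() reverses it. A hedged round-trip sketch using only the factories shown in this class (RepositoryKey is defined in the next file of this patch):

import org.eclipse.jgit.storage.dht.RefKey;
import org.eclipse.jgit.storage.dht.RepositoryKey;

class RefKeyRoundTrip { // illustrative, not part of JGit
	public static void main(String[] args) {
		RepositoryKey repo = RepositoryKey.create(1);
		RefKey key = RefKey.create(repo, "refs/heads/master");

		byte[] row = key.asBytes();          // repository bytes + ':' + name
		RefKey copy = RefKey.fromBytes(row); // recovers repository id and name

		System.out.println(copy.equals(key)); // true
		System.out.println(copy.asString());  // e.g. "<repository-id>:refs/heads/master"
	}
}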
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.storage.dht.KeyUtils.format32; -import static org.eclipse.jgit.storage.dht.KeyUtils.parse32; -import static org.eclipse.jgit.util.RawParseUtils.decode; - -import org.eclipse.jgit.lib.Constants; - -/** */ -public final class RepositoryKey implements RowKey { - /** - * @param sequentialId - * @return the key - */ - public static RepositoryKey create(int sequentialId) { - return new RepositoryKey(Integer.reverse(sequentialId)); - } - - /** - * @param key - * @return the key - */ - public static RepositoryKey fromBytes(byte[] key) { - return new RepositoryKey(parse32(key, 0)); - } - - /** - * @param key - * @return the key - */ - public static RepositoryKey fromString(String key) { - return new RepositoryKey(parse32(Constants.encodeASCII(key), 0)); - } - - /** - * @param reverseId - * @return the key - */ - public static RepositoryKey fromInt(int reverseId) { - return new RepositoryKey(reverseId); - } - - private final int id; - - RepositoryKey(int id) { - this.id = id; - } - - /** @return 32 bit value describing the repository. */ - public int asInt() { - return id; - } - - public byte[] asBytes() { - byte[] r = new byte[8]; - format32(r, 0, asInt()); - return r; - } - - public String asString() { - return decode(asBytes()); - } - - @Override - public int hashCode() { - return id; - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof RepositoryKey) - return id == ((RepositoryKey) other).id; - return false; - } - - @Override - public String toString() { - return "repository:" + asString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java deleted file mode 100644 index 18443fa8ea..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepositoryName.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
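Aside: RepositoryKey.create above runs the sequential id through Integer.reverse before it is ever formatted, so consecutively created repositories scatter across the key space instead of piling up on one row range. A tiny JDK-only sketch of that spread:

class BitReversalDemo { // illustrative, not part of JGit
	public static void main(String[] args) {
		for (int seq = 1; seq <= 4; seq++) {
			// Exactly the transform RepositoryKey.create applies.
			System.out.printf("%d -> %08x%n", seq, Integer.reverse(seq));
		}
		// Prints: 1 -> 80000000, 2 -> 40000000, 3 -> c0000000, 4 -> 20000000
	}
}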
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import static org.eclipse.jgit.lib.Constants.encode; -import static org.eclipse.jgit.util.RawParseUtils.decode; - -/** Unique name of a repository, as specified by the URL. */ -public class RepositoryName implements RowKey { - /** - * @param name - * @return the key - */ - public static RepositoryName create(String name) { - return new RepositoryName(name); - } - - /** - * @param name - * @return the key - */ - public static RepositoryName fromBytes(byte[] name) { - return new RepositoryName(decode(name)); - } - - /** - * @param name - * @return the key - */ - public static RepositoryName fromString(String name) { - return new RepositoryName(name); - } - - private final String name; - - RepositoryName(String name) { - this.name = name; - } - - public byte[] asBytes() { - return encode(name); - } - - public String asString() { - return name; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof RepositoryName) - return name.equals(((RepositoryName) other).name); - return false; - } - - @Override - public String toString() { - return "repository:" + asString(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java deleted file mode 100644 index 8c14d30452..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RepresentationSelector.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.util.List; - -import org.eclipse.jgit.lib.ProgressMonitor; -import org.eclipse.jgit.storage.pack.PackWriter; - -final class RepresentationSelector extends BatchObjectLookup<DhtObjectToPack> { - private final PackWriter packer; - - private final DhtObjectRepresentation rep; - - RepresentationSelector(PackWriter packer, DhtReader reader, - ProgressMonitor monitor) { - super(reader, monitor); - setRetryMissingObjects(true); - - this.packer = packer; - this.rep = new DhtObjectRepresentation(); - } - - protected void onResult(DhtObjectToPack obj, List<ObjectInfo> info) { - // Go through the objects backwards. This is necessary because - // info is sorted oldest->newest but PackWriter wants the reverse - // order to try and prevent delta chain cycles. - // - for (int i = info.size() - 1; 0 <= i; i--) { - rep.set(info.get(i)); - packer.select(obj, rep); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java deleted file mode 100644 index e088b361c4..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/RowKey.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -/** - * Key for any row that the DHT will be asked to store. - * <p> - * Implementations of this interface know how to encode and decode themselves - * from a byte array format, expecting the DHT to use the byte array as the row - * key within the database. - * <p> - * It is strongly encouraged to use only row keys that are valid UTF-8 strings, - * as most DHT systems have client tools that can interact with rows using the - * UTF-8 encoding. - */ -public interface RowKey { - /** @return key formatted as byte array for storage in the DHT. */ - public byte[] asBytes(); - - /** @return key formatted as a String for storage in the DHT. */ - public String asString(); - - /** @return relatively unique hash code value for in-memory compares. */ - public int hashCode(); - - /** - * Compare this key to another key for equality. - * - * @param other - * the other key instance, may be null. - * @return true if these keys reference the same row. - */ - public boolean equals(Object other); - - /** @return pretty printable string for debugging/reporting only. */ - public String toString(); -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java deleted file mode 100644 index 3069886283..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/SizeQueue.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.io.IOException; - -import org.eclipse.jgit.errors.MissingObjectException; -import org.eclipse.jgit.lib.AsyncObjectSizeQueue; -import org.eclipse.jgit.lib.ObjectId; - -final class SizeQueue<T extends ObjectId> extends QueueObjectLookup<T> - implements AsyncObjectSizeQueue<T> { - private ObjectWithInfo<T> currResult; - - SizeQueue(DhtReader reader, Iterable<T> objectIds, boolean reportMissing) { - super(reader, reportMissing); - init(objectIds); - } - - public boolean next() throws MissingObjectException, IOException { - currResult = nextObjectWithInfo(); - return currResult != null; - } - - public T getCurrent() { - return currResult.object; - } - - public long getSize() { - return currResult.info.getSize(); - } - - public ObjectId getObjectId() { - return getCurrent(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java deleted file mode 100644 index 9ec379f0ec..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/StreamingCallback.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -/** - * Extension of {@link AsyncCallback} supporting partial results. - * <p> - * Instead of buffering all results for {@link #onSuccess(Object)}, the storage - * provider may choose to offer some results earlier by invoking the - * {@link #onPartialResult(Object)} method declared in this interface. - * <p> - * If any results were delivered early to {@link #onPartialResult(Object)} then - * {@link #onSuccess(Object)} is invoked with {@code null} when all results have - * been supplied and no more remain to be delivered. - * <p> - * If an error occurs, {@link #onFailure(DhtException)} will be invoked, - * potentially after one or more {@link #onPartialResult(Object)} notifications - * were already made. In an error condition, {@link #onSuccess(Object)} will not - * be invoked. - * - * @param <T> - * type of object returned from the operation on success. - */ -public interface StreamingCallback<T> extends AsyncCallback<T> { - /** - * Receives partial results from the operation. - * - * @param result - * the result value from the operation. - */ - public void onPartialResult(T result); -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java deleted file mode 100644 index 4833375e46..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Sync.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Helper to implement a synchronous method in terms of an asynchronous one. - * <p> - * Implementors can use this type to wait for an asynchronous computation to - * complete on a background thread by passing the Sync instance as though it - * were the AsyncCallback: - * - * <pre> - * Sync<T> sync = Sync.create(); - * async(..., sync); - * return sync.get(timeout, TimeUnit.MILLISECONDS); - * </pre> - * - * @param <T> - * type of value object. - */ -public abstract class Sync<T> implements AsyncCallback<T> { - private static final Sync<?> NONE = new Sync<Object>() { - public void onSuccess(Object result) { - // Discard - } - - public void onFailure(DhtException error) { - // Discard - } - - @Override - public Object get(long timeout, TimeUnit unit) throws DhtException, - InterruptedException, TimeoutException { - return null; - } - }; - - /** - * Helper method to create a new sync object. - * - * @param <T> - * type of value object. - * @return a new instance. - */ - public static <T> Sync<T> create() { - return new Value<T>(); - } - - /** - * Singleton callback that ignores onSuccess, onFailure. - * - * @param <T> - * type of value object. - * @return callback that discards all results. - */ - @SuppressWarnings("unchecked") - public static <T> Sync<T> none() { - return (Sync<T>) NONE; - } - - /** - * Wait for the asynchronous operation to complete. - * <p> - * To prevent application deadlock, waiting can only be performed with the - * supplied timeout. - * - * @param timeout - * amount of time to wait before failing. - * @return the returned value. - * @throws DhtException - * the asynchronous operation failed. - * @throws InterruptedException - * the current thread was interrupted before the operation - * completed. - * @throws TimeoutException - * the timeout elapsed before the operation completed. - */ - public T get(Timeout timeout) throws DhtException, InterruptedException, - TimeoutException { - return get(timeout.getTime(), timeout.getUnit()); - } - - /** - * Wait for the asynchronous operation to complete. - * <p> - * To prevent application deadlock, waiting can only be performed with the - * supplied timeout. - * - * @param timeout - * amount of time to wait before failing. - * @param unit - * units of {@code timeout}. For example - * {@link TimeUnit#MILLISECONDS}. - * @return the returned value. 
- * @throws DhtException - * the asynchronous operation failed. - * @throws InterruptedException - * the current thread was interrupted before the operation - * completed. - * @throws TimeoutException - * the timeout elapsed before the operation completed. - */ - public abstract T get(long timeout, TimeUnit unit) throws DhtException, - InterruptedException, TimeoutException; - - private static class Value<T> extends Sync<T> { - - private final CountDownLatch wait = new CountDownLatch(1); - - private T data; - - private DhtException error; - - /** - * Wait for the asynchronous operation to complete. - * <p> - * To prevent application deadlock, waiting can only be performed with - * the supplied timeout. - * - * @param timeout - * amount of time to wait before failing. - * @param unit - * units of {@code timeout}. For example - * {@link TimeUnit#MILLISECONDS}. - * @return the returned value. - * @throws DhtException - * the asynchronous operation failed. - * @throws InterruptedException - * the current thread was interrupted before the operation - * completed. - * @throws TimeoutException - * the timeout elapsed before the operation completed. - */ - public T get(long timeout, TimeUnit unit) throws DhtException, - InterruptedException, TimeoutException { - if (wait.await(timeout, unit)) { - if (error != null) - throw error; - return data; - } - throw new TimeoutException(); - } - - public void onSuccess(T obj) { - data = obj; - wait.countDown(); - } - - public void onFailure(DhtException err) { - error = err; - wait.countDown(); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java deleted file mode 100644 index 2e4f3a4cc9..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/Timeout.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht; - -import java.text.MessageFormat; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.eclipse.jgit.lib.Config; -import org.eclipse.jgit.util.StringUtils; - -/** Length of time to wait for an operation before giving up. */ -public class Timeout { - /** - * Construct a new timeout, expressed in milliseconds. - * - * @param millis - * number of milliseconds to wait. - * @return the timeout. - */ - public static Timeout milliseconds(int millis) { - return new Timeout(millis, TimeUnit.MILLISECONDS); - } - - /** - * Construct a new timeout, expressed in seconds. - * - * @param sec - * number of seconds to wait. - * @return the timeout. - */ - public static Timeout seconds(int sec) { - return new Timeout(sec, TimeUnit.SECONDS); - } - - /** - * Construct a new timeout, expressed in (possibly fractional) seconds. - * - * @param sec - * number of seconds to wait. - * @return the timeout. - */ - public static Timeout seconds(double sec) { - return new Timeout((long) (sec * 1000), TimeUnit.MILLISECONDS); - } - - /** - * Obtain a timeout from the configuration. - * - * @param cfg - * configuration to read. - * @param section - * section key to read. - * @param subsection - * subsection to read, may be null. - * @param name - * variable to read. - * @param defaultValue - * default to return if no timeout is specified in the - * configuration. - * @return the configured timeout. 
- */ - public static Timeout getTimeout(Config cfg, String section, - String subsection, String name, Timeout defaultValue) { - String valStr = cfg.getString(section, subsection, name); - if (valStr == null) - return defaultValue; - - valStr = valStr.trim(); - if (valStr.length() == 0) - return defaultValue; - - Matcher m = matcher("^([1-9][0-9]*(?:\\.[0-9]*)?)\\s*(.*)$", valStr); - if (!m.matches()) - throw notTimeUnit(section, subsection, name, valStr); - - String digits = m.group(1); - String unitName = m.group(2).trim(); - - long multiplier; - TimeUnit unit; - if ("".equals(unitName)) { - multiplier = 1; - unit = TimeUnit.MILLISECONDS; - - } else if (anyOf(unitName, "ms", "millisecond", "milliseconds")) { - multiplier = 1; - unit = TimeUnit.MILLISECONDS; - - } else if (anyOf(unitName, "s", "sec", "second", "seconds")) { - multiplier = 1; - unit = TimeUnit.SECONDS; - - } else if (anyOf(unitName, "m", "min", "minute", "minutes")) { - multiplier = 60; - unit = TimeUnit.SECONDS; - - } else if (anyOf(unitName, "h", "hr", "hour", "hours")) { - multiplier = 3600; - unit = TimeUnit.SECONDS; - - } else - throw notTimeUnit(section, subsection, name, valStr); - - if (digits.indexOf('.') == -1) { - try { - return new Timeout(multiplier * Long.parseLong(digits), unit); - } catch (NumberFormatException nfe) { - throw notTimeUnit(section, subsection, name, valStr); - } - } else { - double inputTime; - try { - inputTime = multiplier * Double.parseDouble(digits); - } catch (NumberFormatException nfe) { - throw notTimeUnit(section, subsection, name, valStr); - } - - if (unit == TimeUnit.MILLISECONDS) { - TimeUnit newUnit = TimeUnit.NANOSECONDS; - long t = (long) (inputTime * newUnit.convert(1, unit)); - return new Timeout(t, newUnit); - - } else if (unit == TimeUnit.SECONDS && multiplier == 1) { - TimeUnit newUnit = TimeUnit.MILLISECONDS; - long t = (long) (inputTime * newUnit.convert(1, unit)); - return new Timeout(t, newUnit); - - } else { - return new Timeout((long) inputTime, unit); - } - } - } - - private static Matcher matcher(String pattern, String valStr) { - return Pattern.compile(pattern).matcher(valStr); - } - - private static boolean anyOf(String a, String... cases) { - for (String b : cases) { - if (StringUtils.equalsIgnoreCase(a, b)) - return true; - } - return false; - } - - private static IllegalArgumentException notTimeUnit(String section, - String subsection, String name, String valueString) { - String key = section - + (subsection != null ? "." + subsection : "") - + "." + name; - return new IllegalArgumentException(MessageFormat.format( - DhtText.get().notTimeUnit, key, valueString)); - } - - private final long time; - - private final TimeUnit unit; - - /** - * Construct a new timeout. - * - * @param time - * how long to wait. - * @param unit - * the unit that {@code time} was expressed in. - */ - public Timeout(long time, TimeUnit unit) { - this.time = time; - this.unit = unit; - } - - /** @return how long to wait, expressed as {@link #getUnit()}s. */ - public long getTime() { - return time; - } - - /** @return the unit of measure for {@link #getTime()}. 
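Aside: a hedged usage sketch for getTimeout above. The parser accepts a bare number (taken as milliseconds) or a number followed by ms, s/sec, m/min or h/hr, including fractional values. The [dht] section and timeout variable below are invented for illustration; only the Timeout API itself comes from this file.

import org.eclipse.jgit.errors.ConfigInvalidException;
import org.eclipse.jgit.lib.Config;
import org.eclipse.jgit.storage.dht.Timeout;

class TimeoutConfigDemo { // illustrative, not part of JGit
	public static void main(String[] args) throws ConfigInvalidException {
		Config cfg = new Config();
		cfg.fromText("[dht]\n\ttimeout = 2.5 s\n"); // hypothetical key

		Timeout t = Timeout.getTimeout(cfg, "dht", null, "timeout",
				Timeout.milliseconds(500)); // default when unset
		System.out.println(t); // "2500 MILLISECONDS"
	}
}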
*/ - public TimeUnit getUnit() { - return unit; - } - - @Override - public int hashCode() { - return (int) time; - } - - @Override - public boolean equals(Object other) { - if (other instanceof Timeout) - return getTime() == ((Timeout) other).getTime() - && getUnit().equals(((Timeout) other).getUnit()); - return false; - } - - @Override - public String toString() { - return getTime() + " " + getUnit(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java deleted file mode 100644 index db0fded3f2..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ChunkTable.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.PackChunk; -import org.eclipse.jgit.storage.dht.StreamingCallback; - -/** - * Stores object data in compressed pack format. - * <p> - * Each chunk stores multiple objects, using the highly compressed and Git - * native pack file format. Chunks are sized during insertion, but average - * around 1 MB for historical chunks, and may be as small as a few KB for very - * recent chunks that were written in small bursts. 
- * <p> - * Objects whose compressed form is too large to fit into a single chunk are - * fragmented across multiple chunks, and the fragment information is used to - * put them back together in the correct order. Since the fragmenting occurs - * after data compression, random access to bytes of the large object is not - * currently possible. - * <p> - * Chunk keys are very well distributed, by embedding a uniformly random number - * at the start of the key, and also including a small time component. This - * layout permits chunks to be evenly spread across a cluster of disks or - * servers in a round-robin fashion (based on a hash of the leading bytes), but - * also offers some chance for older chunks to be located near each other and - * have that part of the storage system see less activity over time. - */ -public interface ChunkTable { - /** - * Asynchronously load one or more chunks - * <p> - * Callers are responsible for breaking up very large collections of chunk - * keys into smaller units, based on the reader's batch size option. Since - * chunks typically 1 MB each, 10-20 keys is a reasonable batch size, but - * depends on available JVM memory and performance of this method obtaining - * chunks from the database. - * - * @param options - * options to control reading. - * @param keys - * the chunk keys to obtain. - * @param callback - * receives the results when ready. If this is an instance of - * {@link StreamingCallback}, implementors should try to deliver - * results early. - */ - public void get(Context options, Set<ChunkKey> keys, - AsyncCallback<Collection<PackChunk.Members>> callback); - - /** - * Asynchronously load one or more chunk meta fields. - * <p> - * Usually meta is loaded by {@link #get(Context, Set, AsyncCallback)}, but - * some uses may require looking up the fragment data without having the - * entire chunk. - * - * @param options - * options to control reading. - * @param keys - * the chunk keys to obtain. - * @param callback - * receives the results when ready. If this is an instance of - * {@link StreamingCallback}, implementors should try to deliver - * results early. - */ - public void getMeta(Context options, Set<ChunkKey> keys, - AsyncCallback<Map<ChunkKey, ChunkMeta>> callback); - - /** - * Put some (or all) of a single chunk. - * <p> - * The higher level storage layer typically stores chunks in pieces. Its - * common to first store the data, then much later store the fragments and - * index. Sometimes all of the members are ready at once, and can be put - * together as a single unit. This method handles both approaches to storing - * a chunk. - * <p> - * Implementors must use a partial writing approach, for example: - * - * <pre> - * ColumnUpdateList list = ...; - * if (chunk.getChunkData() != null) - * list.addColumn("chunk_data", chunk.getChunkData()); - * if (chunk.getChunkIndex() != null) - * list.addColumn("chunk_index", chunk.getChunkIndex()); - * if (chunk.getFragments() != null) - * list.addColumn("fragments", chunk.getFragments()); - * createOrUpdateRow(chunk.getChunkKey(), list); - * </pre> - * - * @param chunk - * description of the chunk to be stored. - * @param buffer - * buffer to enqueue the put onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void put(PackChunk.Members chunk, WriteBuffer buffer) - throws DhtException; - - /** - * Completely remove a chunk and all of its data elements. 
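Aside: a hedged caller-side sketch of driving the asynchronous get above from blocking code, by passing the Sync adapter shown earlier in this patch as the callback (the table and key arguments are placeholders supplied by the caller):

import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.TimeoutException;

import org.eclipse.jgit.storage.dht.ChunkKey;
import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.PackChunk;
import org.eclipse.jgit.storage.dht.Sync;
import org.eclipse.jgit.storage.dht.Timeout;
import org.eclipse.jgit.storage.dht.spi.ChunkTable;
import org.eclipse.jgit.storage.dht.spi.Context;

class ChunkFetchDemo { // illustrative, not part of JGit
	static Collection<PackChunk.Members> load(ChunkTable table, ChunkKey key)
			throws DhtException, InterruptedException, TimeoutException {
		// Sync implements AsyncCallback and buffers the result until get().
		Sync<Collection<PackChunk.Members>> sync = Sync.create();
		table.get(Context.LOCAL, Collections.singleton(key), sync);
		return sync.get(Timeout.seconds(5));
	}
}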
- * <p> - * Chunk removal should occur as quickly as possible after the flush has - * completed, as the caller has already ensured the chunk is not in use. - * - * @param key - * key of the chunk to remove. - * @param buffer - * buffer to enqueue the remove onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java deleted file mode 100644 index b0e7ff4874..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Context.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -/** - * Options used when accessing a {@link Database}. - * <p> - * <i>Warning:</i> This type may change from enumeration to class in the future. - */ -public enum Context { - /** Perform a fast read, but may miss results. */ - FAST_MISSING_OK, - - /** Read from a local replica. */ - LOCAL, - - /** Repair the local replica if a read failed. */ - READ_REPAIR; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java deleted file mode 100644 index fbad5d80e8..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/Database.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. 
- * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -/** - * A distributed database implementation. - * <p> - * A DHT provider must implement this interface to return table references for - * each of the named tables. The database and the tables it returns are held as - * singletons, and thus must be thread-safe. If the underlying implementation - * needs to use individual "connections" for each operation, it is responsible - * for setting up a connection pool, borrowing and returning resources within - * each of the table APIs. - * <p> - * Most APIs on the tables are asynchronous and must perform their computation - * in the background using a different thread than the caller. Implementations - * that have only an underlying synchronous API should configure and use an - * {@link java.util.concurrent.ExecutorService} to perform computation in the - * background on a thread pool. - * <p> - * Tables returned by these methods should be singletons, as the higher level - * DHT implementation usually invokes these methods each time it needs to use a - * given table. The suggested implementation approach is: - * - * <pre> - * class MyDatabase implements Database { - * private final RepositoryIndexTable rep = new MyRepositoryIndex(); - * - * private final RefTable ref = new MyRefTable(); - * - * public RepositoryIndexTable repositoryIndex() { - * return rep; - * } - * - * public RefTable ref() { - * return ref; - * } - * } - * </pre> - */ -public interface Database { - /** @return a handle to the table listing known repositories. 
*/ - public RepositoryIndexTable repositoryIndex(); - - /** @return a handle to the table storing repository metadata. */ - public RepositoryTable repository(); - - /** @return a handle to the table listing references in a repository. */ - public RefTable ref(); - - /** @return a handle to the table listing known objects. */ - public ObjectIndexTable objectIndex(); - - /** @return a handle to the table listing pack data chunks. */ - public ChunkTable chunk(); - - /** - * Create a new WriteBuffer for the current thread. - * <p> - * Unlike other methods on this interface, the returned buffer <b>must</b> - * be a new object on every invocation. Buffers do not need to be - * thread-safe. - * - * @return a new buffer to handle pending writes. - */ - public WriteBuffer newWriteBuffer(); -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java deleted file mode 100644 index 9245815f69..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/ObjectIndexTable.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht.spi; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; - -import org.eclipse.jgit.lib.ObjectId; -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.ObjectIndexKey; -import org.eclipse.jgit.storage.dht.ObjectInfo; - -/** - * Associates an {@link ObjectId} to the {@link ChunkKey} its stored in. - * <p> - * This table provides a global index listing every single object within the - * repository, and which chunks the object can be found it. Readers use this - * table to find an object when they are forced to start from a bare SHA-1 that - * was input by a user, or supplied over the network from a client. - */ -public interface ObjectIndexTable { - /** - * Asynchronously locate one or more objects in the repository. - * <p> - * Callers are responsible for breaking up very large collections of objects - * into smaller units, based on the reader's batch size option. 1,000 to - * 10,000 is a reasonable range for the reader to batch on. - * - * @param options - * options to control reading. - * @param objects - * set of object names to locate the chunks of. - * @param callback - * receives the results when ready. - */ - public void get(Context options, Set<ObjectIndexKey> objects, - AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback); - - /** - * Record the fact that {@code objId} can be found by {@code info}. - * <p> - * If there is already data for {@code objId} in the table, this method - * should add the new chunk onto the existing data list. - * <p> - * This method should use batched asynchronous puts as much as possible. - * Initial imports of an existing repository may require millions of add - * operations to this table, one for each object being imported. - * - * @param objId - * the unique ObjectId. - * @param info - * a chunk that is known to store {@code objId}. - * @param buffer - * buffer to enqueue the put onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer) - throws DhtException; - - /** - * Remove a single chunk from an object. - * <p> - * If this is the last remaining chunk for the object, the object should - * also be removed from the table. Removal can be deferred, or can occur - * immediately. That is, {@code get()} may return the object with an empty - * collection, but to prevent unlimited disk usage the database should - * eventually remove the object. - * - * @param objId - * the unique ObjectId. - * @param chunk - * the chunk that needs to be removed from this object. - * @param buffer - * buffer to enqueue the remove onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer) - throws DhtException; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java deleted file mode 100644 index b46ca0b5a6..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RefTable.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. 
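Aside: because get in the ObjectIndexTable above is asynchronous, a provider that can stream rows may hand them back in batches through the StreamingCallback extension shown earlier in this patch, rather than buffering everything for onSuccess. A hedged sketch of such a callback (illustrative class, thread-safety concerns elided):

import java.util.Collection;
import java.util.Map;

import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.ObjectIndexKey;
import org.eclipse.jgit.storage.dht.ObjectInfo;
import org.eclipse.jgit.storage.dht.StreamingCallback;

class CountingLookup implements
		StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> {
	private int seen; // unsynchronized for brevity; providers may call back from other threads

	public void onPartialResult(Map<ObjectIndexKey, Collection<ObjectInfo>> result) {
		seen += result.size(); // early batch, delivered before the operation ends
	}

	public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> result) {
		if (result != null) // null when everything came via onPartialResult
			seen += result.size();
		System.out.println(seen + " objects located");
	}

	public void onFailure(DhtException error) {
		error.printStackTrace();
	}
}

An instance of this type would be passed to get in place of a buffering callback such as Sync.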
- * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.RefData; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.RefDataUtil; -import org.eclipse.jgit.storage.dht.RefKey; -import org.eclipse.jgit.storage.dht.RepositoryKey; - -/** - * Tracks all branches and tags for a repository. - * <p> - * Each repository has one or more references, pointing to the most recent - * revision on that branch, or to the tagged revision if its a tag. - */ -public interface RefTable { - /** - * Read all known references in the repository. - * - * @param options - * options to control reading. - * @param repository - * the repository to load the references from. - * @return map of all references. Empty map if there are no references. - * @throws DhtException - * the database cannot be read. - * @throws TimeoutException - * the operation to read the database timed out. - */ - public Map<RefKey, RefData> getAll(Context options, RepositoryKey repository) - throws DhtException, TimeoutException; - - /** - * Compare a reference, and delete if it matches. - * - * @param refKey - * reference to delete. - * @param oldData - * the old data for the reference. The delete only occurs if the - * value is still equal to {@code oldData}. - * @return true if the delete was successful; false if the current value - * does not match {@code oldData}. - * @throws DhtException - * the database cannot be updated. - * @throws TimeoutException - * the operation to modify the database timed out. 
- */ - public boolean compareAndRemove(RefKey refKey, RefData oldData) - throws DhtException, TimeoutException; - - /** - * Compare a reference, and put if it matches. - * - * @param refKey - * reference to create or replace. - * @param oldData - * the old data for the reference. The put only occurs if the - * value is still equal to {@code oldData}. Use - * {@link RefDataUtil#NONE} if the reference should not exist and - * is being created. - * @param newData - * new value to store. - * @return true if the put was successful; false if the current value does - * not match {@code oldData}. - * @throws DhtException - * the database cannot be updated. - * @throws TimeoutException - * the operation to modify the database timed out. - */ - public boolean compareAndPut(RefKey refKey, RefData oldData, RefData newData) - throws DhtException, TimeoutException; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java deleted file mode 100644 index 36afd13229..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryIndexTable.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.RepositoryName; - -/** - * Maps a repository name from a URL to the internal {@link RepositoryKey}.
- * <p> - * The internal identifier is used for all data storage, as it is part of the row - * keys for each data row that makes up the repository. By using an internal - * key, repositories can be efficiently renamed in O(1) time, without changing - * existing data rows. - */ -public interface RepositoryIndexTable { - /** - * Find a repository by name. - * - * @param name - * name of the repository, from the URL. - * @return the internal key; null if not found. - * @throws DhtException - * @throws TimeoutException - */ - public RepositoryKey get(RepositoryName name) throws DhtException, - TimeoutException; - - /** - * Atomically record the association of name to identifier. - * <p> - * This method must use some sort of transaction system to ensure the name - * either points at {@code key} when complete, or fails fast with an - * exception if the name is used by a different key. This may require - * running some sort of lock management service in parallel to the database. - * - * @param name - * name of the repository. - * @param key - * internal key used to find the repository's data. - * @throws DhtException - * @throws TimeoutException - */ - public void putUnique(RepositoryName name, RepositoryKey key) - throws DhtException, TimeoutException; - - /** - * Remove the association of a name to an identifier. - * <p> - * This method must use some sort of transaction system to ensure the name - * is removed only if it currently references {@code key}. This may require - * running some sort of lock management service in parallel to the database. - * - * @param name - * name of the repository. - * @param key - * internal key defining the repository. - * @throws DhtException - * @throws TimeoutException - */ - public void remove(RepositoryName name, RepositoryKey key) - throws DhtException, TimeoutException; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java deleted file mode 100644 index 8f2dab83ed..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/RepositoryTable.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -import java.util.Collection; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.storage.dht.CachedPackKey; -import org.eclipse.jgit.storage.dht.ChunkInfo; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.RepositoryKey; - -/** - * Tracks high-level information about all known repositories. - */ -public interface RepositoryTable { - /** - * Generate a new unique RepositoryKey. - * - * @return a new unique key. - * @throws DhtException - * keys cannot be generated at this time. - */ - public RepositoryKey nextKey() throws DhtException; - - /** - * Record the existence of a chunk. - * - * @param repo - * repository owning the chunk. - * @param info - * information about the chunk. - * @param buffer - * buffer to enqueue the put onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer) - throws DhtException; - - /** - * Remove the information about a chunk. - * - * @param repo - * repository owning the chunk. - * @param chunk - * the chunk that needs to be deleted. - * @param buffer - * buffer to enqueue the remove onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer) - throws DhtException; - - /** - * Get the cached packs, if any. - * - * @param repo - * repository owning the packs. - * @return cached pack descriptions. - * @throws DhtException - * @throws TimeoutException - */ - public Collection<CachedPackInfo> getCachedPacks(RepositoryKey repo) - throws DhtException, TimeoutException; - - /** - * Record the existence of a cached pack. - * - * @param repo - * repository owning the pack. - * @param info - * information about the pack. - * @param buffer - * buffer to enqueue the put onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. - */ - public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer) - throws DhtException; - - /** - * Remove the existence of a cached pack. - * - * @param repo - * repository owning the pack. - * @param key - * information about the pack. - * @param buffer - * buffer to enqueue the put onto. - * @throws DhtException - * if the buffer flushed and an enqueued operation failed. 
- */ - public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer) - throws DhtException; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java deleted file mode 100644 index 5521ec2fb8..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/WriteBuffer.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi; - -import org.eclipse.jgit.storage.dht.DhtException; - -/** Potentially buffers writes until full, or until flush. */ -public interface WriteBuffer { - /** - * Flush any pending writes, and wait for them to complete. - * - * @throws DhtException - * one or more writes failed. As writes may occur in any order, - * the exact state of the database is unspecified. - */ - public void flush() throws DhtException; - - /** - * Abort pending writes, and wait for acknowledgment. - * <p> - * Once a buffer has been aborted, it cannot be reused. Application code - * must discard the buffer instance and use a different buffer to issue - * subsequent operations. - * <p> - * If writes have not been started yet, they should be discarded and not - * submitted to the storage system. - * <p> - * If writes have already been started asynchronously in the background, - * this method may try to cancel them, but must wait for the operation to - * either complete or abort before returning. 
This allows callers to clean - * up by scanning the storage system and making corrections to clean up any - * partial writes. - * - * @throws DhtException - * one or more already started writes failed. - */ - public void abort() throws DhtException; -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java deleted file mode 100644 index 4eb26bd0d8..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheBuffer.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import static java.util.Collections.singleton; - -import java.util.ArrayList; -import java.util.List; - -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.Sync; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change; -import org.eclipse.jgit.storage.dht.spi.util.AbstractWriteBuffer; - -/** WriteBuffer implementation for a {@link CacheDatabase}. */ -public class CacheBuffer extends AbstractWriteBuffer { - private final WriteBuffer dbBuffer; - - private final CacheService client; - - private final Sync<Void> none; - - private List<CacheService.Change> pending; - - private List<CacheService.Change> afterFlush; - - /** - * Initialize a new buffer. - * - * @param dbBuffer - * the underlying database's own buffer. - * @param client - * connection to the cache service. 
- * @param options - * options controlling cache operations. - */ - public CacheBuffer(WriteBuffer dbBuffer, CacheService client, - CacheOptions options) { - super(null, options.getWriteBufferSize()); - this.dbBuffer = dbBuffer; - this.client = client; - this.none = Sync.none(); - } - - /** - * Schedule removal of a key from the cache. - * <p> - * Unlike {@link #removeAfterFlush(CacheKey)}, these removals can be flushed - * when the cache buffer is full, potentially before any corresponding - * removal is written to the underlying database. - * - * @param key - * key to remove. - * @throws DhtException - * a prior flush failed. - */ - public void remove(CacheKey key) throws DhtException { - modify(CacheService.Change.remove(key)); - } - - /** - * Schedule a removal only after the underlying database flushes. - * <p> - * Unlike {@link #remove(CacheKey)}, these removals are buffered until the - * application calls {@link #flush()} and aren't sent to the cache service - * until after the underlying database flush() operation is completed - * successfully. - * - * @param key - * key to remove. - */ - public void removeAfterFlush(CacheKey key) { - if (afterFlush == null) - afterFlush = newList(); - afterFlush.add(CacheService.Change.remove(key)); - } - - /** - * Schedule storing (or replacing) a key in the cache. - * - * @param key - * key to store. - * @param value - * new value to store. - * @throws DhtException - * a prior flush failed. - */ - public void put(CacheKey key, byte[] value) throws DhtException { - modify(CacheService.Change.put(key, value)); - } - - /** - * Schedule any cache change. - * - * @param op - * the cache operation. - * @throws DhtException - * a prior flush failed. - */ - public void modify(CacheService.Change op) throws DhtException { - int sz = op.getKey().getBytes().length; - if (op.getData() != null) - sz += op.getData().length; - if (add(sz)) { - if (pending == null) - pending = newList(); - pending.add(op); - queued(sz); - } else { - client.modify(singleton(op), wrap(none, sz)); - } - } - - /** @return the underlying database's own write buffer. */ - public WriteBuffer getWriteBuffer() { - return dbBuffer; - } - - @Override - protected void startQueuedOperations(int bytes) throws DhtException { - client.modify(pending, wrap(none, bytes)); - pending = null; - } - - public void flush() throws DhtException { - dbBuffer.flush(); - - if (afterFlush != null) { - for (CacheService.Change op : afterFlush) - modify(op); - afterFlush = null; - } - - super.flush(); - } - - @Override - public void abort() throws DhtException { - pending = null; - afterFlush = null; - - dbBuffer.abort(); - super.abort(); - } - - private static List<Change> newList() { - return new ArrayList<CacheService.Change>(); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java deleted file mode 100644 index b7f94fd6c8..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheChunkTable.java +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. 
- * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import static java.util.Collections.singleton; -import static java.util.Collections.singletonMap; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ExecutorService; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.PackChunk; -import org.eclipse.jgit.storage.dht.StreamingCallback; -import org.eclipse.jgit.storage.dht.Sync; -import org.eclipse.jgit.storage.dht.spi.ChunkTable; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change; - -import com.google.protobuf.CodedInputStream; -import com.google.protobuf.CodedOutputStream; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.WireFormat; - -/** Cache wrapper around ChunkTable. */ -public class CacheChunkTable implements ChunkTable { - private final ChunkTable db; - - private final ExecutorService executor; - - private final CacheService client; - - private final Sync<Void> none; - - private final Namespace nsChunk = Namespace.CHUNK; - - private final Namespace nsMeta = Namespace.CHUNK_META; - - /** - * Initialize a new wrapper. 
- * - * @param dbTable - * the underlying database's corresponding table. - * @param cacheDatabase - * the cache database. - */ - public CacheChunkTable(ChunkTable dbTable, CacheDatabase cacheDatabase) { - this.db = dbTable; - this.executor = cacheDatabase.getExecutorService(); - this.client = cacheDatabase.getClient(); - this.none = Sync.none(); - } - - public void get(Context options, Set<ChunkKey> keys, - AsyncCallback<Collection<PackChunk.Members>> callback) { - List<CacheKey> toFind = new ArrayList<CacheKey>(keys.size()); - for (ChunkKey k : keys) - toFind.add(nsChunk.key(k)); - client.get(toFind, new ChunkFromCache(options, keys, callback)); - } - - public void getMeta(Context options, Set<ChunkKey> keys, - AsyncCallback<Map<ChunkKey, ChunkMeta>> callback) { - List<CacheKey> toFind = new ArrayList<CacheKey>(keys.size()); - for (ChunkKey k : keys) - toFind.add(nsMeta.key(k)); - client.get(toFind, new MetaFromCache(options, keys, callback)); - } - - public void put(PackChunk.Members chunk, WriteBuffer buffer) - throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - db.put(chunk, buf.getWriteBuffer()); - - // Only store fragmented meta. This is all callers should ask for. - if (chunk.hasMeta() && chunk.getMeta().getFragmentCount() != 0) { - buf.put(nsMeta.key(chunk.getChunkKey()), - chunk.getMeta().toByteArray()); - } - - if (chunk.hasChunkData()) - buf.put(nsChunk.key(chunk.getChunkKey()), encode(chunk)); - else - buf.removeAfterFlush(nsChunk.key(chunk.getChunkKey())); - } - - public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - buf.remove(nsChunk.key(key)); - buf.remove(nsMeta.key(key)); - db.remove(key, buf.getWriteBuffer()); - } - - private static byte[] encode(PackChunk.Members members) { - // Its too slow to encode ByteBuffer through the standard code. - // Since the message is only 3 fields, do it by hand. - ByteBuffer data = members.getChunkDataAsByteBuffer(); - ByteBuffer index = members.getChunkIndexAsByteBuffer(); - ChunkMeta meta = members.getMeta(); - - int sz = 0; - if (data != null) - sz += computeByteBufferSize(1, data); - if (index != null) - sz += computeByteBufferSize(2, index); - if (meta != null) - sz += CodedOutputStream.computeMessageSize(3, meta); - - byte[] r = new byte[sz]; - CodedOutputStream out = CodedOutputStream.newInstance(r); - try { - if (data != null) - writeByteBuffer(out, 1, data); - if (index != null) - writeByteBuffer(out, 2, index); - if (meta != null) - out.writeMessage(3, meta); - } catch (IOException err) { - throw new RuntimeException("Cannot buffer chunk", err); - } - return r; - } - - private static int computeByteBufferSize(int fieldNumber, ByteBuffer data) { - int n = data.remaining(); - return CodedOutputStream.computeTagSize(fieldNumber) - + CodedOutputStream.computeRawVarint32Size(n) - + n; - } - - private static void writeByteBuffer(CodedOutputStream out, int fieldNumber, - ByteBuffer data) throws IOException { - byte[] d = data.array(); - int p = data.arrayOffset() + data.position(); - int n = data.remaining(); - out.writeTag(fieldNumber, WireFormat.WIRETYPE_LENGTH_DELIMITED); - out.writeRawVarint32(n); - out.writeRawBytes(d, p, n); - } - - private static PackChunk.Members decode(ChunkKey key, byte[] raw) { - PackChunk.Members members = new PackChunk.Members(); - members.setChunkKey(key); - - // Its too slow to convert using the standard code, as copies - // are made. Instead find offsets in the stream and use that. 
- CodedInputStream in = CodedInputStream.newInstance(raw); - try { - int tag = in.readTag(); - for (;;) { - switch (WireFormat.getTagFieldNumber(tag)) { - case 0: - return members; - case 1: { - int cnt = in.readRawVarint32(); - int ptr = in.getTotalBytesRead(); - members.setChunkData(raw, ptr, cnt); - in.skipRawBytes(cnt); - tag = in.readTag(); - if (WireFormat.getTagFieldNumber(tag) != 2) - continue; - } - //$FALL-THROUGH$ - case 2: { - int cnt = in.readRawVarint32(); - int ptr = in.getTotalBytesRead(); - members.setChunkIndex(raw, ptr, cnt); - in.skipRawBytes(cnt); - tag = in.readTag(); - if (WireFormat.getTagFieldNumber(tag) != 3) - continue; - } - //$FALL-THROUGH$ - case 3: { - int cnt = in.readRawVarint32(); - int oldLimit = in.pushLimit(cnt); - members.setMeta(ChunkMeta.parseFrom(in)); - in.popLimit(oldLimit); - tag = in.readTag(); - continue; - } - default: - in.skipField(tag); - } - } - } catch (IOException err) { - throw new RuntimeException("Cannot decode chunk", err); - } - } - - private class ChunkFromCache implements - StreamingCallback<Map<CacheKey, byte[]>> { - private final Object lock = new Object(); - - private final Context options; - - private final Set<ChunkKey> remaining; - - private final AsyncCallback<Collection<PackChunk.Members>> normalCallback; - - private final StreamingCallback<Collection<PackChunk.Members>> streamingCallback; - - private final List<PackChunk.Members> all; - - ChunkFromCache(Context options, Set<ChunkKey> keys, - AsyncCallback<Collection<PackChunk.Members>> callback) { - this.options = options; - this.remaining = new HashSet<ChunkKey>(keys); - this.normalCallback = callback; - - if (callback instanceof StreamingCallback<?>) { - streamingCallback = (StreamingCallback<Collection<PackChunk.Members>>) callback; - all = null; - } else { - streamingCallback = null; - all = new ArrayList<PackChunk.Members>(keys.size()); - } - } - - public void onPartialResult(Map<CacheKey, byte[]> result) { - for (Map.Entry<CacheKey, byte[]> ent : result.entrySet()) { - ChunkKey key = ChunkKey.fromBytes(ent.getKey().getBytes()); - PackChunk.Members members = decode(key, ent.getValue()); - - if (streamingCallback != null) { - streamingCallback.onPartialResult(singleton(members)); - - synchronized (lock) { - remaining.remove(key); - } - } else { - synchronized (lock) { - all.add(members); - remaining.remove(key); - } - } - } - } - - public void onSuccess(Map<CacheKey, byte[]> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - - synchronized (lock) { - if (remaining.isEmpty() || options == Context.FAST_MISSING_OK) { - normalCallback.onSuccess(all); - } else { - db.get(options, remaining, new ChunkFromDatabase(all, - normalCallback, streamingCallback)); - } - } - } - - public void onFailure(DhtException error) { - // TODO(spearce) We may want to just drop to database here. 
- normalCallback.onFailure(error); - } - } - - private class ChunkFromDatabase implements - StreamingCallback<Collection<PackChunk.Members>> { - private final Object lock = new Object(); - - private final List<PackChunk.Members> all; - - private final AsyncCallback<Collection<PackChunk.Members>> normalCallback; - - private final StreamingCallback<Collection<PackChunk.Members>> streamingCallback; - - ChunkFromDatabase( - List<PackChunk.Members> all, - AsyncCallback<Collection<PackChunk.Members>> normalCallback, - StreamingCallback<Collection<PackChunk.Members>> streamingCallback) { - this.all = all; - this.normalCallback = normalCallback; - this.streamingCallback = streamingCallback; - } - - public void onPartialResult(Collection<PackChunk.Members> result) { - final List<PackChunk.Members> toPutIntoCache = copy(result); - - if (streamingCallback != null) - streamingCallback.onPartialResult(result); - else { - synchronized (lock) { - all.addAll(result); - } - } - - // Encoding is rather expensive, so move the cache population - // into it a different background thread to prevent the current - // database task from being starved of time. - // - executor.submit(new Runnable() { - public void run() { - for (PackChunk.Members members : toPutIntoCache) { - ChunkKey key = members.getChunkKey(); - Change op = Change.put(nsChunk.key(key), encode(members)); - client.modify(singleton(op), none); - } - } - }); - } - - private <T> List<T> copy(Collection<T> result) { - return new ArrayList<T>(result); - } - - public void onSuccess(Collection<PackChunk.Members> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - - synchronized (lock) { - normalCallback.onSuccess(all); - } - } - - public void onFailure(DhtException error) { - normalCallback.onFailure(error); - } - } - - private class MetaFromCache implements - StreamingCallback<Map<CacheKey, byte[]>> { - private final Object lock = new Object(); - - private final Context options; - - private final Set<ChunkKey> remaining; - - private final AsyncCallback<Map<ChunkKey, ChunkMeta>> normalCallback; - - private final StreamingCallback<Map<ChunkKey, ChunkMeta>> streamingCallback; - - private final Map<ChunkKey, ChunkMeta> all; - - MetaFromCache(Context options, Set<ChunkKey> keys, - AsyncCallback<Map<ChunkKey, ChunkMeta>> callback) { - this.options = options; - this.remaining = new HashSet<ChunkKey>(keys); - this.normalCallback = callback; - - if (callback instanceof StreamingCallback<?>) { - streamingCallback = (StreamingCallback<Map<ChunkKey, ChunkMeta>>) callback; - all = null; - } else { - streamingCallback = null; - all = new HashMap<ChunkKey, ChunkMeta>(); - } - } - - public void onPartialResult(Map<CacheKey, byte[]> result) { - for (Map.Entry<CacheKey, byte[]> ent : result.entrySet()) { - ChunkKey key = ChunkKey.fromBytes(ent.getKey().getBytes()); - ChunkMeta meta; - try { - meta = ChunkMeta.parseFrom(ent.getValue()); - } catch (InvalidProtocolBufferException e) { - // Invalid meta message, remove the cell from cache. 
- client.modify(singleton(Change.remove(ent.getKey())), - Sync.<Void> none()); - continue; - } - - if (streamingCallback != null) { - streamingCallback.onPartialResult(singletonMap(key, meta)); - - synchronized (lock) { - remaining.remove(key); - } - } else { - synchronized (lock) { - all.put(key, meta); - remaining.remove(key); - } - } - } - } - - public void onSuccess(Map<CacheKey, byte[]> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - - synchronized (lock) { - if (remaining.isEmpty() || options == Context.FAST_MISSING_OK) { - normalCallback.onSuccess(all); - } else { - db.getMeta(options, remaining, new MetaFromDatabase(all, - normalCallback, streamingCallback)); - } - } - } - - public void onFailure(DhtException error) { - // TODO(spearce) We may want to just drop to database here. - normalCallback.onFailure(error); - } - } - - private class MetaFromDatabase implements - StreamingCallback<Map<ChunkKey, ChunkMeta>> { - private final Object lock = new Object(); - - private final Map<ChunkKey, ChunkMeta> all; - - private final AsyncCallback<Map<ChunkKey, ChunkMeta>> normalCallback; - - private final StreamingCallback<Map<ChunkKey, ChunkMeta>> streamingCallback; - - MetaFromDatabase(Map<ChunkKey, ChunkMeta> all, - AsyncCallback<Map<ChunkKey, ChunkMeta>> normalCallback, - StreamingCallback<Map<ChunkKey, ChunkMeta>> streamingCallback) { - this.all = all; - this.normalCallback = normalCallback; - this.streamingCallback = streamingCallback; - } - - public void onPartialResult(Map<ChunkKey, ChunkMeta> result) { - final Map<ChunkKey, ChunkMeta> toPutIntoCache = copy(result); - - if (streamingCallback != null) - streamingCallback.onPartialResult(result); - else { - synchronized (lock) { - all.putAll(result); - } - } - - // Encoding is rather expensive, so move the cache population - // into it a different background thread to prevent the current - // database task from being starved of time. - // - executor.submit(new Runnable() { - public void run() { - for (Map.Entry<ChunkKey, ChunkMeta> ent - : toPutIntoCache.entrySet()) { - ChunkKey key = ent.getKey(); - Change op = Change.put(nsMeta.key(key), - ent.getValue().toByteArray()); - client.modify(singleton(op), none); - } - } - }); - } - - private <K, V> Map<K, V> copy(Map<K, V> result) { - return new HashMap<K, V>(result); - } - - public void onSuccess(Map<ChunkKey, ChunkMeta> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - - synchronized (lock) { - normalCallback.onSuccess(all); - } - } - - public void onFailure(DhtException error) { - normalCallback.onFailure(error); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java deleted file mode 100644 index da3ea5fd4c..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheDatabase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import java.util.concurrent.ExecutorService; - -import org.eclipse.jgit.storage.dht.spi.ChunkTable; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable; -import org.eclipse.jgit.storage.dht.spi.RefTable; -import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable; -import org.eclipse.jgit.storage.dht.spi.RepositoryTable; - -/** - * Uses a cache for fast-lookups, but falls-back to another Database. - * <p> - * On a read miss, this database falls back to read another Database, and then - * puts the read value into the cache for later access. - */ -public class CacheDatabase implements Database { - private final Database database; - - private final ExecutorService executorService; - - private final CacheService client; - - private final CacheOptions options; - - private final CacheRepositoryIndexTable repositoryIndex; - - private final CacheRepositoryTable repository; - - private final CacheRefTable ref; - - private final CacheObjectIndexTable objectIndex; - - private final CacheChunkTable chunk; - - /** - * Initialize a cache database. - * - * @param database - * underlying storage database, used for read-misses and all - * writes. - * @param executor - * executor service to perform expensive cache updates in the - * background. - * @param client - * implementation of the cache service. - * @param options - * configuration of the cache. 
- */ - public CacheDatabase(Database database, ExecutorService executor, - CacheService client, CacheOptions options) { - this.database = database; - this.executorService = executor; - this.client = client; - this.options = options; - - repositoryIndex = new CacheRepositoryIndexTable(database - .repositoryIndex(), this); - - repository = new CacheRepositoryTable(database.repository(), this); - ref = new CacheRefTable(database.ref(), this); - objectIndex = new CacheObjectIndexTable(database.objectIndex(), this); - chunk = new CacheChunkTable(database.chunk(), this); - } - - /** @return the underlying database the cache wraps. */ - public Database getDatabase() { - return database; - } - - /** @return executor pool for long operations. */ - public ExecutorService getExecutorService() { - return executorService; - } - - /** @return client connecting to the cache service. */ - public CacheService getClient() { - return client; - } - - /** @return connection options for the cache service. */ - public CacheOptions getOptions() { - return options; - } - - public RepositoryIndexTable repositoryIndex() { - return repositoryIndex; - } - - public RepositoryTable repository() { - return repository; - } - - public RefTable ref() { - return ref; - } - - public ObjectIndexTable objectIndex() { - return objectIndex; - } - - public ChunkTable chunk() { - return chunk; - } - - public CacheBuffer newWriteBuffer() { - return new CacheBuffer(database.newWriteBuffer(), client, options); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java deleted file mode 100644 index 67c6c0ff08..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheKey.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import java.util.Arrays; - -import org.eclipse.jgit.storage.dht.RowKey; -import org.eclipse.jgit.util.RawParseUtils; - -/** Simple byte array based key for cache storage. */ -public class CacheKey { - private final Namespace ns; - - private final byte[] key; - - private volatile int hashCode; - - /** - * Wrap a database key. - * - * @param ns - * the namespace the key is contained within. - * @param key - * the key to wrap. - */ - public CacheKey(Namespace ns, RowKey key) { - this(ns, key.asBytes()); - } - - /** - * Wrap a byte array. - * - * @param ns - * the namespace the key is contained within. - * @param key - * the key to wrap. - */ - public CacheKey(Namespace ns, byte[] key) { - this.ns = ns; - this.key = key; - } - - /** @return namespace to segregate keys by. */ - public Namespace getNamespace() { - return ns; - } - - /** @return this key's bytes, within {@link #getNamespace()}. */ - public byte[] getBytes() { - return key; - } - - @Override - public int hashCode() { - if (hashCode == 0) { - int h = 5381; - for (int ptr = 0; ptr < key.length; ptr++) - h = ((h << 5) + h) + (key[ptr] & 0xff); - if (h == 0) - h = 1; - hashCode = h; - } - return hashCode; - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof CacheKey) { - CacheKey m = (CacheKey) other; - return ns.equals(m.ns) && Arrays.equals(key, m.key); - } - return false; - } - - @Override - public String toString() { - return ns + ":" + RawParseUtils.decode(key); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java deleted file mode 100644 index 0cd3549e04..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheObjectIndexTable.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ExecutorService; - -import org.eclipse.jgit.generated.storage.dht.proto.GitCache.CachedObjectIndex; -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.ObjectIndexKey; -import org.eclipse.jgit.storage.dht.ObjectInfo; -import org.eclipse.jgit.storage.dht.StreamingCallback; -import org.eclipse.jgit.storage.dht.Sync; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** Cache wrapper around ObjectIndexTable. */ -public class CacheObjectIndexTable implements ObjectIndexTable { - private final ObjectIndexTable db; - - private final ExecutorService executor; - - private final CacheService client; - - private final Namespace ns = Namespace.OBJECT_INDEX; - - /** - * Initialize a new wrapper. - * - * @param dbTable - * the underlying database's corresponding table. - * @param cacheDatabase - * the cache database. - */ - public CacheObjectIndexTable(ObjectIndexTable dbTable, - CacheDatabase cacheDatabase) { - this.db = dbTable; - this.executor = cacheDatabase.getExecutorService(); - this.client = cacheDatabase.getClient(); - } - - public void get(Context options, Set<ObjectIndexKey> objects, - AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback) { - List<CacheKey> toFind = new ArrayList<CacheKey>(objects.size()); - for (ObjectIndexKey k : objects) - toFind.add(ns.key(k)); - client.get(toFind, new LoaderFromCache(options, objects, callback)); - } - - public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer) - throws DhtException { - // During addition, the cache is not populated. This prevents a - // race condition when the cache is cold. Readers need to scan - // the database and ensure the oldest ObjectInfo is loaded into - // the cache in order to allow PackChunk to break delta cycles. - // - // This does have a small performance penalty, as recently added - // objects are often read not long after they were written. 
But - // without good multi-system transaction support between the - // cache and the underlying storage we cannot do better. - // - db.add(objId, info, ((CacheBuffer) buffer).getWriteBuffer()); - } - - public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer) - throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - db.remove(objId, chunk, buf.getWriteBuffer()); - - // TODO This suffers from a race condition. The removal from the - // cache can occur before the database update takes place, and a - // concurrent reader might re-populate the cache with the stale data. - // - buf.remove(ns.key(objId)); - } - - private class LoaderFromCache implements - StreamingCallback<Map<CacheKey, byte[]>> { - private final Object lock = new Object(); - - private final Context options; - - private final Set<ObjectIndexKey> remaining; - - private final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> normalCallback; - - private final StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> streamingCallback; - - private final Map<ObjectIndexKey, Collection<ObjectInfo>> all; - - LoaderFromCache( - Context options, - Set<ObjectIndexKey> objects, - AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback) { - this.options = options; - this.remaining = new HashSet<ObjectIndexKey>(objects); - this.normalCallback = callback; - - if (callback instanceof StreamingCallback<?>) { - streamingCallback = (StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>>) callback; - all = null; - } else { - streamingCallback = null; - all = new HashMap<ObjectIndexKey, Collection<ObjectInfo>>(); - } - } - - public void onPartialResult(Map<CacheKey, byte[]> result) { - Map<ObjectIndexKey, Collection<ObjectInfo>> tmp; - if (streamingCallback != null) - tmp = new HashMap<ObjectIndexKey, Collection<ObjectInfo>>(); - else - tmp = null; - - for (Map.Entry<CacheKey, byte[]> e : result.entrySet()) { - ObjectIndexKey objKey; - Collection<ObjectInfo> list; - try { - list = decode(e.getValue()); - } catch (InvalidProtocolBufferException badCell) { - client.modify( - Collections.singleton(Change.remove(e.getKey())), - Sync.<Void> none()); - continue; - } - objKey = ObjectIndexKey.fromBytes(e.getKey().getBytes()); - - if (tmp != null) - tmp.put(objKey, list); - else { - synchronized (lock) { - all.put(objKey, list); - remaining.remove(objKey); - } - } - } - - if (tmp != null) { - streamingCallback.onPartialResult(tmp); - synchronized (lock) { - remaining.removeAll(tmp.keySet()); - } - } - } - - private Collection<ObjectInfo> decode(byte[] value) - throws InvalidProtocolBufferException { - CachedObjectIndex cacheEntry = CachedObjectIndex.parseFrom(value); - int sz = cacheEntry.getItemCount(); - ObjectInfo[] r = new ObjectInfo[sz]; - for (int i = 0; i < sz; i++) { - CachedObjectIndex.Item item = cacheEntry.getItem(i); - r[i] = new ObjectInfo( - ChunkKey.fromString(item.getChunkKey()), - item.getTime(), - item.getObjectInfo()); - } - return Arrays.asList(r); - } - - public void onSuccess(Map<CacheKey, byte[]> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - - synchronized (lock) { - if (remaining.isEmpty() || options == Context.FAST_MISSING_OK) { - normalCallback.onSuccess(all); - } else { - db.get(options, remaining, new LoaderFromDatabase(all, - normalCallback, streamingCallback)); - } - } - } - - public void onFailure(DhtException error) { - // TODO(spearce) We may want to just drop to database here. 
- normalCallback.onFailure(error); - } - } - - private class LoaderFromDatabase implements - StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> { - private final Object lock = new Object(); - - private final Map<ObjectIndexKey, Collection<ObjectInfo>> all; - - private final AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> normalCallback; - - private final StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> streamingCallback; - - LoaderFromDatabase( - Map<ObjectIndexKey, Collection<ObjectInfo>> all, - AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> normalCallback, - StreamingCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> streamingCallback) { - this.all = all; - this.normalCallback = normalCallback; - this.streamingCallback = streamingCallback; - } - - public void onPartialResult( - Map<ObjectIndexKey, Collection<ObjectInfo>> result) { - final Map<ObjectIndexKey, Collection<ObjectInfo>> toPut = copy(result); - - if (streamingCallback != null) - streamingCallback.onPartialResult(result); - else { - synchronized (lock) { - all.putAll(result); - } - } - - // Encoding is rather expensive, so move the cache population - // into it a different background thread to prevent the current - // database task from being starved of time. - // - executor.submit(new Runnable() { - public void run() { - List<Change> ops = new ArrayList<Change>(toPut.size()); - - for (Map.Entry<ObjectIndexKey, Collection<ObjectInfo>> e : all(toPut)) { - List<ObjectInfo> items = copy(e.getValue()); - ObjectInfo.sort(items); - ops.add(Change.put(ns.key(e.getKey()), encode(items))); - } - - client.modify(ops, Sync.<Void> none()); - } - - private byte[] encode(List<ObjectInfo> items) { - CachedObjectIndex.Builder b; - b = CachedObjectIndex.newBuilder(); - for (ObjectInfo info : items) { - CachedObjectIndex.Item.Builder i = b.addItemBuilder(); - i.setChunkKey(info.getChunkKey().asString()); - i.setObjectInfo(info.getData()); - if (0 < info.getTime()) - i.setTime(info.getTime()); - } - return b.build().toByteArray(); - } - }); - } - - private <K, V> Map<K, V> copy(Map<K, V> map) { - return new HashMap<K, V>(map); - } - - private <T> List<T> copy(Collection<T> result) { - return new ArrayList<T>(result); - } - - private <K, V> Set<Map.Entry<K, V>> all(final Map<K, V> toPut) { - return toPut.entrySet(); - } - - public void onSuccess(Map<ObjectIndexKey, Collection<ObjectInfo>> result) { - if (result != null && !result.isEmpty()) - onPartialResult(result); - - synchronized (lock) { - normalCallback.onSuccess(all); - } - } - - public void onFailure(DhtException error) { - normalCallback.onFailure(error); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java deleted file mode 100644 index 9eef55c3ff..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheOptions.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import org.eclipse.jgit.lib.Config; -import org.eclipse.jgit.storage.dht.Timeout; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; - -/** Options to configure the cache. */ -public class CacheOptions { - private Timeout timeout; - - private int writeBufferSize; - - /** Initialize default options. */ - public CacheOptions() { - setTimeout(Timeout.milliseconds(500)); - setWriteBufferSize(512 * 1024); - } - - /** @return default timeout for all operations. */ - public Timeout getTimeout() { - return timeout; - } - - /** - * Set the default timeout to wait on long operations. - * - * @param maxWaitTime - * new wait time. - * @return {@code this} - */ - public CacheOptions setTimeout(Timeout maxWaitTime) { - if (maxWaitTime == null || maxWaitTime.getTime() < 0) - throw new IllegalArgumentException(); - timeout = maxWaitTime; - return this; - } - - /** @return size in bytes to buffer operations. */ - public int getWriteBufferSize() { - return writeBufferSize; - } - - /** - * Set the maximum number of outstanding bytes in a {@link WriteBuffer}. - * - * @param sizeInBytes - * maximum number of bytes. - * @return {@code this} - */ - public CacheOptions setWriteBufferSize(int sizeInBytes) { - writeBufferSize = Math.max(1024, sizeInBytes); - return this; - } - - /** - * Update properties by setting fields from the configuration. - * <p> - * If a property is not defined in the configuration, then it is left - * unmodified. - * - * @param rc - * configuration to read properties from. 
- * @return {@code this} - */ - public CacheOptions fromConfig(final Config rc) { - setTimeout(Timeout.getTimeout(rc, "cache", "dht", "timeout", getTimeout())); - setWriteBufferSize(rc.getInt("cache", "dht", "writeBufferSize", getWriteBufferSize())); - return this; - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java deleted file mode 100644 index 2b6c8dac31..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRefTable.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.RefData; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.RefKey; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.RefTable; - -/** - * Cache wrapper around RefTable. - * <p> - * Currently this is a straight pass-through. - */ -public class CacheRefTable implements RefTable { - private final RefTable db; - - /** - * Initialize a new wrapper. - * - * @param dbTable - * the underlying database's corresponding table. - * @param cacheDatabase - * the cache database. 
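CacheOptions.fromConfig above pulls both settings from the cache.dht section of a standard JGit Config. A plausible configuration fragment, assuming the timeout value is interpreted as milliseconds and writeBufferSize as bytes (the unit handling lives in Timeout.getTimeout, which is not shown in this part of the diff):

    [cache "dht"]
      timeout = 500
      writeBufferSize = 524288
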
- */ - public CacheRefTable(RefTable dbTable, CacheDatabase cacheDatabase) { - this.db = dbTable; - } - - public Map<RefKey, RefData> getAll(Context options, RepositoryKey repository) - throws DhtException, TimeoutException { - return db.getAll(options, repository); - } - - public boolean compareAndRemove(RefKey refKey, RefData oldData) - throws DhtException, TimeoutException { - return db.compareAndRemove(refKey, oldData); - } - - public boolean compareAndPut(RefKey refKey, RefData oldData, RefData newData) - throws DhtException, TimeoutException { - return db.compareAndPut(refKey, oldData, newData); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java deleted file mode 100644 index b50092c6d1..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryIndexTable.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; - -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.RepositoryName; -import org.eclipse.jgit.storage.dht.Sync; -import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable; -import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change; - -/** Cache wrapper around RepositoryIndexTable. */ -public class CacheRepositoryIndexTable implements RepositoryIndexTable { - private final RepositoryIndexTable db; - - private final CacheService client; - - private final CacheOptions options; - - private final Namespace ns; - - private final Sync<Void> none; - - /** - * Initialize a new wrapper. - * - * @param dbTable - * the underlying database's corresponding table. - * @param cacheDatabase - * the cache database. - */ - public CacheRepositoryIndexTable(RepositoryIndexTable dbTable, - CacheDatabase cacheDatabase) { - this.db = dbTable; - this.client = cacheDatabase.getClient(); - this.options = cacheDatabase.getOptions(); - this.ns = Namespace.REPOSITORY_INDEX; - this.none = Sync.none(); - } - - public RepositoryKey get(RepositoryName name) throws DhtException, - TimeoutException { - CacheKey memKey = ns.key(name); - Sync<Map<CacheKey, byte[]>> sync = Sync.create(); - client.get(singleton(memKey), sync); - - Map<CacheKey, byte[]> result; - try { - result = sync.get(options.getTimeout()); - } catch (InterruptedException e) { - throw new TimeoutException(); - } catch (TimeoutException timeout) { - // Fall through and read the database directly. - result = emptyMap(); - } - - byte[] data = result.get(memKey); - if (data != null) { - if (data.length == 0) - return null; - return RepositoryKey.fromBytes(data); - } - - RepositoryKey key = db.get(name); - data = key != null ? key.asBytes() : new byte[0]; - client.modify(singleton(Change.put(memKey, data)), none); - return key; - } - - public void putUnique(RepositoryName name, RepositoryKey key) - throws DhtException, TimeoutException { - db.putUnique(name, key); - - Sync<Void> sync = Sync.create(); - CacheKey memKey = ns.key(name); - byte[] data = key.asBytes(); - client.modify(singleton(Change.put(memKey, data)), sync); - try { - sync.get(options.getTimeout()); - } catch (InterruptedException e) { - throw new TimeoutException(); - } - } - - public void remove(RepositoryName name, RepositoryKey key) - throws DhtException, TimeoutException { - db.remove(name, key); - - Sync<Void> sync = Sync.create(); - CacheKey memKey = ns.key(name); - client.modify(singleton(Change.remove(memKey)), sync); - try { - sync.get(options.getTimeout()); - } catch (InterruptedException e) { - throw new TimeoutException(); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java deleted file mode 100644 index a378e0a8b3..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheRepositoryTable.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. 
- * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; - -import java.util.Collection; -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.generated.storage.dht.proto.GitCache.CachedPackInfoList; -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.storage.dht.CachedPackKey; -import org.eclipse.jgit.storage.dht.ChunkInfo; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.Sync; -import org.eclipse.jgit.storage.dht.spi.RepositoryTable; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.cache.CacheService.Change; - -import com.google.protobuf.InvalidProtocolBufferException; - -/** Cache wrapper around RepositoryTable. */ -public class CacheRepositoryTable implements RepositoryTable { - private final RepositoryTable db; - - private final CacheService client; - - private final CacheOptions options; - - private final Namespace nsCachedPack = Namespace.CACHED_PACK; - - private final Sync<Void> none; - - /** - * Initialize a new wrapper. - * - * @param dbTable - * the underlying database's corresponding table. - * @param cacheDatabase - * the cache database. 
- */ - public CacheRepositoryTable(RepositoryTable dbTable, - CacheDatabase cacheDatabase) { - this.db = dbTable; - this.client = cacheDatabase.getClient(); - this.options = cacheDatabase.getOptions(); - this.none = Sync.none(); - } - - public RepositoryKey nextKey() throws DhtException { - return db.nextKey(); - } - - public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer) - throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - db.put(repo, info, buf.getWriteBuffer()); - } - - public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer) - throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - db.remove(repo, chunk, buf.getWriteBuffer()); - } - - public Collection<CachedPackInfo> getCachedPacks(RepositoryKey repo) - throws DhtException, TimeoutException { - CacheKey memKey = nsCachedPack.key(repo); - Sync<Map<CacheKey, byte[]>> sync = Sync.create(); - client.get(singleton(memKey), sync); - - Map<CacheKey, byte[]> result; - try { - result = sync.get(options.getTimeout()); - } catch (InterruptedException e) { - throw new TimeoutException(); - } catch (TimeoutException timeout) { - // Fall through and read the database directly. - result = emptyMap(); - } - - byte[] data = result.get(memKey); - if (data != null) { - try { - return CachedPackInfoList.parseFrom(data).getPackList(); - } catch (InvalidProtocolBufferException e) { - // Invalidate the cache entry and fall through. - client.modify(singleton(Change.remove(memKey)), none); - } - } - - Collection<CachedPackInfo> r = db.getCachedPacks(repo); - CachedPackInfoList.Builder list = CachedPackInfoList.newBuilder(); - list.addAllPack(r); - client.modify( - singleton(Change.put(memKey, list.build().toByteArray())), - none); - return r; - } - - public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer) - throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - db.put(repo, info, buf.getWriteBuffer()); - buf.removeAfterFlush(nsCachedPack.key(repo)); - } - - public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer) - throws DhtException { - CacheBuffer buf = (CacheBuffer) buffer; - db.remove(repo, key, buf.getWriteBuffer()); - buf.removeAfterFlush(nsCachedPack.key(repo)); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java deleted file mode 100644 index 31616b51c5..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/CacheService.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import java.util.Collection; -import java.util.Map; - -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.StreamingCallback; - -/** Connects to the network based memory cache server(s). */ -public interface CacheService { - /** - * Lookup one or more cache keys and return the results. - * <p> - * Callers are responsible for breaking up very large collections of chunk - * keys into smaller units, based on the reader's batch size option. - * - * @param keys - * keys to locate. - * @param callback - * receives the results when ready. If this is an instance of - * {@link StreamingCallback}, implementors should try to deliver - * results early. - */ - void get(Collection<CacheKey> keys, - AsyncCallback<Map<CacheKey, byte[]>> callback); - - /** - * Modify one or more cache keys. - * - * @param changes - * changes to apply to the cache. - * @param callback - * receives notification when the changes have been applied. - */ - void modify(Collection<Change> changes, AsyncCallback<Void> callback); - - /** A change to the cache. */ - public static class Change { - /** Operation the change describes. */ - public static enum Type { - /** Store (or replace) the key. */ - PUT, - - /** Only store the key if not already stored. */ - PUT_IF_ABSENT, - - /** Remove the associated key. */ - REMOVE; - } - - /** - * Initialize a put operation. - * - * @param key - * the key to store. - * @param data - * the value to store. - * @return the operation. - */ - public static Change put(CacheKey key, byte[] data) { - return new Change(Type.PUT, key, data); - } - - /** - * Initialize a put operation. - * - * @param key - * the key to store. - * @param data - * the value to store. - * @return the operation. - */ - public static Change putIfAbsent(CacheKey key, byte[] data) { - return new Change(Type.PUT_IF_ABSENT, key, data); - } - - /** - * Initialize a remove operation. - * - * @param key - * the key to remove. - * @return the operation. - */ - public static Change remove(CacheKey key) { - return new Change(Type.REMOVE, key, null); - } - - private final Type type; - - private final CacheKey key; - - private final byte[] data; - - /** - * Initialize a new change. 
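In the table wrappers earlier in this diff, a Change is handed to modify() together with a Sync when the caller wants to block on the result, or Sync.none() when the update is fire-and-forget. A condensed sketch of the blocking form, mirroring CacheRepositoryIndexTable.putUnique (client, options, memKey and data stand in for values the caller already has):

    Sync<Void> sync = Sync.create();
    client.modify(Collections.singleton(Change.put(memKey, data)), sync);
    try {
        sync.get(options.getTimeout()); // block until the cache applied the change
    } catch (InterruptedException e) {
        throw new TimeoutException();
    }
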
- * - * @param type - * @param key - * @param data - */ - public Change(Type type, CacheKey key, byte[] data) { - this.type = type; - this.key = key; - this.data = data; - } - - /** @return type of change that will take place. */ - public Type getType() { - return type; - } - - /** @return the key that will be modified. */ - public CacheKey getKey() { - return key; - } - - /** @return new data value if this is a PUT type of change. */ - public byte[] getData() { - return data; - } - - @Override - public String toString() { - return getType() + " " + getKey(); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java deleted file mode 100644 index 76dc311987..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/cache/Namespace.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.cache; - -import java.util.Arrays; - -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.storage.dht.RowKey; -import org.eclipse.jgit.storage.dht.spi.ChunkTable; -import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable; -import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable; -import org.eclipse.jgit.util.RawParseUtils; - -/** Defines a space within the cache cluster. */ -public class Namespace { - /** Namespace used by the {@link ChunkTable}. */ - public static final Namespace CHUNK = create("chunk"); - - /** Namespace used by the {@link ChunkTable} for meta field only. 
*/ - public static final Namespace CHUNK_META = create("chunkMeta"); - - /** Namespace used by the {@link ObjectIndexTable}. */ - public static final Namespace OBJECT_INDEX = create("objectIndex"); - - /** Namespace used by the {@link RepositoryIndexTable}. */ - public static final Namespace REPOSITORY_INDEX = create("repositoryIndex"); - - /** Namespace used by the cached pack information. */ - public static final Namespace CACHED_PACK = create("cachedPack"); - - /** - * Create a namespace from a string name. - * - * @param name - * the name to wrap. - * @return the namespace. - */ - public static Namespace create(String name) { - return new Namespace(Constants.encode(name)); - } - - /** - * Create a namespace from a byte array. - * - * @param name - * the name to wrap. - * @return the namespace. - */ - public static Namespace create(byte[] name) { - return new Namespace(name); - } - - private final byte[] name; - - private volatile int hashCode; - - private Namespace(byte[] name) { - this.name = name; - } - - /** @return this namespace, encoded in UTF-8. */ - public byte[] getBytes() { - return name; - } - - /** - * Construct a MemKey within this namespace. - * - * @param key - * the key to include. - * @return key within this namespace. - */ - public CacheKey key(byte[] key) { - return new CacheKey(this, key); - } - - /** - * Construct a MemKey within this namespace. - * - * @param key - * the key to include. - * @return key within this namespace. - */ - public CacheKey key(RowKey key) { - return new CacheKey(this, key); - } - - @Override - public int hashCode() { - if (hashCode == 0) { - int h = 5381; - for (int ptr = 0; ptr < name.length; ptr++) - h = ((h << 5) + h) + (name[ptr] & 0xff); - if (h == 0) - h = 1; - hashCode = h; - } - return hashCode; - } - - @Override - public boolean equals(Object other) { - if (other == this) - return true; - if (other instanceof Namespace) - return Arrays.equals(name, ((Namespace) other).name); - return false; - } - - @Override - public String toString() { - return RawParseUtils.decode(name); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java deleted file mode 100644 index 277b2b83a8..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemChunkTable.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. 
nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.ChunkMeta; -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtText; -import org.eclipse.jgit.storage.dht.PackChunk; -import org.eclipse.jgit.storage.dht.spi.ChunkTable; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher; - -import com.google.protobuf.InvalidProtocolBufferException; - -final class MemChunkTable implements ChunkTable { - private final MemTable table = new MemTable(); - - private final ColumnMatcher colData = new ColumnMatcher("data"); - - private final ColumnMatcher colIndex = new ColumnMatcher("index"); - - private final ColumnMatcher colMeta = new ColumnMatcher("meta"); - - public void get(Context options, Set<ChunkKey> keys, - AsyncCallback<Collection<PackChunk.Members>> callback) { - int cnt = keys.size(); - List<PackChunk.Members> out = new ArrayList<PackChunk.Members>(cnt); - - for (ChunkKey chunk : keys) { - byte[] row = chunk.asBytes(); - MemTable.Cell cell; - - cell = table.get(row, colData.name()); - if (cell == null) - continue; - - PackChunk.Members m = new PackChunk.Members(); - m.setChunkKey(chunk); - m.setChunkData(cell.getValue()); - - cell = table.get(row, colIndex.name()); - if (cell != null) - m.setChunkIndex(cell.getValue()); - - cell = table.get(row, colMeta.name()); - if (cell != null) { - try { - m.setMeta(ChunkMeta.parseFrom(cell.getValue())); - } catch (InvalidProtocolBufferException err) { - callback.onFailure(new DhtException(MessageFormat.format( - DhtText.get().invalidChunkMeta, chunk), err)); - return; - } - } - - out.add(m); - } - - callback.onSuccess(out); - } - - public void getMeta(Context options, Set<ChunkKey> keys, - AsyncCallback<Map<ChunkKey, ChunkMeta>> callback) { - Map<ChunkKey, ChunkMeta> out = new HashMap<ChunkKey, ChunkMeta>(); - - for (ChunkKey chunk : keys) { - byte[] row = chunk.asBytes(); - MemTable.Cell cell = table.get(row, colMeta.name()); - if (cell != null) { - try { - out.put(chunk, ChunkMeta.parseFrom(cell.getValue())); - } catch (InvalidProtocolBufferException err) { - callback.onFailure(new 
DhtException(MessageFormat.format( - DhtText.get().invalidChunkMeta, chunk), err)); - return; - } - } - } - - callback.onSuccess(out); - } - - public void put(PackChunk.Members chunk, WriteBuffer buffer) - throws DhtException { - byte[] row = chunk.getChunkKey().asBytes(); - - if (chunk.hasChunkData()) - table.put(row, colData.name(), chunk.getChunkData()); - - if (chunk.hasChunkIndex()) - table.put(row, colIndex.name(), chunk.getChunkIndex()); - - if (chunk.hasMeta()) - table.put(row, colMeta.name(), chunk.getMeta().toByteArray()); - } - - public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException { - table.deleteRow(key.asBytes()); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java deleted file mode 100644 index e3bb7fdd11..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemObjectIndexTable.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore; -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtText; -import org.eclipse.jgit.storage.dht.ObjectIndexKey; -import org.eclipse.jgit.storage.dht.ObjectInfo; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher; - -import com.google.protobuf.InvalidProtocolBufferException; - -final class MemObjectIndexTable implements ObjectIndexTable { - private final MemTable table = new MemTable(); - - private final ColumnMatcher colInfo = new ColumnMatcher("info:"); - - public void get(Context options, Set<ObjectIndexKey> objects, - AsyncCallback<Map<ObjectIndexKey, Collection<ObjectInfo>>> callback) { - Map<ObjectIndexKey, Collection<ObjectInfo>> out = new HashMap<ObjectIndexKey, Collection<ObjectInfo>>(); - - for (ObjectIndexKey objId : objects) { - for (MemTable.Cell cell : table.scanFamily(objId.asBytes(), colInfo)) { - Collection<ObjectInfo> chunks = out.get(objId); - ChunkKey chunkKey; - if (chunks == null) { - chunks = new ArrayList<ObjectInfo>(4); - out.put(objId, chunks); - } - - chunkKey = ChunkKey.fromBytes(colInfo.suffix(cell.getName())); - try { - chunks.add(new ObjectInfo( - chunkKey, - cell.getTimestamp(), - GitStore.ObjectInfo.parseFrom(cell.getValue()))); - } catch (InvalidProtocolBufferException badCell) { - callback.onFailure(new DhtException(MessageFormat.format( - DhtText.get().invalidObjectInfo, objId, chunkKey), - badCell)); - return; - } - } - } - - callback.onSuccess(out); - } - - public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer) - throws DhtException { - ChunkKey chunk = info.getChunkKey(); - table.put(objId.asBytes(), colInfo.append(chunk.asBytes()), - info.getData().toByteArray()); - } - - public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer) - throws DhtException { - table.delete(objId.asBytes(), colInfo.append(chunk.asBytes())); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java deleted file mode 100644 index 595e3fdd7c..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRefTable.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.text.MessageFormat; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.RefData; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtText; -import org.eclipse.jgit.storage.dht.RefDataUtil; -import org.eclipse.jgit.storage.dht.RefKey; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.spi.Context; -import org.eclipse.jgit.storage.dht.spi.RefTable; -import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher; - -import com.google.protobuf.InvalidProtocolBufferException; - -final class MemRefTable implements RefTable { - private final MemTable table = new MemTable(); - - private final ColumnMatcher colRef = new ColumnMatcher("ref:"); - - public Map<RefKey, RefData> getAll(Context options, RepositoryKey repository) - throws DhtException, TimeoutException { - Map<RefKey, RefData> out = new HashMap<RefKey, RefData>(); - for (MemTable.Cell cell : table.scanFamily(repository.asBytes(), colRef)) { - RefKey ref = RefKey.fromBytes(colRef.suffix(cell.getName())); - try { - out.put(ref, RefData.parseFrom(cell.getValue())); - } catch (InvalidProtocolBufferException badCell) { - throw new DhtException(MessageFormat.format( - DhtText.get().invalidRefData, ref), badCell); - } - } - return out; - } - - public boolean compareAndPut(RefKey refKey, RefData oldData, RefData newData) - throws DhtException, TimeoutException { - RepositoryKey repo = refKey.getRepositoryKey(); - return table.compareAndSet( // - repo.asBytes(), // - colRef.append(refKey.asBytes()), // - oldData != RefDataUtil.NONE ? oldData.toByteArray() : null, // - newData.toByteArray()); - } - - public boolean compareAndRemove(RefKey refKey, RefData oldData) - throws DhtException, TimeoutException { - RepositoryKey repo = refKey.getRepositoryKey(); - return table.compareAndSet( // - repo.asBytes(), // - colRef.append(refKey.asBytes()), // - oldData != RefDataUtil.NONE ? 
oldData.toByteArray() : null, // - null); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java deleted file mode 100644 index 000ff77327..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryIndexTable.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.text.MessageFormat; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtText; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.RepositoryName; -import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable; -import org.eclipse.jgit.storage.dht.spi.memory.MemTable.Cell; -import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher; - -final class MemRepositoryIndexTable implements RepositoryIndexTable { - private final MemTable table = new MemTable(); - - private final ColumnMatcher colId = new ColumnMatcher("id"); - - public RepositoryKey get(RepositoryName name) throws DhtException, - TimeoutException { - Cell cell = table.get(name.asBytes(), colId.name()); - if (cell == null) - return null; - return RepositoryKey.fromBytes(cell.getValue()); - } - - public void putUnique(RepositoryName name, RepositoryKey key) - throws DhtException, TimeoutException { - boolean ok = table.compareAndSet( // - name.asBytes(), // - colId.name(), // - null, // - key.asBytes()); - if (!ok) - throw new DhtException(MessageFormat.format( - DhtText.get().repositoryAlreadyExists, name.asString())); - } - - public void remove(RepositoryName name, RepositoryKey key) - throws DhtException, TimeoutException { - boolean ok = table.compareAndSet( - name.asBytes(), - colId.name(), - key.asBytes(), - null); - if (!ok) - throw new DhtException(MessageFormat.format( - DhtText.get().repositoryAlreadyExists, name.asString())); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java deleted file mode 100644 index d393934a23..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemRepositoryTable.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.text.MessageFormat; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; - -import org.eclipse.jgit.generated.storage.dht.proto.GitStore.CachedPackInfo; -import org.eclipse.jgit.storage.dht.CachedPackKey; -import org.eclipse.jgit.storage.dht.ChunkInfo; -import org.eclipse.jgit.storage.dht.ChunkKey; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtText; -import org.eclipse.jgit.storage.dht.RepositoryKey; -import org.eclipse.jgit.storage.dht.spi.RepositoryTable; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; -import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher; - -import com.google.protobuf.InvalidProtocolBufferException; - -final class MemRepositoryTable implements RepositoryTable { - private final AtomicInteger nextId = new AtomicInteger(); - - private final MemTable table = new MemTable(); - - private final ColumnMatcher colChunkInfo = new ColumnMatcher("chunk-info:"); - - private final ColumnMatcher colCachedPack = new ColumnMatcher("cached-pack:"); - - public RepositoryKey nextKey() throws DhtException { - return RepositoryKey.create(nextId.incrementAndGet()); - } - - public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer) - throws DhtException { - table.put(repo.asBytes(), - colChunkInfo.append(info.getChunkKey().asBytes()), - info.getData().toByteArray()); - } - - public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer) - throws DhtException { - table.delete(repo.asBytes(), colChunkInfo.append(chunk.asBytes())); - } - - public Collection<CachedPackInfo> getCachedPacks(RepositoryKey repo) - throws DhtException, TimeoutException { - List<CachedPackInfo> out = new ArrayList<CachedPackInfo>(4); - for (MemTable.Cell cell : table.scanFamily(repo.asBytes(), colCachedPack)) { - try { - out.add(CachedPackInfo.parseFrom(cell.getValue())); - } catch (InvalidProtocolBufferException e) { - throw new DhtException(MessageFormat.format( - DhtText.get().invalidCachedPackInfo, repo, - CachedPackKey.fromBytes(cell.getName())), e); - } - } - return out; - } - - public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer) - throws DhtException { - CachedPackKey key = CachedPackKey.fromInfo(info); - table.put(repo.asBytes(), - colCachedPack.append(key.asBytes()), - info.toByteArray()); - } - - public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer) - throws DhtException { - table.delete(repo.asBytes(), colCachedPack.append(key.asBytes())); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java deleted file mode 100644 index ec28b34064..0000000000 --- 
a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemTable.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher; -import org.eclipse.jgit.util.RawParseUtils; -import org.eclipse.jgit.util.SystemReader; - -/** - * Tiny in-memory NoSQL style table. - * <p> - * This table is thread-safe, but not very efficient. It uses a single lock to - * protect its internal data structure from concurrent access, and stores all - * data as byte arrays. To reduce memory usage, the arrays passed by the caller - * during put or compareAndSet are used as-is in the internal data structure, - * and may be returned later. Callers should not modify byte arrays once they - * are stored in the table, or when obtained from the table. - */ -public class MemTable { - private final Map<Key, Map<Key, Cell>> map; - - private final Object lock; - - /** Initialize an empty table. */ - public MemTable() { - map = new HashMap<Key, Map<Key, Cell>>(); - lock = new Object(); - } - - /** - * Put a value into a cell. 
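As the class comment says, MemTable stores the caller's byte arrays as-is, so values must not be mutated after they are handed over. A small illustration of the put and compareAndSet contract, using the same Constants.encode helper the Namespace class above relies on (the row and column names are made up):

    MemTable table = new MemTable();
    byte[] row = Constants.encode("example-row");
    byte[] col = Constants.encode("id");

    table.put(row, col, Constants.encode("v1"));             // unconditional store
    table.compareAndSet(row, col,
            Constants.encode("v1"), Constants.encode("v2")); // returns true: old value matches
    table.compareAndSet(row, col,
            Constants.encode("v1"), Constants.encode("v3")); // returns false: value is already "v2"
    table.compareAndSet(row, col,
            Constants.encode("v2"), null);                   // returns true and deletes the cell
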
- * - * @param row - * @param col - * @param val - */ - public void put(byte[] row, byte[] col, byte[] val) { - synchronized (lock) { - Key rowKey = new Key(row); - Map<Key, Cell> r = map.get(rowKey); - if (r == null) { - r = new HashMap<Key, Cell>(4); - map.put(rowKey, r); - } - r.put(new Key(col), new Cell(row, col, val)); - } - } - - /** - * Delete an entire row. - * - * @param row - */ - public void deleteRow(byte[] row) { - synchronized (lock) { - map.remove(new Key(row)); - } - } - - /** - * Delete a cell. - * - * @param row - * @param col - */ - public void delete(byte[] row, byte[] col) { - synchronized (lock) { - Key rowKey = new Key(row); - Map<Key, Cell> r = map.get(rowKey); - if (r == null) - return; - - r.remove(new Key(col)); - if (r.isEmpty()) - map.remove(rowKey); - } - } - - /** - * Compare and put or delete a cell. - * <p> - * This method performs an atomic compare-and-swap operation on the named - * cell. If the cell does not yet exist, it will be created. If the cell - * exists, it will be replaced, and if {@code newVal} is null, the cell will - * be deleted. - * - * @param row - * @param col - * @param oldVal - * if null, the cell must not exist, otherwise the cell's current - * value must exactly equal this value for the update to occur. - * @param newVal - * if null, the cell will be removed, otherwise the cell will be - * created or updated to this value. - * @return true if successful, false if {@code oldVal} does not match. - */ - public boolean compareAndSet(byte[] row, byte[] col, byte[] oldVal, - byte[] newVal) { - synchronized (lock) { - Key rowKey = new Key(row); - Key colKey = new Key(col); - - Map<Key, Cell> r = map.get(rowKey); - if (r == null) { - r = new HashMap<Key, Cell>(4); - map.put(rowKey, r); - } - - Cell oldCell = r.get(colKey); - if (!same(oldCell, oldVal)) { - if (r.isEmpty()) - map.remove(rowKey); - return false; - } - - if (newVal != null) { - r.put(colKey, new Cell(row, col, newVal)); - return true; - } - - r.remove(colKey); - if (r.isEmpty()) - map.remove(rowKey); - return true; - } - } - - private static boolean same(Cell oldCell, byte[] expVal) { - if (oldCell == null) - return expVal == null; - - if (expVal == null) - return false; - - return Arrays.equals(oldCell.value, expVal); - } - - /** - * Get a single cell, or null. - * - * @param row - * @param col - * @return the cell, or null. - */ - public Cell get(byte[] row, byte[] col) { - synchronized (lock) { - Map<Key, Cell> r = map.get(new Key(row)); - return r != null ? r.get(new Key(col)) : null; - } - } - - /** - * Scan all cells in a row. - * - * @param row - * the row to scan. - * @param family - * if not null, the family to filter and return. - * @return iterator for the cells. Cells may appear in any order, including - * random. Never null. 
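As an illustrative aside (not part of the deleted sources): a minimal Java sketch of the MemTable API documented above, assuming the org.eclipse.jgit.storage.dht bundle removed by this commit is still on a classpath. The row key "repo:1" and the column suffix "abc" are made-up example data.

import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.storage.dht.spi.memory.MemTable;
import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
import org.eclipse.jgit.util.RawParseUtils;

public class MemTableSketch {
	public static void main(String[] args) {
		MemTable table = new MemTable();
		byte[] row = Constants.encode("repo:1"); // arbitrary row key
		ColumnMatcher family = new ColumnMatcher("chunk-info:");
		byte[] col = family.append(Constants.encode("abc"));

		// Plain put/get round trip; the arrays are stored as-is.
		table.put(row, col, Constants.encode("v1"));
		MemTable.Cell cell = table.get(row, col);
		System.out.println(RawParseUtils.decode(cell.getValue())); // "v1"

		// compareAndSet replaces the cell only while its current value
		// still equals the expected old value; passing null as the new
		// value would delete the cell instead of replacing it.
		boolean swapped = table.compareAndSet(row, col,
				Constants.encode("v1"), Constants.encode("v2"));
		System.out.println(swapped); // true

		// Scan only the cells belonging to the "chunk-info:" family.
		for (MemTable.Cell c : table.scanFamily(row, family))
			System.out.println(c + " = " + RawParseUtils.decode(c.getValue()));
	}
}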
- */ - public Iterable<Cell> scanFamily(byte[] row, ColumnMatcher family) { - synchronized (lock) { - Map<Key, Cell> r = map.get(new Key(row)); - if (r == null) - return Collections.emptyList(); - - if (family == null) - return new ArrayList<Cell>(r.values()); - - ArrayList<Cell> out = new ArrayList<Cell>(4); - for (Cell cell : r.values()) { - if (family.sameFamily(cell.getName())) - out.add(cell); - } - return out; - } - } - - private static class Key { - final byte[] key; - - Key(byte[] key) { - this.key = key; - } - - @Override - public int hashCode() { - int hash = 5381; - for (int ptr = 0; ptr < key.length; ptr++) - hash = ((hash << 5) + hash) + (key[ptr] & 0xff); - return hash; - } - - @Override - public boolean equals(Object other) { - if (this == other) - return true; - if (other instanceof Key) - return Arrays.equals(key, ((Key) other).key); - return false; - } - - @Override - public String toString() { - return RawParseUtils.decode(key); - } - } - - /** A cell value in a column. */ - public static class Cell { - final byte[] row; - - final byte[] name; - - final byte[] value; - - final long timestamp; - - Cell(byte[] row, byte[] name, byte[] value) { - this.row = row; - this.name = name; - this.value = value; - this.timestamp = SystemReader.getInstance().getCurrentTime(); - } - - /** @return key of the row holding the cell. */ - public byte[] getRow() { - return row; - } - - /** @return name of the cell's column. */ - public byte[] getName() { - return name; - } - - /** @return the cell's value. */ - public byte[] getValue() { - return value; - } - - /** @return system clock time of last modification. */ - public long getTimestamp() { - return timestamp; - } - - @Override - public String toString() { - return RawParseUtils.decode(name); - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java deleted file mode 100644 index 065055b520..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/memory/MemoryDatabase.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.memory; - -import java.io.IOException; - -import org.eclipse.jgit.lib.Repository; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtRepository; -import org.eclipse.jgit.storage.dht.DhtRepositoryBuilder; -import org.eclipse.jgit.storage.dht.spi.ChunkTable; -import org.eclipse.jgit.storage.dht.spi.Database; -import org.eclipse.jgit.storage.dht.spi.ObjectIndexTable; -import org.eclipse.jgit.storage.dht.spi.RefTable; -import org.eclipse.jgit.storage.dht.spi.RepositoryIndexTable; -import org.eclipse.jgit.storage.dht.spi.RepositoryTable; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; - -/** - * Stores Git repositories in non-persistent JVM heap memory. - * <p> - * This database type is only suitable for unit testing, and other toy - * applications. All chunk data is held within the JVM heap as byte arrays, - * which is not the most efficient representation available. - */ -public class MemoryDatabase implements Database { - private final RepositoryIndexTable repositoryIndex; - - private final RepositoryTable repository; - - private final RefTable ref; - - private final ObjectIndexTable objectIndex; - - private final ChunkTable chunk; - - /** Initialize an empty database. */ - public MemoryDatabase() { - repositoryIndex = new MemRepositoryIndexTable(); - repository = new MemRepositoryTable(); - ref = new MemRefTable(); - objectIndex = new MemObjectIndexTable(); - chunk = new MemChunkTable(); - } - - /** - * Open a repository by name on this database. - * - * @param name - * the name of the repository. - * @return the repository instance. If the repository does not yet exist, - * the caller can use {@link Repository#create(boolean)} to create. - * @throws IOException - */ - public DhtRepository open(String name) throws IOException { - return (DhtRepository) new DhtRepositoryBuilder<DhtRepositoryBuilder, DhtRepository, MemoryDatabase>() - .setDatabase(this) // - .setRepositoryName(name) // - .setMustExist(false) // - .build(); - } - - public RepositoryIndexTable repositoryIndex() { - return repositoryIndex; - } - - public RepositoryTable repository() { - return repository; - } - - public RefTable ref() { - return ref; - } - - public ObjectIndexTable objectIndex() { - return objectIndex; - } - - public ChunkTable chunk() { - return chunk; - } - - public WriteBuffer newWriteBuffer() { - return new WriteBuffer() { - public void flush() throws DhtException { - // Do nothing. - } - - public void abort() throws DhtException { - // Do nothing. 
- } - }; - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java deleted file mode 100644 index ad55206fe7..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/AbstractWriteBuffer.java +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.util; - -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.eclipse.jgit.storage.dht.AsyncCallback; -import org.eclipse.jgit.storage.dht.DhtException; -import org.eclipse.jgit.storage.dht.DhtTimeoutException; -import org.eclipse.jgit.storage.dht.spi.WriteBuffer; - -/** - * Abstract buffer service built on top of an ExecutorService. - * <p> - * Writes are combined together into batches, to reduce RPC overhead when there - * are many small writes occurring. Batches are sent asynchronously when they - * reach 512 KiB worth of key/column/value data. The calling application is - * throttled when the outstanding writes are equal to the buffer size, waiting - * until the cluster has replied with success or failure. 
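For the MemoryDatabase deleted just above, a hedged usage sketch for a unit test, under the same classpath assumption. The repository name "test.git" is arbitrary, and creating the repository bare is an assumption of the sketch rather than anything asserted by this diff.

import org.eclipse.jgit.storage.dht.DhtRepository;
import org.eclipse.jgit.storage.dht.RepositoryKey;
import org.eclipse.jgit.storage.dht.spi.memory.MemoryDatabase;

public class MemoryDatabaseSketch {
	public static void main(String[] args) throws Exception {
		MemoryDatabase db = new MemoryDatabase();

		// open() does not fail for a missing name; the repository must
		// be created before it can be used.
		DhtRepository repo = db.open("test.git");
		repo.create(true); // held entirely in JVM heap, for tests only

		// The SPI tables are reachable directly, e.g. to reserve the
		// next key from the in-memory RepositoryTable.
		RepositoryKey next = db.repository().nextKey();
		System.out.println(next);

		repo.close();
	}
}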
- * <p> - * This buffer implementation is not thread-safe, it assumes only one thread - * will use the buffer instance. (It does however correctly synchronize with the - * background tasks it spawns.) - */ -public abstract class AbstractWriteBuffer implements WriteBuffer { - private final static int AUTO_FLUSH_SIZE = 512 * 1024; - - private final ExecutorService executor; - - private final int bufferSize; - - private final List<Future<?>> running; - - private final Object runningLock; - - private final Semaphore spaceAvailable; - - private int queuedCount; - - private boolean flushing; - - private Callable<?> finalTask; - - /** - * Initialize a buffer with a backing executor service. - * - * @param executor - * service to run mutation tasks on. - * @param bufferSize - * maximum number of bytes to have pending at once. - */ - protected AbstractWriteBuffer(ExecutorService executor, int bufferSize) { - this.executor = executor; - this.bufferSize = bufferSize; - this.running = new LinkedList<Future<?>>(); - this.runningLock = new Object(); - this.spaceAvailable = new Semaphore(bufferSize); - } - - /** - * Notify the buffer data is being added onto it. - * <p> - * This method waits until the buffer has sufficient space for the requested - * data, thereby throttling the calling application code. It returns true if - * its recommendation is for the buffer subclass to copy the data onto its - * internal buffer and defer starting until later. It returns false if the - * recommendation is to start the operation immediately, due to the large - * size of the request. - * <p> - * Buffer implementors should keep in mind that the return value is offered - * as advice only, they may choose to implement different behavior. - * - * @param size - * an estimated number of bytes that the buffer will be - * responsible for until the operation completes. This should - * include the row keys and column headers, in addition to the - * data values. - * @return true to enqueue the operation; false to start it right away. - * @throws DhtException - * the current thread was interrupted before space became - * available in the buffer. - */ - protected boolean add(int size) throws DhtException { - acquireSpace(size); - return size < AUTO_FLUSH_SIZE; - } - - /** - * Notify the buffer bytes were enqueued. - * - * @param size - * the estimated number of bytes that were enqueued. - * @throws DhtException - * a previously started operation completed and failed. - */ - protected void queued(int size) throws DhtException { - queuedCount += size; - - if (AUTO_FLUSH_SIZE < queuedCount) { - startQueuedOperations(queuedCount); - queuedCount = 0; - } - } - - /** - * Start all queued operations. - * <p> - * This method is invoked by {@link #queued(int)} or by {@link #flush()} - * when there is a non-zero number of bytes already enqueued as a result of - * prior {@link #add(int)} and {#link {@link #queued(int)} calls. - * <p> - * Implementors should use {@link #start(Callable, int)} to begin their - * mutation tasks in the background. - * - * @param bufferedByteCount - * number of bytes that were already enqueued. This count should - * be passed to {@link #start(Callable, int)}. - * @throws DhtException - * a previously started operation completed and failed. 
- */ - protected abstract void startQueuedOperations(int bufferedByteCount) - throws DhtException; - - public void flush() throws DhtException { - try { - flushing = true; - - if (0 < queuedCount) { - startQueuedOperations(queuedCount); - queuedCount = 0; - } - - // If a task was created above, try to use the current thread - // instead of burning an executor thread for the final work. - - if (finalTask != null) { - try { - waitFor(finalTask); - } finally { - finalTask = null; - } - } - - synchronized (runningLock) { - checkRunningTasks(true); - } - } finally { - flushing = false; - } - } - - public void abort() throws DhtException { - synchronized (runningLock) { - checkRunningTasks(true); - } - } - - private void acquireSpace(int sz) throws DhtException { - try { - final int permits = permitsForSize(sz); - if (spaceAvailable.tryAcquire(permits)) - return; - - if (0 < queuedCount) { - startQueuedOperations(queuedCount); - queuedCount = 0; - } - - spaceAvailable.acquire(permits); - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - } - } - - private int permitsForSize(int size) { - // Do not acquire more than the configured buffer size, - // even if the actual write size is larger. Trying to - // acquire more would never succeed. - - if (size <= 0) - size = 1; - return Math.min(size, bufferSize); - } - - /** - * Start a mutation task. - * - * @param <T> - * any type the task might return. - * @param task - * the mutation task. The result of the task is discarded, so - * callers should perform result validation within the task. - * @param size - * number of bytes that are buffered within the task. - * @throws DhtException - * a prior task has completed, and failed. - */ - protected <T> void start(final Callable<T> task, int size) - throws DhtException { - final int permits = permitsForSize(size); - final Callable<T> op = new Callable<T>() { - public T call() throws Exception { - try { - return task.call(); - } finally { - spaceAvailable.release(permits); - } - } - }; - - if (flushing && finalTask == null) { - // If invoked by flush(), don't start on an executor. - // - finalTask = op; - return; - } - - synchronized (runningLock) { - if (!flushing) - checkRunningTasks(false); - running.add(executor.submit(op)); - } - } - - /** - * Wrap a callback to update the buffer. - * <p> - * Flushing the buffer will wait for the returned callback to complete. - * - * @param <T> - * any type the task might return. - * @param callback - * callback invoked when the task has finished. - * @param size - * number of bytes that are buffered within the task. - * @return wrapped callback that will update the buffer state when the - * callback is invoked. - * @throws DhtException - * a prior task has completed, and failed. 
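The wrap() hook documented above is the counterpart for clients whose writes complete asynchronously; the sketch below assumes an invented AsyncStore interface, while AsyncCallback and the buffer methods are the ones shown in this file.

import java.util.concurrent.ExecutorService;

import org.eclipse.jgit.storage.dht.AsyncCallback;
import org.eclipse.jgit.storage.dht.DhtException;
import org.eclipse.jgit.storage.dht.spi.util.AbstractWriteBuffer;

class AsyncStoreBuffer extends AbstractWriteBuffer {
	/** Imaginary non-blocking client; not part of JGit. */
	interface AsyncStore {
		void putAsync(byte[] row, byte[] col, byte[] val,
				AsyncCallback<Void> done);
	}

	private final AsyncStore store;

	AsyncStoreBuffer(AsyncStore store, ExecutorService exec, int bufferSize) {
		super(exec, bufferSize);
		this.store = store;
	}

	void put(byte[] row, byte[] col, byte[] val) throws DhtException {
		int size = row.length + col.length + val.length;
		add(size); // reserve buffer space; the batching hint is ignored here

		// wrap() registers the callback with the buffer, so flush()
		// blocks until the store acknowledges (or fails) the write and
		// the reserved space is released when the callback fires.
		store.putAsync(row, col, val, wrap(new AsyncCallback<Void>() {
			public void onSuccess(Void result) {
				// nothing further to do
			}

			public void onFailure(DhtException error) {
				// a real buffer would record this and rethrow on flush()
			}
		}, size));
	}

	@Override
	protected void startQueuedOperations(int bufferedByteCount) {
		// nothing is ever queued in this sketch
	}
}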
- */ - protected <T> AsyncCallback<T> wrap(final AsyncCallback<T> callback, - int size) throws DhtException { - int permits = permitsForSize(size); - WrappedCallback<T> op = new WrappedCallback<T>(callback, permits); - synchronized (runningLock) { - checkRunningTasks(false); - running.add(op); - } - return op; - } - - private void checkRunningTasks(boolean wait) throws DhtException { - if (running.isEmpty()) - return; - - Iterator<Future<?>> itr = running.iterator(); - while (itr.hasNext()) { - Future<?> task = itr.next(); - if (task.isDone() || wait) { - itr.remove(); - waitFor(task); - } - } - } - - private static void waitFor(Callable<?> task) throws DhtException { - try { - task.call(); - } catch (DhtException err) { - throw err; - } catch (Exception err) { - throw new DhtException(err); - } - } - - private static void waitFor(Future<?> task) throws DhtException { - try { - task.get(); - - } catch (InterruptedException e) { - throw new DhtTimeoutException(e); - - } catch (ExecutionException err) { - - Throwable t = err; - while (t != null) { - if (t instanceof DhtException) - throw (DhtException) t; - t = t.getCause(); - } - - throw new DhtException(err); - } - } - - private final class WrappedCallback<T> implements AsyncCallback<T>, - Future<T> { - private final AsyncCallback<T> callback; - - private final int permits; - - private final CountDownLatch sync; - - private volatile boolean done; - - WrappedCallback(AsyncCallback<T> callback, int permits) { - this.callback = callback; - this.permits = permits; - this.sync = new CountDownLatch(1); - } - - public void onSuccess(T result) { - try { - callback.onSuccess(result); - } finally { - done(); - } - } - - public void onFailure(DhtException error) { - try { - callback.onFailure(error); - } finally { - done(); - } - } - - private void done() { - spaceAvailable.release(permits); - done = true; - sync.countDown(); - } - - public boolean cancel(boolean mayInterrupt) { - return false; - } - - public T get() throws InterruptedException, ExecutionException { - sync.await(); - return null; - } - - public T get(long time, TimeUnit unit) throws InterruptedException, - ExecutionException, TimeoutException { - sync.await(time, unit); - return null; - } - - public boolean isCancelled() { - return false; - } - - public boolean isDone() { - return done; - } - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java deleted file mode 100644 index 17ef5dd908..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ColumnMatcher.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.util; - -import java.util.Arrays; - -import org.eclipse.jgit.lib.Constants; -import org.eclipse.jgit.storage.dht.RowKey; -import org.eclipse.jgit.util.RawParseUtils; - -/** Utility to deal with columns named as byte arrays. */ -public class ColumnMatcher { - private final byte[] name; - - /** - * Create a new column matcher for the given named string. - * - * @param nameStr - * the column name, as a string. - */ - public ColumnMatcher(String nameStr) { - name = Constants.encode(nameStr); - } - - /** @return the column name, encoded in UTF-8. */ - public byte[] name() { - return name; - } - - /** - * Check if the column is an exact match. - * - * @param col - * the column as read from the database. - * @return true only if {@code col} is exactly the same as this column. - */ - public boolean sameName(byte[] col) { - return Arrays.equals(name, col); - } - - /** - * Check if the column is a member of this family. - * <p> - * This method checks that {@link #name()} (the string supplied to the - * constructor) is a prefix of {@code col}. - * - * @param col - * the column as read from the database. - * @return true if {@code col} is a member of this column family. - */ - public boolean sameFamily(byte[] col) { - if (name.length < col.length) { - for (int i = 0; i < name.length; i++) { - if (name[i] != col[i]) { - return false; - } - } - return true; - } - return false; - } - - /** - * Extract the portion of the column name that comes after the family. - * - * @param col - * the column as read from the database. - * @return everything after the family name. - */ - public byte[] suffix(byte[] col) { - byte[] r = new byte[col.length - name.length]; - System.arraycopy(col, name.length, r, 0, r.length); - return r; - } - - /** - * Append a suffix onto this column name. - * - * @param suffix - * name component to appear after the family name. - * @return the joined name, ready for storage in the database. - */ - public byte[] append(RowKey suffix) { - return append(suffix.asBytes()); - } - - /** - * Append a suffix onto this column name. - * - * @param suffix - * name component to appear after the family name. - * @return the joined name, ready for storage in the database. 
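A short sketch of the ColumnMatcher operations described above, under the same classpath assumption; the family name "chunk-info:" mirrors the constant used by MemRepositoryTable earlier in this diff, and the "1234" suffix is arbitrary.

import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.storage.dht.spi.util.ColumnMatcher;
import org.eclipse.jgit.util.RawParseUtils;

public class ColumnMatcherSketch {
	public static void main(String[] args) {
		ColumnMatcher family = new ColumnMatcher("chunk-info:");

		// append() joins the family prefix with a member suffix.
		byte[] col = family.append(Constants.encode("1234"));

		// sameFamily() is a prefix test; sameName() is an exact match.
		System.out.println(family.sameFamily(col)); // true
		System.out.println(family.sameName(col)); // false

		// suffix() recovers whatever followed the family prefix.
		System.out.println(RawParseUtils.decode(family.suffix(col))); // "1234"
	}
}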
- */ - public byte[] append(byte[] suffix) { - byte[] r = new byte[name.length + suffix.length]; - System.arraycopy(name, 0, r, 0, name.length); - System.arraycopy(suffix, 0, r, name.length, suffix.length); - return r; - } - - @Override - public String toString() { - return RawParseUtils.decode(name); - } -} diff --git a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java b/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java deleted file mode 100644 index ed0b918c28..0000000000 --- a/org.eclipse.jgit.storage.dht/src/org/eclipse/jgit/storage/dht/spi/util/ExecutorTools.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (C) 2011, Google Inc. - * and other copyright owners as documented in the project's IP log. - * - * This program and the accompanying materials are made available - * under the terms of the Eclipse Distribution License v1.0 which - * accompanies this distribution, is reproduced below, and is - * available at http://www.eclipse.org/org/documents/edl-v10.php - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * - Neither the name of the Eclipse Foundation, Inc. nor the - * names of its contributors may be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -package org.eclipse.jgit.storage.dht.spi.util; - -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicInteger; - -/** Optional executor support for implementors to build on top of. */ -public class ExecutorTools { - /** - * Get the default executor service for this JVM. - * <p> - * The default executor service is created the first time it is requested, - * and is shared with all future requests. It uses a fixed sized thread pool - * that is allocated 2 threads per CPU. Each thread is configured to be a - * daemon thread, permitting the JVM to do a clean shutdown when the - * application thread stop, even if work is still pending in the service. - * - * @return the default executor service. 
- */ - public static ExecutorService getDefaultExecutorService() { - return DefaultExecutors.service; - } - - private static class DefaultExecutors { - static final ExecutorService service; - static { - int ncpu = Runtime.getRuntime().availableProcessors(); - ThreadFactory threadFactory = new ThreadFactory() { - private final AtomicInteger cnt = new AtomicInteger(); - - private final ThreadGroup group = new ThreadGroup("JGit-DHT"); - - public Thread newThread(Runnable body) { - int id = cnt.incrementAndGet(); - String name = "JGit-DHT-Worker-" + id; - ClassLoader myCL = getClass().getClassLoader(); - - Thread thread = new Thread(group, body, name); - thread.setDaemon(true); - thread.setContextClassLoader(myCL); - return thread; - } - }; - service = Executors.newFixedThreadPool(2 * ncpu, threadFactory); - } - } - - private ExecutorTools() { - // Static helper class, do not make instances. - } -} |
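Closing the section, a hedged sketch of obtaining the shared executor documented above; the printed worker-thread name is only an expectation based on the factory shown here, not guaranteed output.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

import org.eclipse.jgit.storage.dht.spi.util.ExecutorTools;

public class ExecutorToolsSketch {
	public static void main(String[] args) throws Exception {
		// One shared pool per JVM, sized at 2 threads per CPU; the
		// threads are daemons, so they do not block JVM shutdown.
		ExecutorService exec = ExecutorTools.getDefaultExecutorService();

		Future<String> f = exec.submit(new Callable<String>() {
			public String call() {
				return Thread.currentThread().getName(); // e.g. "JGit-DHT-Worker-1"
			}
		});
		System.out.println(f.get());

		// Do not shut the service down; it is shared with every other
		// user of ExecutorTools in the same JVM.
	}
}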