Browse Source

jdk8u40-b25 updates

tags/light-jdk8u45+3
Ivan Dubrov 9 years ago
parent
commit
53c3f53814

+ 3
- 1
gradle.properties View File

@@ -27,7 +27,9 @@ targetJre=build/jre
#hotspotTag=jdk8u31-b13
#hotspotTag=jdk8u20-b22
#hotspotTag=jdk8u5-b13
hotspotTag=jdk7u71-b01
#hotspotTag=jdk8u31-b13
hotspotTag=jdk8u40-b25
#hotspotTag=jdk7u71-b01
#hotspotTag=jdk7u60-b09
#hotspotTag=jdk7u55-b13
#hotspotTag=jdk7u51-b13

+ 18
- 17
hotspot/.hg/patches/arguments-java8u31.patch View File

@@ -1,8 +1,10 @@
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 1565f88..5982b58 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -62,8 +62,8 @@
# HG changeset patch
# Parent 94dee6e9176e68ea1122af0dc98b6a5c152ea1e5

diff -r 94dee6e9176e src/share/vm/runtime/arguments.cpp
--- a/src/share/vm/runtime/arguments.cpp Tue Mar 31 17:58:49 2015 -0700
+++ b/src/share/vm/runtime/arguments.cpp Tue Mar 31 18:01:20 2015 -0700
@@ -65,8 +65,8 @@
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
@@ -13,18 +15,18 @@ index 1565f88..5982b58 100644
#define DEFAULT_JAVA_LAUNCHER "generic"
// Disable options not supported in this release, with a warning if they
@@ -1511,6 +1511,10 @@
void Arguments::set_ergonomics_flags() {
@@ -1561,6 +1561,10 @@
}
void Arguments::select_gc_ergonomically() {
+ if (AllowEnhancedClassRedefinition) {
+ // (DCEVM) enforces serial GC
+ FLAG_SET_ERGO(bool, UseSerialGC, true);
+ }
if (os::is_server_class_machine()) {
// If no other collector is requested explicitly,
// let the VM select the collector based on
@@ -1988,6 +1992,17 @@
if (should_auto_select_low_pause_collector()) {
FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
@@ -2082,6 +2086,17 @@
if (UseConcMarkSweepGC || UseParNewGC) i++;
if (UseParallelGC || UseParallelOldGC) i++;
if (UseG1GC) i++;
@@ -42,11 +44,10 @@ index 1565f88..5982b58 100644
if (i > 1) {
jio_fprintf(defaultStream::error_stream(),
"Conflicting collector combinations in option list; "
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 63222af..7597eda 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1283,6 +1283,9 @@
diff -r 94dee6e9176e src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp Tue Mar 31 17:58:49 2015 -0700
+++ b/src/share/vm/runtime/globals.hpp Tue Mar 31 18:01:20 2015 -0700
@@ -1305,6 +1305,9 @@
product(intx, TraceRedefineClasses, 0, \
"Trace level for JVMTI RedefineClasses") \
\
@@ -55,4 +56,4 @@ index 63222af..7597eda 100644
+ \
develop(bool, StressMethodComparator, false, \
"Run the MethodComparator on all loaded methods") \
\
\

+ 59
- 0
hotspot/.hg/patches/arguments-java8u40.patch View File

@@ -0,0 +1,59 @@
# HG changeset patch
# Parent 94dee6e9176e68ea1122af0dc98b6a5c152ea1e5

diff -r 94dee6e9176e src/share/vm/runtime/arguments.cpp
--- a/src/share/vm/runtime/arguments.cpp Tue Mar 31 17:58:49 2015 -0700
+++ b/src/share/vm/runtime/arguments.cpp Tue Mar 31 18:01:20 2015 -0700
@@ -65,8 +65,8 @@
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
-// Note: This is a special bug reporting site for the JVM
-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
+// (DCEVM) The DCE VM has its own JIRA bug tracking system.
+#define DEFAULT_VENDOR_URL_BUG "https://github.com/Guidewire/DCEVM/issues"
#define DEFAULT_JAVA_LAUNCHER "generic"
// Disable options not supported in this release, with a warning if they
@@ -1561,6 +1561,10 @@
}
void Arguments::select_gc_ergonomically() {
+ if (AllowEnhancedClassRedefinition) {
+ // (DCEVM) enforces serial GC
+ FLAG_SET_ERGO(bool, UseSerialGC, true);
+ }
if (os::is_server_class_machine()) {
if (should_auto_select_low_pause_collector()) {
FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
@@ -2082,6 +2086,17 @@
if (UseConcMarkSweepGC || UseParNewGC) i++;
if (UseParallelGC || UseParallelOldGC) i++;
if (UseG1GC) i++;
+
+ if (AllowEnhancedClassRedefinition) {
+ // (DCEVM) Must use serial GC. This limitation applies because the instance size changing GC modifications
+ // are only built into the mark and compact algorithm.
+ if (!UseSerialGC && i >= 1) {
+ jio_fprintf(defaultStream::error_stream(),
+ "Must use the serial GC in the DCEVM\n");
+ status = false;
+ }
+ }
+
if (i > 1) {
jio_fprintf(defaultStream::error_stream(),
"Conflicting collector combinations in option list; "
diff -r 94dee6e9176e src/share/vm/runtime/globals.hpp
--- a/src/share/vm/runtime/globals.hpp Tue Mar 31 17:58:49 2015 -0700
+++ b/src/share/vm/runtime/globals.hpp Tue Mar 31 18:01:20 2015 -0700
@@ -1305,6 +1305,9 @@
product(intx, TraceRedefineClasses, 0, \
"Trace level for JVMTI RedefineClasses") \
\
+ product(bool, AllowEnhancedClassRedefinition, true, \
+ "Allow enhanced class redefinition beyond swapping method bodies")\
+ \
develop(bool, StressMethodComparator, false, \
"Run the MethodComparator on all loaded methods") \
\

+ 22
- 22
hotspot/.hg/patches/dmh-field-accessors-java8.patch View File

@@ -2,12 +2,12 @@ Add support for DirectMethodHandle field implementations (StaticAccessor/Accesso

During the redefinition run, these instances are updated to point to new field index location.
# HG changeset patch
# Parent 93419aa9be423015ad99c0e5fa53abca1939aa00
# Parent a5d69314e0a8e05605ca678e31eeba92ec173400

diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -2617,6 +2617,50 @@
diff -r a5d69314e0a8 src/share/vm/classfile/javaClasses.cpp
--- a/src/share/vm/classfile/javaClasses.cpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp Tue Mar 31 18:06:35 2015 -0700
@@ -2667,6 +2667,50 @@
}
}
@@ -58,7 +58,7 @@ diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/jav
// Support for java_lang_invoke_MethodHandle
int java_lang_invoke_MethodHandle::_type_offset;
@@ -3269,6 +3313,9 @@
@@ -3343,6 +3387,9 @@
java_lang_invoke_LambdaForm::compute_offsets();
java_lang_invoke_MethodType::compute_offsets();
java_lang_invoke_CallSite::compute_offsets();
@@ -68,10 +68,10 @@ diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/jav
}
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -1003,6 +1003,55 @@
diff -r a5d69314e0a8 src/share/vm/classfile/javaClasses.hpp
--- a/src/share/vm/classfile/javaClasses.hpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp Tue Mar 31 18:06:35 2015 -0700
@@ -1020,6 +1020,55 @@
static int member_offset_in_bytes() { return _member_offset; }
};
@@ -127,10 +127,10 @@ diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/jav
// Interface to java.lang.invoke.LambdaForm objects
// (These are a private interface for managing adapter code generation.)
diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
--- a/src/share/vm/classfile/systemDictionary.hpp
+++ b/src/share/vm/classfile/systemDictionary.hpp
@@ -151,6 +151,8 @@
diff -r a5d69314e0a8 src/share/vm/classfile/systemDictionary.hpp
--- a/src/share/vm/classfile/systemDictionary.hpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp Tue Mar 31 18:06:35 2015 -0700
@@ -153,6 +153,8 @@
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
do_klass(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Opt ) \
@@ -139,10 +139,10 @@ diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfil
do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
do_klass(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292 ) \
do_klass(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292 ) \
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -256,6 +256,8 @@
diff -r a5d69314e0a8 src/share/vm/classfile/vmSymbols.hpp
--- a/src/share/vm/classfile/vmSymbols.hpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp Tue Mar 31 18:06:35 2015 -0700
@@ -265,6 +265,8 @@
template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \
template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
@@ -151,10 +151,10 @@ diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSym
template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \
template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \
template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \
@@ -397,6 +399,10 @@
template(signers_name, "signers_name") \
template(loader_data_name, "loader_data") \
template(dependencies_name, "dependencies") \
@@ -414,6 +416,10 @@
template(getProtectionDomain_name, "getProtectionDomain") \
template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
+ template(static_offset_name, "staticOffset") \
+ template(static_base_name, "staticBase") \
+ template(field_offset_name, "fieldOffset") \

+ 164
- 0
hotspot/.hg/patches/dmh-field-accessors-java8u40.patch View File

@@ -0,0 +1,164 @@
Add support for DirectMethodHandle field implementations (StaticAccessor/Accessor).

During the redefinition run, these instances are updated to point to new field index location.
# HG changeset patch
# Parent a5d69314e0a8e05605ca678e31eeba92ec173400

diff -r a5d69314e0a8 src/share/vm/classfile/javaClasses.cpp
--- a/src/share/vm/classfile/javaClasses.cpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp Tue Mar 31 18:06:35 2015 -0700
@@ -2667,6 +2667,50 @@
}
}
+// Support for java_lang_invoke_DirectMethodHandle$StaticAccessor
+
+int java_lang_invoke_DirectMethodHandle_StaticAccessor::_static_offset_offset;
+
+long java_lang_invoke_DirectMethodHandle_StaticAccessor::static_offset(oop dmh) {
+ assert(_static_offset_offset != 0, "");
+ return dmh->long_field(_static_offset_offset);
+}
+
+void java_lang_invoke_DirectMethodHandle_StaticAccessor::set_static_offset(oop dmh, long static_offset) {
+ assert(_static_offset_offset != 0, "");
+ dmh->long_field_put(_static_offset_offset, static_offset);
+}
+
+
+void java_lang_invoke_DirectMethodHandle_StaticAccessor::compute_offsets() {
+ Klass* klass_oop = SystemDictionary::DirectMethodHandle_StaticAccessor_klass();
+ if (klass_oop != NULL && EnableInvokeDynamic) {
+ compute_offset(_static_offset_offset, klass_oop, vmSymbols::static_offset_name(), vmSymbols::long_signature());
+ }
+}
+
+// Support for java_lang_invoke_DirectMethodHandle$Accessor
+
+int java_lang_invoke_DirectMethodHandle_Accessor::_field_offset_offset;
+
+int java_lang_invoke_DirectMethodHandle_Accessor::field_offset(oop dmh) {
+ assert(_field_offset_offset != 0, "");
+ return dmh->int_field(_field_offset_offset);
+}
+
+void java_lang_invoke_DirectMethodHandle_Accessor::set_field_offset(oop dmh, int field_offset) {
+ assert(_field_offset_offset != 0, "");
+ dmh->int_field_put(_field_offset_offset, field_offset);
+}
+
+
+void java_lang_invoke_DirectMethodHandle_Accessor::compute_offsets() {
+ Klass* klass_oop = SystemDictionary::DirectMethodHandle_Accessor_klass();
+ if (klass_oop != NULL && EnableInvokeDynamic) {
+ compute_offset(_field_offset_offset, klass_oop, vmSymbols::field_offset_name(), vmSymbols::int_signature());
+ }
+}
+
// Support for java_lang_invoke_MethodHandle
int java_lang_invoke_MethodHandle::_type_offset;
@@ -3343,6 +3387,9 @@
java_lang_invoke_LambdaForm::compute_offsets();
java_lang_invoke_MethodType::compute_offsets();
java_lang_invoke_CallSite::compute_offsets();
+
+ java_lang_invoke_DirectMethodHandle_StaticAccessor::compute_offsets();
+ java_lang_invoke_DirectMethodHandle_Accessor::compute_offsets();
}
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
diff -r a5d69314e0a8 src/share/vm/classfile/javaClasses.hpp
--- a/src/share/vm/classfile/javaClasses.hpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp Tue Mar 31 18:06:35 2015 -0700
@@ -1020,6 +1020,55 @@
static int member_offset_in_bytes() { return _member_offset; }
};
+// Interface to java.lang.invoke.DirectMethodHandle$StaticAccessor objects
+
+class java_lang_invoke_DirectMethodHandle_StaticAccessor: AllStatic {
+ friend class JavaClasses;
+
+ private:
+ static int _static_offset_offset; // offset to static field
+
+ static void compute_offsets();
+
+ public:
+ // Accessors
+ static long static_offset(oop dmh);
+ static void set_static_offset(oop dmh, long value);
+
+ // Testers
+ static bool is_subclass(Klass* klass) {
+ return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_StaticAccessor_klass());
+ }
+ static bool is_instance(oop obj) {
+ return obj != NULL && is_subclass(obj->klass());
+ }
+};
+
+// Interface to java.lang.invoke.DirectMethodHandle$Accessor objects
+
+class java_lang_invoke_DirectMethodHandle_Accessor: AllStatic {
+ friend class JavaClasses;
+
+ private:
+ static int _field_offset_offset; // offset to field
+
+ static void compute_offsets();
+
+ public:
+ // Accessors
+ static int field_offset(oop dmh);
+ static void set_field_offset(oop dmh, int value);
+
+ // Testers
+ static bool is_subclass(Klass* klass) {
+ return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_Accessor_klass());
+ }
+ static bool is_instance(oop obj) {
+ return obj != NULL && is_subclass(obj->klass());
+ }
+};
+
+
// Interface to java.lang.invoke.LambdaForm objects
// (These are a private interface for managing adapter code generation.)
diff -r a5d69314e0a8 src/share/vm/classfile/systemDictionary.hpp
--- a/src/share/vm/classfile/systemDictionary.hpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp Tue Mar 31 18:06:35 2015 -0700
@@ -153,6 +153,8 @@
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
do_klass(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Opt ) \
+ do_klass(DirectMethodHandle_StaticAccessor_klass, java_lang_invoke_DirectMethodHandle_StaticAccessor, Opt ) \
+ do_klass(DirectMethodHandle_Accessor_klass, java_lang_invoke_DirectMethodHandle_Accessor, Opt ) \
do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
do_klass(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292 ) \
do_klass(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292 ) \
diff -r a5d69314e0a8 src/share/vm/classfile/vmSymbols.hpp
--- a/src/share/vm/classfile/vmSymbols.hpp Tue Mar 31 18:05:19 2015 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp Tue Mar 31 18:06:35 2015 -0700
@@ -265,6 +265,8 @@
template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \
template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
+ template(java_lang_invoke_DirectMethodHandle_StaticAccessor, "java/lang/invoke/DirectMethodHandle$StaticAccessor") \
+ template(java_lang_invoke_DirectMethodHandle_Accessor, "java/lang/invoke/DirectMethodHandle$Accessor") \
template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \
template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \
template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \
@@ -414,6 +416,10 @@
template(getProtectionDomain_name, "getProtectionDomain") \
template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
+ template(static_offset_name, "staticOffset") \
+ template(static_base_name, "staticBase") \
+ template(field_offset_name, "fieldOffset") \
+ template(field_type_name, "fieldType") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \

+ 64
- 58
hotspot/.hg/patches/gc-java8.patch View File

@@ -1,8 +1,11 @@
# HG changeset patch
# Parent 8f44f8a7e50563e6c9a82fb0ed6c7bce4925bd3b
Change MarkAndSweep garbage collector to allow changing instances during redefinition.
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -161,6 +161,12 @@

diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -163,6 +163,12 @@
}
}
@@ -15,7 +18,7 @@ diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeL
// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
@@ -2098,7 +2104,7 @@
@@ -2097,7 +2103,7 @@
// Support for compaction
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
@@ -24,7 +27,7 @@ diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeL
// prepare_for_compaction() uses the space between live objects
// so that later phase can skip dead space quickly. So verification
// of the free lists doesn't work after.
@@ -2119,7 +2125,7 @@
@@ -2118,7 +2124,7 @@
}
void CompactibleFreeListSpace::compact() {
@@ -33,9 +36,9 @@ diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeL
}
// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -150,6 +150,7 @@
// Support for compacting cms
@@ -44,19 +47,19 @@ diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeL
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
// Initialization helpers.
diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -46,6 +46,8 @@
diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.cpp
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -48,6 +48,8 @@
STWGCTimer* MarkSweep::_gc_timer = NULL;
SerialOldTracer* MarkSweep::_gc_tracer = NULL;
+GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL;
+
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
@@ -171,3 +173,100 @@
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
@@ -165,3 +167,100 @@
}
#endif
@@ -157,10 +160,10 @@ diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/
+ }
+ }
+}
diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp
@@ -107,8 +107,12 @@
diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.hpp
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -96,8 +96,12 @@
friend class AdjustPointerClosure;
friend class KeepAliveClosure;
friend class VM_MarkSweep;
@@ -173,7 +176,7 @@ diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/
//
// Vars
//
@@ -169,6 +173,9 @@
@@ -157,6 +161,9 @@
static inline void push_objarray(oop obj, size_t index);
@@ -183,17 +186,17 @@ diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/
static void follow_stack(); // Empty marking stack.
static void follow_klass(Klass* klass);
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -334,11 +334,16 @@
diff -r 8f44f8a7e505 src/share/vm/memory/genMarkSweep.cpp
--- a/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -327,11 +327,16 @@
// in the same order in phase2, phase3 and phase4. We don't quite do that
// here (perm_gen first rather than last), so we tell the validate code
// to use a higher index (saved from phase2) when verifying perm_gen.
+ assert(_rescued_oops == NULL, "must be empty before processing");
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("4");
+ MarkSweep::copy_rescued_objects_back();
@@ -203,10 +206,10 @@ diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkS
+
+ MarkSweep::copy_rescued_objects_back();
}
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -379,9 +379,8 @@
diff -r 8f44f8a7e505 src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/space.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -377,9 +377,8 @@
_compaction_top = bottom();
}
@@ -218,7 +221,7 @@ diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
// First check if we should switch compaction space
assert(this == cp->space, "'this' should be current compaction space.");
size_t compaction_max_size = pointer_delta(end(), compact_top);
@@ -401,8 +400,15 @@
@@ -399,8 +398,15 @@
compaction_max_size = pointer_delta(cp->space->end(), compact_top);
}
@@ -235,7 +238,7 @@ diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
q->forward_to(oop(compact_top));
assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
} else {
@@ -423,6 +429,58 @@
@@ -421,6 +427,58 @@
return compact_top;
}
@@ -294,7 +297,7 @@ diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
HeapWord* q, size_t deadlength) {
@@ -444,12 +502,17 @@
@@ -442,12 +500,17 @@
#define adjust_obj_size(s) s
void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
@@ -314,7 +317,7 @@ diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
}
void Space::adjust_pointers() {
@@ -487,6 +550,111 @@
@@ -485,6 +548,111 @@
assert(q == t, "just checking");
}
@@ -426,7 +429,7 @@ diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
void CompactibleSpace::adjust_pointers() {
// Check first is there is any work to do.
if (used() == 0) {
@@ -497,7 +665,12 @@
@@ -495,7 +663,12 @@
}
void CompactibleSpace::compact() {
@@ -440,10 +443,10 @@ diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
}
void Space::print_short() const { print_short_on(tty); }
diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp
+++ b/src/share/vm/memory/space.hpp
@@ -450,6 +450,9 @@
diff -r 8f44f8a7e505 src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/space.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -392,6 +392,9 @@
// indicates when the next such action should be taken.
virtual void prepare_for_compaction(CompactPoint* cp);
// MarkSweep support phase3
@@ -453,7 +456,7 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
virtual void adjust_pointers();
// MarkSweep support phase4
virtual void compact();
@@ -479,6 +482,15 @@
@@ -421,6 +424,15 @@
// accordingly".
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
HeapWord* compact_top);
@@ -469,16 +472,19 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
// Return a size with adjusments as required of the space.
virtual size_t adjust_object_size_v(size_t size) const { return size; }
@@ -509,7 +521,7 @@
size_t word_len);
};
diff -r 8f44f8a7e505 src/share/vm/memory/space.inline.hpp
--- a/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -35,7 +35,7 @@
return block_start_const(p);
}
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
/* Compute the new addresses for the live objects and store it in the mark \
* Used by universe::mark_sweep_phase2() \
*/ \
@@ -567,7 +579,17 @@
@@ -93,7 +93,17 @@
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
size_t size = block_size(q); \
@@ -496,7 +502,7 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
q += size; \
end_of_live = q; \
} else { \
@@ -616,6 +638,8 @@
@@ -142,6 +152,8 @@
} \
} \
\
@@ -505,7 +511,7 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
assert(q == t, "just checking"); \
if (liveRange != NULL) { \
liveRange->set_end(q); \
@@ -662,13 +686,8 @@
@@ -188,13 +200,8 @@
q += size; \
} \
\
@@ -521,7 +527,7 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
} \
\
const intx interval = PrefetchScanIntervalInBytes; \
@@ -696,7 +715,7 @@
@@ -222,7 +229,7 @@
assert(q == t, "just checking"); \
}
@@ -530,7 +536,7 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
/* Copy all live objects to their new location \
* Used by MarkSweep::mark_sweep_phase4() */ \
\
@@ -721,13 +740,9 @@
@@ -247,13 +254,9 @@
} \
) /* debug_only */ \
\
@@ -546,7 +552,7 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
\
const intx scan_interval = PrefetchScanIntervalInBytes; \
const intx copy_interval = PrefetchCopyIntervalInBytes; \
@@ -745,11 +760,34 @@
@@ -271,11 +274,34 @@
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
@@ -582,22 +588,22 @@ diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
Copy::aligned_conjoint_words(q, compaction_top, size); \
oop(compaction_top)->init_mark(); \
assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp
--- a/src/share/vm/memory/universe.cpp
+++ b/src/share/vm/memory/universe.cpp
@@ -78,6 +78,8 @@
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
diff -r 8f44f8a7e505 src/share/vm/memory/universe.cpp
--- a/src/share/vm/memory/universe.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/universe.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -84,6 +84,8 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+bool Universe::_is_redefining_gc_run = false;
+
// Known objects
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp
--- a/src/share/vm/memory/universe.hpp
+++ b/src/share/vm/memory/universe.hpp
@@ -248,7 +248,13 @@
diff -r 8f44f8a7e505 src/share/vm/memory/universe.hpp
--- a/src/share/vm/memory/universe.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/universe.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -251,7 +251,13 @@
static void compute_verify_oop_data();

+ 619
- 0
hotspot/.hg/patches/gc-java8u40.patch View File

@@ -0,0 +1,619 @@
# HG changeset patch
# Parent 8f44f8a7e50563e6c9a82fb0ed6c7bce4925bd3b
Change MarkAndSweep garbage collector to allow changing instances during redefinition.

diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -163,6 +163,12 @@
}
}
+HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size,
+ CompactPoint* cp, HeapWord* compact_top) {
+ ShouldNotReachHere();
+ return NULL;
+}
+
// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
@@ -2097,7 +2103,7 @@
// Support for compaction
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+ SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false);
// prepare_for_compaction() uses the space between live objects
// so that later phase can skip dead space quickly. So verification
// of the free lists doesn't work after.
@@ -2118,7 +2124,7 @@
}
void CompactibleFreeListSpace::compact() {
- SCAN_AND_COMPACT(obj_size);
+ SCAN_AND_COMPACT(obj_size, false);
}
// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -150,6 +150,7 @@
// Support for compacting cms
HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
+ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
// Initialization helpers.
diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.cpp
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -48,6 +48,8 @@
STWGCTimer* MarkSweep::_gc_timer = NULL;
SerialOldTracer* MarkSweep::_gc_tracer = NULL;
+GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL;
+
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
@@ -165,3 +167,100 @@
}
#endif
+
+// (DCEVM) Copy the rescued objects to their destination address after compaction.
+void MarkSweep::copy_rescued_objects_back() {
+
+ if (_rescued_oops != NULL) {
+
+ for (int i=0; i<_rescued_oops->length(); i++) {
+ HeapWord* rescued_ptr = _rescued_oops->at(i);
+ oop rescued_obj = (oop) rescued_ptr;
+
+ int size = rescued_obj->size();
+ oop new_obj = rescued_obj->forwardee();
+
+ assert(rescued_obj->klass()->new_version() != NULL, "just checking");
+
+ if (rescued_obj->klass()->new_version()->update_information() != NULL) {
+ MarkSweep::update_fields(rescued_obj, new_obj);
+ } else {
+ rescued_obj->set_klass(rescued_obj->klass()->new_version());
+ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size);
+ }
+
+ FREE_RESOURCE_ARRAY(HeapWord, rescued_ptr, size);
+
+ new_obj->init_mark();
+ assert(new_obj->is_oop(), "must be a valid oop");
+ }
+ _rescued_oops->clear();
+ _rescued_oops = NULL;
+ }
+}
+
+// (DCEVM) Update instances of a class whose fields changed.
+void MarkSweep::update_fields(oop q, oop new_location) {
+
+ assert(q->klass()->new_version() != NULL, "class of old object must have new version");
+
+ Klass* old_klass_oop = q->klass();
+ Klass* new_klass_oop = q->klass()->new_version();
+
+ InstanceKlass *old_klass = InstanceKlass::cast(old_klass_oop);
+ InstanceKlass *new_klass = InstanceKlass::cast(new_klass_oop);
+
+ int size = q->size_given_klass(old_klass);
+ int new_size = q->size_given_klass(new_klass);
+
+ HeapWord* tmp = NULL;
+ oop tmp_obj = q;
+
+ // Save object somewhere, there is an overlap in fields
+ if (new_klass_oop->is_copying_backwards()) {
+ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) ||
+ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) {
+ tmp = NEW_RESOURCE_ARRAY(HeapWord, size);
+ q = (oop) tmp;
+ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size);
+ }
+ }
+
+ q->set_klass(new_klass_oop);
+ int *cur = new_klass_oop->update_information();
+ assert(cur != NULL, "just checking");
+ MarkSweep::update_fields(new_location, q, cur);
+
+ if (tmp != NULL) {
+ FREE_RESOURCE_ARRAY(HeapWord, tmp, size);
+ }
+}
+
+void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) {
+ assert(cur != NULL, "just checking");
+ char* to = (char*)(HeapWord*)new_location;
+ while (*cur != 0) {
+ int size = *cur;
+ if (size > 0) {
+ cur++;
+ int offset = *cur;
+ HeapWord* from = (HeapWord*)(((char *)(HeapWord*)tmp_obj) + offset);
+ if (size == HeapWordSize) {
+ *((HeapWord*)to) = *from;
+ } else if (size == HeapWordSize * 2) {
+ *((HeapWord*)to) = *from;
+ *(((HeapWord*)to) + 1) = *(from + 1);
+ } else {
+ Copy::conjoint_jbytes(from, to, size);
+ }
+ to += size;
+ cur++;
+ } else {
+ assert(size < 0, "");
+ int skip = -*cur;
+ Copy::fill_to_bytes(to, skip, 0);
+ to += skip;
+ cur++;
+ }
+ }
+}
diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.hpp
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -96,8 +96,12 @@
friend class AdjustPointerClosure;
friend class KeepAliveClosure;
friend class VM_MarkSweep;
+ friend class GenMarkSweep;
friend void marksweep_init();
+public:
+ static GrowableArray<HeapWord*>* _rescued_oops;
+
//
// Vars
//
@@ -157,6 +161,9 @@
static inline void push_objarray(oop obj, size_t index);
+ static void copy_rescued_objects_back();
+ static void update_fields(oop q, oop new_location);
+ static void update_fields(oop new_location, oop tmp_obj, int *cur);
static void follow_stack(); // Empty marking stack.
static void follow_klass(Klass* klass);
diff -r 8f44f8a7e505 src/share/vm/memory/genMarkSweep.cpp
--- a/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -327,11 +327,16 @@
// in the same order in phase2, phase3 and phase4. We don't quite do that
// here (perm_gen first rather than last), so we tell the validate code
// to use a higher index (saved from phase2) when verifying perm_gen.
+ assert(_rescued_oops == NULL, "must be empty before processing");
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("4");
+ MarkSweep::copy_rescued_objects_back();
+
GenCompactClosure blk;
gch->generation_iterate(&blk, true);
+
+ MarkSweep::copy_rescued_objects_back();
}
diff -r 8f44f8a7e505 src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/space.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -377,9 +377,8 @@
_compaction_top = bottom();
}
-HeapWord* CompactibleSpace::forward(oop q, size_t size,
- CompactPoint* cp, HeapWord* compact_top) {
- // q is alive
+// (DCEVM) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
+HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) {
// First check if we should switch compaction space
assert(this == cp->space, "'this' should be current compaction space.");
size_t compaction_max_size = pointer_delta(end(), compact_top);
@@ -399,8 +398,15 @@
compaction_max_size = pointer_delta(cp->space->end(), compact_top);
}
+ return compact_top;
+}
+
+HeapWord* CompactibleSpace::forward(oop q, size_t size,
+ CompactPoint* cp, HeapWord* compact_top) {
+ compact_top = forward_compact_top(size, cp, compact_top);
+
// store the forwarding pointer into the mark word
- if ((HeapWord*)q != compact_top) {
+ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) {
q->forward_to(oop(compact_top));
assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
} else {
@@ -421,6 +427,58 @@
return compact_top;
}
+// Compute the forward sizes and leave out objects whose position could
+// possibly overlap other objects.
+HeapWord* CompactibleSpace::forward_with_rescue(HeapWord* q, size_t size,
+ CompactPoint* cp, HeapWord* compact_top) {
+ size_t forward_size = size;
+
+ // (DCEVM) There is a new version of the class of q => different size
+ if (oop(q)->klass()->new_version() != NULL && oop(q)->klass()->new_version()->update_information() != NULL) {
+
+ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
+ assert(size != new_size, "instances without changed size have to be updated prior to GC run");
+ forward_size = new_size;
+ }
+
+ compact_top = forward_compact_top(forward_size, cp, compact_top);
+
+ if (must_rescue(oop(q), oop(compact_top))) {
+ if (MarkSweep::_rescued_oops == NULL) {
+ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
+ }
+ MarkSweep::_rescued_oops->append(q);
+ return compact_top;
+ }
+
+ return forward(oop(q), forward_size, cp, compact_top);
+}
+
+// Compute the forwarding addresses for the objects that need to be rescued.
+HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) {
+ // TODO: empty the _rescued_oops after ALL spaces are compacted!
+ if (MarkSweep::_rescued_oops != NULL) {
+ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) {
+ HeapWord* q = MarkSweep::_rescued_oops->at(i);
+
+ /* size_t size = oop(q)->size(); changing this for cms for perm gen */
+ size_t size = block_size(q);
+
+ // (DCEVM) There is a new version of the class of q => different size
+ if (oop(q)->klass()->new_version() != NULL) {
+ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
+ assert(size != new_size, "instances without changed size have to be updated prior to GC run");
+ size = new_size;
+ }
+
+ compact_top = cp->space->forward(oop(q), size, cp, compact_top);
+ assert(compact_top <= end(), "must not write over end of space!");
+ }
+ MarkSweep::_rescued_oops->clear();
+ MarkSweep::_rescued_oops = NULL;
+ }
+ return compact_top;
+}
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
HeapWord* q, size_t deadlength) {
@@ -442,12 +500,17 @@
#define adjust_obj_size(s) s
void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
+ SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false);
}
// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
+ if (!Universe::is_redefining_gc_run()) {
+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false);
+ } else {
+ // Redefinition run
+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true);
+ }
}
void Space::adjust_pointers() {
@@ -485,6 +548,111 @@
assert(q == t, "just checking");
}
+
+#ifdef ASSERT
+
+int CompactibleSpace::space_index(oop obj) {
+ GenCollectedHeap* heap = GenCollectedHeap::heap();
+
+ //if (heap->is_in_permanent(obj)) {
+ // return -1;
+ //}
+
+ int index = 0;
+ for (int i = heap->n_gens() - 1; i >= 0; i--) {
+ Generation* gen = heap->get_gen(i);
+ CompactibleSpace* space = gen->first_compaction_space();
+ while (space != NULL) {
+ if (space->is_in_reserved(obj)) {
+ return index;
+ }
+ space = space->next_compaction_space();
+ index++;
+ }
+ }
+
+ tty->print_cr("could not compute space_index for %08xh", (HeapWord*)obj);
+ index = 0;
+ for (int i = heap->n_gens() - 1; i >= 0; i--) {
+ Generation* gen = heap->get_gen(i);
+ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end());
+
+ CompactibleSpace* space = gen->first_compaction_space();
+ while (space != NULL) {
+ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end());
+ space = space->next_compaction_space();
+ index++;
+ }
+ }
+
+ ShouldNotReachHere();
+ return 0;
+}
+#endif
+
+bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) {
+ // Only redefined objects can have the need to be rescued.
+ if (oop(old_obj)->klass()->new_version() == NULL) return false;
+
+ //if (old_obj->is_perm()) {
+ // // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj.
+ // return true;
+ //}
+
+ int new_size = old_obj->size_given_klass(oop(old_obj)->klass()->new_version());
+ int original_size = old_obj->size();
+
+ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1);
+ bool old_in_tenured = tenured_gen->is_in_reserved(old_obj);
+ bool new_in_tenured = tenured_gen->is_in_reserved(new_obj);
+ if (old_in_tenured == new_in_tenured) {
+ // Rescue if object may overlap with a higher memory address.
+ bool overlap = ((HeapWord*)old_obj + original_size < (HeapWord*)new_obj + new_size);
+ if (old_in_tenured) {
+ // Old and new address are in same space, so just compare the address.
+ // Must rescue if object moves towards the top of the space.
+ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space");
+ } else {
+ // In the new generation, eden is located before the from space, so a
+ // simple pointer comparison is sufficient.
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
+ assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result");
+ }
+ return overlap;
+
+ } else {
+ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces");
+ if (tenured_gen->is_in_reserved(new_obj)) {
+ // Must never rescue when moving from the new into the old generation.
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
+ assert(space_index(old_obj) > space_index(new_obj), "must be");
+ return false;
+
+ } else /* if (tenured_gen->is_in_reserved(old_obj)) */ {
+ // Must always rescue when moving from the old into the new generation.
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
+ assert(space_index(old_obj) < space_index(new_obj), "must be");
+ return true;
+ }
+ }
+}
+
+HeapWord* CompactibleSpace::rescue(HeapWord* old_obj) {
+ assert(must_rescue(oop(old_obj), oop(old_obj)->forwardee()), "do not call otherwise");
+
+ int size = oop(old_obj)->size();
+ HeapWord* rescued_obj = NEW_RESOURCE_ARRAY(HeapWord, size);
+ Copy::aligned_disjoint_words(old_obj, rescued_obj, size);
+
+ if (MarkSweep::_rescued_oops == NULL) {
+ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
+ }
+
+ MarkSweep::_rescued_oops->append(rescued_obj);
+ return rescued_obj;
+}
+
void CompactibleSpace::adjust_pointers() {
// Check first is there is any work to do.
if (used() == 0) {
@@ -495,7 +663,12 @@
}
void CompactibleSpace::compact() {
- SCAN_AND_COMPACT(obj_size);
+ if(!Universe::is_redefining_gc_run()) {
+ SCAN_AND_COMPACT(obj_size, false);
+ } else {
+ // Redefinition run
+ SCAN_AND_COMPACT(obj_size, true)
+ }
}
void Space::print_short() const { print_short_on(tty); }
diff -r 8f44f8a7e505 src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/space.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -392,6 +392,9 @@
// indicates when the next such action should be taken.
virtual void prepare_for_compaction(CompactPoint* cp);
// MarkSweep support phase3
+ DEBUG_ONLY(int space_index(oop obj));
+ bool must_rescue(oop old_obj, oop new_obj);
+ HeapWord* rescue(HeapWord* old_obj);
virtual void adjust_pointers();
// MarkSweep support phase4
virtual void compact();
@@ -421,6 +424,15 @@
// accordingly".
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
HeapWord* compact_top);
+ // (DCEVM) same as forward, but can rescue objects. Invoked only during
+ // redefinition runs
+ HeapWord* forward_with_rescue(HeapWord* q, size_t size, CompactPoint* cp,
+ HeapWord* compact_top);
+
+ HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
+
+ // (tw) Compute new compact top without actually forwarding the object.
+ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
// Return a size with adjusments as required of the space.
virtual size_t adjust_object_size_v(size_t size) const { return size; }
diff -r 8f44f8a7e505 src/share/vm/memory/space.inline.hpp
--- a/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -35,7 +35,7 @@
return block_start_const(p);
}
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
/* Compute the new addresses for the live objects and store it in the mark \
* Used by universe::mark_sweep_phase2() \
*/ \
@@ -93,7 +93,17 @@
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
size_t size = block_size(q); \
+ if (redefinition_run) { \
+ compact_top = cp->space->forward_with_rescue(q, size, \
+ cp, compact_top); \
+ if (q < first_dead && oop(q)->is_gc_marked()) { \
+ /* Was moved (otherwise, forward would reset mark), \
+ set first_dead to here */ \
+ first_dead = q; \
+ } \
+ } else { \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
+ } \
q += size; \
end_of_live = q; \
} else { \
@@ -142,6 +152,8 @@
} \
} \
\
+ if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
+ \
assert(q == t, "just checking"); \
if (liveRange != NULL) { \
liveRange->set_end(q); \
@@ -188,13 +200,8 @@
q += size; \
} \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ This is funky. Using this to read the previously written \
- * LiveRange. See also use below. */ \
- q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
- } \
+ /* (DCEVM) first_dead can be a live object if we move/rescue resized objects */ \
+ q = _first_dead; \
} \
\
const intx interval = PrefetchScanIntervalInBytes; \
@@ -222,7 +229,7 @@
assert(q == t, "just checking"); \
}
-#define SCAN_AND_COMPACT(obj_size) { \
+#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \
/* Copy all live objects to their new location \
* Used by MarkSweep::mark_sweep_phase4() */ \
\
@@ -247,13 +254,9 @@
} \
) /* debug_only */ \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ Funky */ \
- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
+ /* (DCEVM) first_dead can be a live object if we move/rescue resized objects */ \
+ q = _first_dead; \
} \
- } \
\
const intx scan_interval = PrefetchScanIntervalInBytes; \
const intx copy_interval = PrefetchCopyIntervalInBytes; \
@@ -271,11 +274,34 @@
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
+ if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \
+ rescue(q); \
+ debug_only(Copy::fill_to_words(q, size, 0)); \
+ q += size; \
+ continue; \
+ } \
+ \
/* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
/* copy object and reinit its mark */ \
- assert(q != compaction_top, "everything in this pass should be moving"); \
+ assert(q != compaction_top || oop(q)->klass()->new_version() != NULL, \
+ "everything in this pass should be moving"); \
+ if (redefinition_run && oop(q)->klass()->new_version() != NULL) { \
+ Klass* new_version = oop(q)->klass()->new_version(); \
+ if (new_version->update_information() == NULL) { \
+ Copy::aligned_conjoint_words(q, compaction_top, size); \
+ oop(compaction_top)->set_klass(new_version); \
+ } else { \
+ MarkSweep::update_fields(oop(q), oop(compaction_top)); \
+ } \
+ oop(compaction_top)->init_mark(); \
+ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+ \
+ debug_only(prev_q = q); \
+ q += size; \
+ continue; \
+ } \
Copy::aligned_conjoint_words(q, compaction_top, size); \
oop(compaction_top)->init_mark(); \
assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
diff -r 8f44f8a7e505 src/share/vm/memory/universe.cpp
--- a/src/share/vm/memory/universe.cpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/universe.cpp Tue Mar 31 18:05:19 2015 -0700
@@ -84,6 +84,8 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+bool Universe::_is_redefining_gc_run = false;
+
// Known objects
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
diff -r 8f44f8a7e505 src/share/vm/memory/universe.hpp
--- a/src/share/vm/memory/universe.hpp Tue Mar 31 18:01:20 2015 -0700
+++ b/src/share/vm/memory/universe.hpp Tue Mar 31 18:05:19 2015 -0700
@@ -251,7 +251,13 @@
static void compute_verify_oop_data();
+ static bool _is_redefining_gc_run;
+
public:
+
+ static bool is_redefining_gc_run() { return _is_redefining_gc_run; }
+ static void set_redefining_gc_run(bool b) { _is_redefining_gc_run = b; }
+
// Known classes in the VM
static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; }
static Klass* byteArrayKlassObj() { return _byteArrayKlassObj; }

+ 226
- 257
hotspot/.hg/patches/light-jdk8u20-b22.patch
File diff suppressed because it is too large
View File


+ 3681
- 0
hotspot/.hg/patches/light-jdk8u40-b25.patch
File diff suppressed because it is too large
View File


+ 7
- 4
hotspot/.hg/patches/series View File

@@ -4,12 +4,15 @@ distro-name.patch
# Add AllowEnhancedRedefinition argument
arguments-java8.patch #+light-jdk8u5-b13 #+light-jdk8u20-b22
arguments-java8u31.patch #+light-jdk8u31-b13
arguments-java8u40.patch #+light-jdk8u40-b25

# GC changes to allow modifying instances during redefinition run
gc-java8.patch #+jdk8
gc-java8.patch #+light-jdk8u5-b13 #+light-jdk8u20-b22 #+light-jdk8u31-b13
gc-java8u40.patch #+light-jdk8u40-b25

# Add support for certain DMH implementations
dmh-field-accessors-java8.patch #+jdk8
dmh-field-accessors-java8.patch #+light-jdk8u5-b13 #+light-jdk8u20-b22 #+light-jdk8u31-b13
dmh-field-accessors-java8u40.patch #+light-jdk8u40-b25

# Rest of the changes
full-jdk7u11-b21.patch #+full-jdk7u11-b21
@@ -33,6 +36,6 @@ full-jdk7u60-deopt-cp.patch #+full-jdk7u60-b09 #+full-jdk7u71-b01
light-jdk8u5-b13.patch #+light-jdk8u5-b13

light-jdk8u20-b22.patch #+light-jdk8u20-b22 #+light-jdk8u31-b13
light-jdk8u20-deopt-cp.patch #+light-jdk8u20-b22 #+light-jdk8u31-b13
light-jdk8u40-b25.patch #+light-jdk8u40-b25
light-jdk8u20-deopt-cp.patch #+light-jdk8u20-b22 #+light-jdk8u31-b13 #+light-jdk8u40-b25


Loading…
Cancel
Save