author | Ivan Dubrov <idubrov@guidewire.com> | 2014-04-30 14:49:27 -0700
---|---|---
committer | Ivan Dubrov <idubrov@guidewire.com> | 2014-04-30 14:49:27 -0700
commit | b5cdfa25d3e04841d2a90ae6d792a2672745e3b7 | (patch)
tree | 56a928e644ff05acc392a35d170dd64ded4b0e0b | /hotspot
parent | 615430e4e81681cbb25f831c6b0b4add1bfe324d | (diff)
Migrating to Mercurial MQ
Diffstat (limited to 'hotspot')
-rw-r--r-- | hotspot/.hg/patches/arguments-java8.patch | 57
-rw-r--r-- | hotspot/.hg/patches/distro-name.patch | 13
-rw-r--r-- | hotspot/.hg/patches/full-jdk7u11-b21.patch | 12076
-rw-r--r-- | hotspot/.hg/patches/full-jdk7u45-b08.patch | 12034
-rw-r--r-- | hotspot/.hg/patches/full-jdk7u51-b13.patch | 12233
-rw-r--r-- | hotspot/.hg/patches/gc-java8.patch | 613
-rw-r--r-- | hotspot/.hg/patches/light-jdk7u40-b43.patch | 10126
-rw-r--r-- | hotspot/.hg/patches/light-jdk7u51-b13.patch | 10126
-rw-r--r-- | hotspot/.hg/patches/light-jdk8u5-b13.patch | 886
-rw-r--r-- | hotspot/.hg/patches/series | 16
10 files changed, 57393 insertions, 787 deletions
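With MQ, the `.hg/patches/series` file determines the order in which `hg qpush` applies the queue, and guards can restrict a patch to a particular base JDK build. The actual contents of the series file are not part of this excerpt; purely as a hypothetical illustration of the MQ series format, a layout consistent with the patch names above might look like:

```
# DCEVM patch queue; select one base patch per JDK build, e.g.:
#   hg qselect jdk7u51 && hg qpush -a
full-jdk7u51-b13.patch   #+jdk7u51
light-jdk7u51-b13.patch  #+light-jdk7u51
gc-java8.patch           #+jdk8
distro-name.patch
```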
diff --git a/hotspot/.hg/patches/arguments-java8.patch b/hotspot/.hg/patches/arguments-java8.patch
new file mode 100644
index 00000000..fc505020
--- /dev/null
+++ b/hotspot/.hg/patches/arguments-java8.patch
@@ -0,0 +1,57 @@
+Adds AllowEnhancedClassRedefinition argument.
+diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
+--- a/src/share/vm/runtime/arguments.cpp
++++ b/src/share/vm/runtime/arguments.cpp
+@@ -59,8 +59,8 @@
+ #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
+ #endif // INCLUDE_ALL_GCS
+
+-// Note: This is a special bug reporting site for the JVM
+-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
++// (DCEVM) The DCE VM has its own JIRA bug tracking system.
++#define DEFAULT_VENDOR_URL_BUG "https://github.com/Guidewire/DCEVM/issues"
+ #define DEFAULT_JAVA_LAUNCHER "generic"
+
+ // Disable options not supported in this release, with a warning if they
+@@ -1507,6 +1507,10 @@
+
+ void Arguments::set_ergonomics_flags() {
+
++  if (AllowEnhancedClassRedefinition) {
++    // (DCEVM) enforces serial GC
++    FLAG_SET_ERGO(bool, UseSerialGC, true);
++  }
+   if (os::is_server_class_machine()) {
+     // If no other collector is requested explicitly,
+     // let the VM select the collector based on
+@@ -1948,6 +1952,17 @@
+   if (UseConcMarkSweepGC || UseParNewGC) i++;
+   if (UseParallelGC || UseParallelOldGC) i++;
+   if (UseG1GC) i++;
++
++  if (AllowEnhancedClassRedefinition) {
++    // (DCEVM) Must use serial GC. This limitation applies because the instance size changing GC modifications
++    // are only built into the mark and compact algorithm.
++    if (!UseSerialGC && i >= 1) {
++      jio_fprintf(defaultStream::error_stream(),
++                  "Must use the serial GC in the DCEVM\n");
++      status = false;
++    }
++  }
++
+   if (i > 1) {
+     jio_fprintf(defaultStream::error_stream(),
+                 "Conflicting collector combinations in option list; "
+diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
+--- a/src/share/vm/runtime/globals.hpp
++++ b/src/share/vm/runtime/globals.hpp
+@@ -1273,6 +1273,9 @@
+   product(intx, TraceRedefineClasses, 0,                                    \
+           "Trace level for JVMTI RedefineClasses")                          \
+                                                                             \
++  product(bool, AllowEnhancedClassRedefinition, true,                       \
++          "Allow enhanced class redefinition beyond swapping method bodies")\
++                                                                            \
+   develop(bool, StressMethodComparator, false,                              \
+           "Run the MethodComparator on all loaded methods")                 \
+                                                                             \
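The new `AllowEnhancedClassRedefinition` flag (default `true`) is what lets the DCEVM accept redefinitions that change a class's shape rather than just its method bodies. As a hedged illustration only — the class and file names below are hypothetical — a `java.lang.instrument` agent could then push a schema-changing redefinition that a stock HotSpot rejects with `UnsupportedOperationException`:

```java
import java.lang.instrument.ClassDefinition;
import java.lang.instrument.Instrumentation;
import java.nio.file.Files;
import java.nio.file.Paths;

public class RedefineAgent {
    // Requires Agent-Class and "Can-Redefine-Classes: true" in the agent jar manifest.
    public static void agentmain(String args, Instrumentation inst) throws Exception {
        Class<?> target = Class.forName("com.example.Greeter");             // hypothetical class
        byte[] newVersion = Files.readAllBytes(Paths.get("Greeter.class")); // new bytes add a method
        // Stock HotSpot: UnsupportedOperationException (schema change rejected).
        // DCEVM with -XX:+AllowEnhancedClassRedefinition: accepted.
        inst.redefineClasses(new ClassDefinition(target, newVersion));
    }
}
```

Note the ergonomics hunk above: enabling the flag forces `UseSerialGC`, and combining it with CMS, ParNew, parallel, or G1 collectors is reported as an error, because the instance-resizing GC changes exist only in the mark-and-compact collector.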
diff --git a/hotspot/.hg/patches/distro-name.patch b/hotspot/.hg/patches/distro-name.patch
new file mode 100644
index 00000000..62e5edca
--- /dev/null
+++ b/hotspot/.hg/patches/distro-name.patch
@@ -0,0 +1,13 @@
+Changes name of VM.
+diff -r 8a6717910608 make/openjdk_distro
+--- a/make/openjdk_distro	Tue Mar 11 13:02:13 2014 -0700
++++ b/make/openjdk_distro	Wed Apr 30 11:27:18 2014 -0700
+@@ -27,6 +27,6 @@
+ #
+
+ # Don't put quotes (fail windows build).
+-HOTSPOT_VM_DISTRO=OpenJDK
++HOTSPOT_VM_DISTRO=Dynamic Code Evolution
+ COMPANY_NAME=
+ PRODUCT_NAME=OpenJDK
+
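`HOTSPOT_VM_DISTRO` is the leading token of the VM name string, so after this patch a `java -version` banner would identify the VM along these lines (version and build fields elided, since they depend on the base JDK this queue is applied to):

```
$ java -version
java version "1.7.0_..."
...
Dynamic Code Evolution 64-Bit Server VM (build ..., mixed mode)
```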
diff --git a/hotspot/.hg/patches/full-jdk7u11-b21.patch b/hotspot/.hg/patches/full-jdk7u11-b21.patch
new file mode 100644
index 00000000..76a16fcd
--- /dev/null
+++ b/hotspot/.hg/patches/full-jdk7u11-b21.patch
@@ -0,0 +1,12076 @@
+diff --git a/src/cpu/x86/vm/templateTable_x86_32.cpp b/src/cpu/x86/vm/templateTable_x86_32.cpp
+--- a/src/cpu/x86/vm/templateTable_x86_32.cpp
++++ b/src/cpu/x86/vm/templateTable_x86_32.cpp
+@@ -2109,6 +2109,22 @@
+   // resolve first time through
+   address entry;
+   switch (bytecode()) {
++    case Bytecodes::_fast_agetfield : // fall through
++    case Bytecodes::_fast_bgetfield : // fall through
++    case Bytecodes::_fast_cgetfield : // fall through
++    case Bytecodes::_fast_dgetfield : // fall through
++    case Bytecodes::_fast_fgetfield : // fall through
++    case Bytecodes::_fast_igetfield : // fall through
++    case Bytecodes::_fast_lgetfield : // fall through
++    case Bytecodes::_fast_sgetfield : // fall through
++    case Bytecodes::_fast_aputfield : // fall through
++    case Bytecodes::_fast_bputfield : // fall through
++    case Bytecodes::_fast_cputfield : // fall through
++    case Bytecodes::_fast_dputfield : // fall through
++    case Bytecodes::_fast_fputfield : // fall through
++    case Bytecodes::_fast_iputfield : // fall through
++    case Bytecodes::_fast_lputfield : // fall through
++    case Bytecodes::_fast_sputfield : // fall through
+     case Bytecodes::_getstatic : // fall through
+     case Bytecodes::_putstatic : // fall through
+     case Bytecodes::_getfield : // fall through
+@@ -2203,6 +2219,7 @@
+ // Correct values of the cache and index registers are preserved.
+ void TemplateTable::jvmti_post_field_access(Register cache,
+                                             Register index,
++                                            int byte_no,
+                                             bool is_static,
+                                             bool has_tos) {
+   if (JvmtiExport::can_post_field_access()) {
+@@ -2229,7 +2246,11 @@
+     // cache: cache entry pointer
+     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
+                rax, cache);
+-    __ get_cache_and_index_at_bcp(cache, index, 1);
++
++    // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++    __ restore_bcp();
++    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+     __ bind(L1);
+   }
+ }
+@@ -2250,7 +2271,7 @@
+   const Register flags = rax;
+
+   resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+-  jvmti_post_field_access(cache, index, is_static, false);
++  jvmti_post_field_access(cache, index, byte_no, is_static, false);
+   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+   if (!is_static) pop_and_check_object(obj);
+@@ -2385,7 +2406,7 @@
+
+ // The registers cache and index expected to be set before call.
+ // The function may destroy various registers, just not the cache and index registers.
+-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static) {
+
+   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+
+@@ -2443,7 +2464,11 @@
+     // rcx: jvalue object on the stack
+     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
+                rbx, rax, rcx);
+-    __ get_cache_and_index_at_bcp(cache, index, 1);
++
++    // (tw) Redefinition might have occurred => re-resolve the cp entry.
++    __ restore_bcp();
++    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+     __ bind(L1);
+   }
+ }
+@@ -2459,7 +2484,7 @@
+   const Register flags = rax;
+
+   resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+-  jvmti_post_field_mod(cache, index, is_static);
++  jvmti_post_field_mod(cache, index, byte_no, is_static);
+   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
+@@ -2697,6 +2722,11 @@
+   // rax,: cache entry pointer
+   // rcx: jvalue object on the stack
+   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
++
++  // (tw) Redefinition might have occurred => re-resolve the cp entry.
++  __ restore_bcp();
++  resolve_cache_and_index(2, noreg, rax, rcx, sizeof(u2));
++
+   if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
+   __ pop(rax); // restore lower value
+   __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
+@@ -2817,6 +2847,11 @@
+   // rcx: cache entry pointer
+   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
+   __ pop_ptr(rax); // restore object pointer
++
++  // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++  __ restore_bcp();
++  resolve_cache_and_index(1, noreg, rax, rcx, sizeof(u2));
++
+   __ bind(L1);
+ }
+
+@@ -2987,6 +3022,26 @@
+
+   __ bind(notFinal);
+
++  // DCEVM: Check if we are calling an old method (and have to go slow path)
++  Label notOld;
++  __ movl(rax, flags);
++  __ andl(rax, (1 << ConstantPoolCacheEntry::oldMethodBit));
++  __ jcc(Assembler::zero, notOld);
++
++  // Need a null check here!
++  __ null_check(recv);
++
++  // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++  // DCEVM: TODO: Check if we can improve performance by inlining.
++  // DCEVM: TODO: Check if this additional branch affects normal execution time.
++  __ call_VM(method, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_method), recv, index);
++
++  // profile this call
++  __ profile_final_call(rax);
++  __ jump_from_interpreted(method, rdx);
++
++  __ bind(notOld);
++
+   // get receiver klass
+   __ null_check(recv, oopDesc::klass_offset_in_bytes());
+   // Keep recv in rcx for callee expects it there
+@@ -3068,6 +3123,31 @@
+   invokevirtual_helper(rbx, rcx, rdx);
+   __ bind(notMethod);
+
++  // DCEVM: Check if we are calling an old method (and have to go slow path)
++  //__ movl(rax, rdx);
++  Label notOld;
++  __ andl(rdx, (1 << ConstantPoolCacheEntry::oldMethodBit));
++  __ jcc(Assembler::zero, notOld);
++
++  // Get receiver klass into rdx - also a null check
++  __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
++  __ verify_oop(rdx);
++
++  // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++  // DCEVM: TODO: Check if we can improve performance by inlining.
++  // DCEVM: TODO: Check if this additional branch affects normal execution time.
++  // DCEVM: TODO: Check the exact semantic (with respect to destroying registers) of call_VM
++  __ call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_interface_method), rcx, rax, rbx);
++
++  // DCEVM: TODO: Check if resolved method could be null.
++
++  // profile this call
++  __ profile_virtual_call(rdx, rsi, rdi);
++
++  __ jump_from_interpreted(rbx, rdx);
++
++  __ bind(notOld);
++
+   // Get receiver klass into rdx - also a null check
+   __ restore_locals(); // restore rdi
+   __ load_klass(rdx, rcx);
+diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
+--- a/src/cpu/x86/vm/templateTable_x86_64.cpp
++++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
+@@ -2145,6 +2145,22 @@
+   // resolve first time through
+   address entry;
+   switch (bytecode()) {
++    case Bytecodes::_fast_agetfield : // fall through
++    case Bytecodes::_fast_bgetfield : // fall through
++    case Bytecodes::_fast_cgetfield : // fall through
++    case Bytecodes::_fast_dgetfield : // fall through
++    case Bytecodes::_fast_fgetfield : // fall through
++    case Bytecodes::_fast_igetfield : // fall through
++    case Bytecodes::_fast_lgetfield : // fall through
++    case Bytecodes::_fast_sgetfield : // fall through
++    case Bytecodes::_fast_aputfield : // fall through
++    case Bytecodes::_fast_bputfield : // fall through
++    case Bytecodes::_fast_cputfield : // fall through
++    case Bytecodes::_fast_dputfield : // fall through
++    case Bytecodes::_fast_fputfield : // fall through
++    case Bytecodes::_fast_iputfield : // fall through
++    case Bytecodes::_fast_lputfield : // fall through
++    case Bytecodes::_fast_sputfield : // fall through
+     case Bytecodes::_getstatic:
+     case Bytecodes::_putstatic:
+     case Bytecodes::_getfield:
+@@ -2251,7 +2267,7 @@
+ // The registers cache and index expected to be set before call.
+ // Correct values of the cache and index registers are preserved.
+ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
+-                                            bool is_static, bool has_tos) {
++                                            int byte_no, bool is_static, bool has_tos) {
+   // do the JVMTI work here to avoid disturbing the register state below
+   // We use c_rarg registers here because we want to use the register used in
+   // the call to the VM
+@@ -2282,7 +2298,11 @@
+     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                                        InterpreterRuntime::post_field_access),
+                c_rarg1, c_rarg2, c_rarg3);
+-    __ get_cache_and_index_at_bcp(cache, index, 1);
++
++    // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++    __ restore_bcp();
++    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+     __ bind(L1);
+   }
+ }
+@@ -2304,7 +2324,7 @@
+   const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
+
+   resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+-  jvmti_post_field_access(cache, index, is_static, false);
++  jvmti_post_field_access(cache, index, byte_no, is_static, false);
+   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+   if (!is_static) {
+@@ -2438,7 +2458,7 @@
+
+ // The registers cache and index expected to be set before call.
+ // The function may destroy various registers, just not the cache and index registers.
+-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static) {
+   transition(vtos, vtos);
+
+   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+@@ -2491,7 +2511,11 @@
+     __ call_VM(noreg,
+                CAST_FROM_FN_PTR(address,
+                                 InterpreterRuntime::post_field_modification),
+                c_rarg1, c_rarg2, c_rarg3);
+-    __ get_cache_and_index_at_bcp(cache, index, 1);
++
++    // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++    __ restore_bcp();
++    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+     __ bind(L1);
+   }
+ }
+@@ -2507,7 +2531,7 @@
+   const Register bc = c_rarg3;
+
+   resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+-  jvmti_post_field_mod(cache, index, is_static);
++  jvmti_post_field_mod(cache, index, byte_no, is_static);
+   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+   // [jk] not needed currently
+@@ -2715,6 +2739,11 @@
+     __ call_VM(noreg,
+                CAST_FROM_FN_PTR(address,
+                                 InterpreterRuntime::post_field_modification),
+                rbx, c_rarg2, c_rarg3);
++
++    // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++    __ restore_bcp();
++    resolve_cache_and_index(2, noreg, rax, rcx, sizeof(u2));
++
+     __ pop(rax); // restore lower value
+     __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
+     __ bind(L2);
+@@ -2815,6 +2844,11 @@
+                                        InterpreterRuntime::post_field_access),
+                c_rarg1, c_rarg2);
+     __ pop_ptr(rax); // restore object pointer
++
++    // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++    __ restore_bcp();
++    resolve_cache_and_index(1, noreg, rax, rcx, sizeof(u2));
++
+     __ bind(L1);
+   }
+
+@@ -3030,6 +3064,26 @@
+
+   __ bind(notFinal);
+
++  // DCEVM: Check if we are calling an old method (and have to go slow path)
++  Label notOld;
++  __ movl(rax, flags);
++  __ andl(rax, (1 << ConstantPoolCacheEntry::oldMethodBit));
++  __ jcc(Assembler::zero, notOld);
++
++  // Need a null check here!
++  __ null_check(recv);
++
++  // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++  // DCEVM: TODO: Check if we can improve performance by inlining.
++  // DCEVM: TODO: Check if this additional branch affects normal execution time.
++  __ call_VM(method, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_method), recv, index);
++
++  // profile this call
++  __ profile_final_call(rax);
++  __ jump_from_interpreted(method, rdx);
++
++  __ bind(notOld);
++
+   // get receiver klass
+   __ null_check(recv, oopDesc::klass_offset_in_bytes());
+   __ load_klass(rax, recv);
+@@ -3113,6 +3167,35 @@
+   invokevirtual_helper(rbx, rcx, rdx);
+   __ bind(notMethod);
+
++  // DCEVM: Check if we are calling an old method (and have to go slow path)
++  Label notOld;
++  __ andl(rdx, (1 << ConstantPoolCacheEntry::oldMethodBit));
++  __ jcc(Assembler::zero, notOld);
++
++  // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++  // DCEVM: TODO: Check if we can improve performance by inlining.
++  // DCEVM: TODO: Check if this additional branch affects normal execution time.
++  // DCEVM: TODO: Check the exact semantic (with respect to destroying registers) of call_VM
++  // DCEVM: FIXME: What exactly should we store here?
++  __ push(rcx); // destroyed by Linux arguments passing conventions
++  __ movptr(r14, rcx);
++  __ call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_interface_method), r14, rax, rbx);
++  __ pop(rcx);
++
++  // Get receiver klass into rdx - also a null check
++  __ restore_locals(); // restore r14
++  __ load_klass(rdx, rcx);
++  __ verify_oop(rdx);
++
++  // DCEVM: TODO: Check if resolved method could be null.
++
++  // profile this call
++  __ profile_virtual_call(rdx, r13, r14);
++
++  __ jump_from_interpreted(rbx, rdx);
++
++  __ bind(notOld);
++
+   // Get receiver klass into rdx - also a null check
+   __ restore_locals(); // restore r14
+   __ load_klass(rdx, rcx);
+diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp
+--- a/src/share/vm/c1/c1_Compilation.hpp
++++ b/src/share/vm/c1/c1_Compilation.hpp
+@@ -239,8 +239,8 @@
+ #define BAILOUT(msg) { bailout(msg); return; }
+ #define BAILOUT_(msg, res) { bailout(msg); return res; }
+
+-#define CHECK_BAILOUT() { if (bailed_out()) return; }
+-#define CHECK_BAILOUT_(res) { if (bailed_out()) return res; }
++#define CHECK_BAILOUT() { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return; }
++#define CHECK_BAILOUT_(res) { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return res; }
+
+
+ class InstructionMark: public StackObj {
+diff --git a/src/share/vm/ci/ciEnv.cpp b/src/share/vm/ci/ciEnv.cpp
+--- a/src/share/vm/ci/ciEnv.cpp
++++ b/src/share/vm/ci/ciEnv.cpp
+@@ -1181,3 +1181,11 @@
+   // If memory is low, we stop compiling methods.
+   record_method_not_compilable("out of memory");
+ }
++
++// DCEVM: Called after class redefinition to clean up possibly invalidated state.
++void ciEnv::cleanup_after_redefinition() {
++
++  if (_factory != NULL) {
++    _factory->cleanup_after_redefinition();
++  }
++}
+diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp
+--- a/src/share/vm/ci/ciEnv.hpp
++++ b/src/share/vm/ci/ciEnv.hpp
+@@ -419,6 +419,8 @@
+   void record_failure(const char* reason);
+   void record_method_not_compilable(const char* reason, bool all_tiers = true);
+   void record_out_of_memory_failure();
++
++  void cleanup_after_redefinition();
+ };
+
+ #endif // SHARE_VM_CI_CIENV_HPP
+diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
+--- a/src/share/vm/ci/ciObjectFactory.cpp
++++ b/src/share/vm/ci/ciObjectFactory.cpp
+@@ -294,6 +294,11 @@
+     // into the table. We need to recompute our index.
+     index = find(keyHandle(), _ci_objects);
+   }
++
++  if (is_found_at(index, keyHandle(), _ci_objects)) {
++    // DCEVM: Check if this is an error? Can occur when redefining classes.
++    return _ci_objects->at(index);
++  }
+   assert(!is_found_at(index, keyHandle(), _ci_objects), "no double insert");
+   insert(index, new_object, _ci_objects);
+   return new_object;
+@@ -758,3 +763,50 @@
+          _unloaded_instances->length(),
+          _unloaded_klasses->length());
+ }
++
++// DCEVM: Restoring the ciObject arrays after class redefinition
++void ciObjectFactory::sort_ci_objects(GrowableArray<ciObject*>* objects) {
++
++  // Resort the _ci_objects array. The order of two class pointers can be changed during class redefinition.
++  oop last = NULL;
++  for (int j = 0; j < objects->length(); j++) {
++    oop o = objects->at(j)->get_oop();
++    if (last >= o) {
++      int cur_last_index = j - 1;
++      oop cur_last = last;
++      while (cur_last >= o) {
++
++        // Swap the two objects to guarantee ordering
++        ciObject *tmp = objects->at(cur_last_index);
++        objects->at_put(cur_last_index, objects->at(cur_last_index + 1));
++        objects->at_put(cur_last_index + 1, tmp);
++
++        // Decrement index to move one step to the left
++        cur_last_index--;
++        if (cur_last_index < 0) {
++          break;
++        }
++        cur_last = objects->at(cur_last_index)->get_oop();
++      }
++    } else {
++      assert(last < o, "out of order");
++      last = o;
++    }
++  }
++
++#ifdef ASSERT
++  if (CIObjectFactoryVerify) {
++    oop last = NULL;
++    for (int j = 0; j < objects->length(); j++) {
++      oop o = objects->at(j)->get_oop();
++      assert(last < o, "out of order");
++      last = o;
++    }
++  }
++#endif // ASSERT
++}
++
++// DCEVM: Called after class redefinition to clean up possibly invalidated state.
++void ciObjectFactory::cleanup_after_redefinition() {
++  sort_ci_objects(_ci_objects);
++}
+diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp
+--- a/src/share/vm/ci/ciObjectFactory.hpp
++++ b/src/share/vm/ci/ciObjectFactory.hpp
+@@ -38,6 +38,7 @@
+ class ciObjectFactory : public ResourceObj {
+   friend class VMStructs;
+   friend class ciEnv;
++  friend class CompileBroker;
+
+ private:
+   static volatile bool _initialized;
+@@ -137,6 +138,11 @@
+
+   void print_contents();
+   void print();
++
++private:
++
++  static void sort_ci_objects(GrowableArray<ciObject*>* objects);
++  void cleanup_after_redefinition();
+ };
+
+ #endif // SHARE_VM_CI_CIOBJECTFACTORY_HPP
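`sort_ci_objects` is an in-place insertion sort: the `_ci_objects` cache is kept sorted by oop address, and redefinition can move class oops, so the ordering must be restored before the next binary-search lookup. A minimal Java sketch of the same idea, with plain `long` keys standing in for oop addresses (names are illustrative, not part of the patch):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class ReSort {
    // Restore ascending order after some keys changed, mirroring the
    // swap-to-the-left loop in ciObjectFactory::sort_ci_objects.
    static void restoreOrder(List<Long> keys) {
        for (int j = 1; j < keys.size(); j++) {
            int i = j;
            // Swap the element leftwards until the prefix is ordered again.
            while (i > 0 && keys.get(i - 1) >= keys.get(i)) {
                Long tmp = keys.get(i - 1);
                keys.set(i - 1, keys.get(i));
                keys.set(i, tmp);
                i--;
            }
        }
    }

    public static void main(String[] args) {
        List<Long> keys = new ArrayList<>(Arrays.asList(10L, 30L, 20L, 40L, 15L));
        restoreOrder(keys);
        System.out.println(keys); // [10, 15, 20, 30, 40]
    }
}
```

Insertion sort is a sensible choice here because the array is almost sorted after a redefinition step, giving near-linear behavior.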
+diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
+--- a/src/share/vm/classfile/classFileParser.cpp
++++ b/src/share/vm/classfile/classFileParser.cpp
+@@ -787,6 +787,7 @@
+                                                  Handle class_loader,
+                                                  Handle protection_domain,
+                                                  Symbol* class_name,
++                                                 KlassHandle old_klass,
+                                                  TRAPS) {
+   ClassFileStream* cfs = stream();
+   assert(length > 0, "only called for length>0");
+@@ -805,6 +806,9 @@
+       interface_index, CHECK_(nullHandle));
+     if (cp->tag_at(interface_index).is_klass()) {
+       interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
++      if (!old_klass.is_null() && !interf->is_newest_version()) {
++        interf = KlassHandle(THREAD, interf->newest_version());
++      }
+     } else {
+       Symbol* unresolved_klass = cp->klass_name_at(interface_index);
+@@ -817,6 +821,9 @@
+       klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+                     unresolved_klass, class_loader, protection_domain,
+                     false, CHECK_(nullHandle));
++      if (!old_klass.is_null()) {
++        k = k->klass_part()->newest_version();
++      }
+       interf = KlassHandle(THREAD, k);
+
+       if (LinkWellKnownClasses) // my super type is well known to me
+@@ -1705,6 +1712,8 @@
+   int runtime_invisible_parameter_annotations_length = 0;
+   u1* annotation_default = NULL;
+   int annotation_default_length = 0;
++  u2 code_section_table_length;
++  typeArrayHandle code_section_table;
+
+   // Parse code and exceptions attribute
+   u2 method_attributes_count = cfs->get_u2_fast();
+@@ -1874,6 +1883,24 @@
+         parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+       stackmap_data = typeArrayHandle(THREAD, sm);
+       parsed_stackmap_attribute = true;
++    } else if (UseMethodForwardPoints && cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_code_sections()) {
++      int length = code_attribute_length;
++      int value_count = length / sizeof(u2);
++      int line_count = length / 3;
++      if (TraceRedefineClasses >= 3) {
++        tty->print_cr("Found code section attribute when loading class with %d entries", value_count, line_count);
++      }
++      code_section_table_length = value_count;
++      code_section_table = oopFactory::new_permanent_shortArray(value_count, CHECK_NULL);
++      code_section_table->set_length(value_count);
++
++      for (int i = 0; i < value_count; ++i) {
++        u2 value = cfs->get_u2(CHECK_(nullHandle));
++        code_section_table->short_at_put(i, value);
++        if (TraceRedefineClasses >= 4) {
++          tty->print_cr("Code section table at %d: %d", i, value);
++        }
++      }
+     } else {
+       // Skip unknown attributes
+       cfs->skip_u1(code_attribute_length, CHECK_(nullHandle));
+@@ -1994,6 +2021,18 @@
+   }
+ #endif
+
++  // DCEVM: TODO: Get a different solution for the problem of method forward
++  // points and variable sized interpreter frames.
++  if (UseMethodForwardPoints) {
++    if (max_stack > MethodForwardPointsMaxStack) {
++      fatal(err_msg("Method has too large stack (%d), increase the value of MethodForwardPointsMaxStack (%d)", max_stack, MethodForwardPointsMaxStack));
++    }
++    if (max_locals > MethodForwardPointsMaxLocals) {
++      fatal(err_msg("Method has too many locals (%d), increase the value of MethodForwardPointsMaxLocals (%d)", max_stack, MethodForwardPointsMaxStack));
++    }
++    max_stack = MethodForwardPointsMaxStack;
++    max_locals = MethodForwardPointsMaxLocals;
++  }
+   // Fill in code attribute information
+   m->set_max_stack(max_stack);
+   m->set_max_locals(max_locals);
+@@ -2008,6 +2047,8 @@
+    */
+   m->set_exception_table(exception_handlers());
+
++  m->constMethod()->set_code_section_table(code_section_table());
++
+   // Copy byte codes
+   m->set_code(code_start);
+
+@@ -2543,6 +2584,24 @@
+         "Invalid Deprecated classfile attribute length %u in class file %s",
+         attribute_length, CHECK);
+     }
++  // DCEVM: Check for deleted field attribute
++  } else if (tag == vmSymbols::tag_field_redefinition_policy()) {
++
++    char field_redefinition_policy = cfs->get_u1_fast();
++    k->set_field_redefinition_policy(field_redefinition_policy);
++
++  // DCEVM: Check for deleted static field attribute
++  } else if (tag == vmSymbols::tag_static_field_redefinition_policy()) {
++
++    char static_field_redefinition_policy = cfs->get_u1_fast();
++    k->set_static_field_redefinition_policy(static_field_redefinition_policy);
++
++  // DCEVM: Check for deleted method attribute
++  } else if (tag == vmSymbols::tag_method_redefinition_policy()) {
++
++    char method_redefinition_policy = cfs->get_u1_fast();
++    k->set_method_redefinition_policy(method_redefinition_policy);
++
+   } else if (_major_version >= JAVA_1_5_VERSION) {
+     if (tag == vmSymbols::tag_signature()) {
+       if (attribute_length != 2) {
+@@ -2634,9 +2693,126 @@
+ }
+
+
++// DCEVM: Finds the super symbols by reading the bytes of the class and returns
++// them in a growable array.
++void ClassFileParser::findSuperSymbols(Symbol* name,
++                                       Handle class_loader,
++                                       Handle protection_domain,
++                                       KlassHandle old_klass,
++                                       GrowableArray<Symbol*> &handles,
++                                       TRAPS) {
++
++  _cp_patches = NULL;
++  // So that JVMTI can cache class file in the state before retransformable agents
++  // have modified it
++  unsigned char *cached_class_file_bytes = NULL;
++
++  ClassFileStream* cfs = stream();
++
++  _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
++
++  instanceKlassHandle nullHandle;
++
++  // Save the class file name for easier error message printing.
++  _class_name = name != NULL ? name : vmSymbols::unknown_class_name();
++
++  cfs->guarantee_more(8, CHECK); // magic, major, minor
++  // Magic value
++  u4 magic = cfs->get_u4_fast();
++  if (magic != JAVA_CLASSFILE_MAGIC) {
++    // Invalid class file!
++    return;
++  }
++
++  // Version numbers
++  u2 minor_version = cfs->get_u2_fast();
++  u2 major_version = cfs->get_u2_fast();
++
++  // Check version numbers - we check this even with verifier off
++  if (!is_supported_version(major_version, minor_version)) {
++
++    // Unsupported version!
++    return;
++  }
++
++  _major_version = major_version;
++  _minor_version = minor_version;
++
++
++  // Check if verification needs to be relaxed for this class file
++  // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376)
++  _relax_verify = Verifier::relax_verify_for(class_loader());
++  _need_verify = false;
++
++  // Constant pool
++  constantPoolHandle cp = parse_constant_pool(CHECK);
++  int cp_size = cp->length();
++
++  cfs->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len
++
++  // Access flags
++  AccessFlags access_flags;
++  jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS;
++
++  if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) {
++    // Set abstract bit for old class files for backward compatibility
++    flags |= JVM_ACC_ABSTRACT;
++  }
++  access_flags.set_flags(flags);
++
++  // This class and superclass
++  instanceKlassHandle super_klass;
++  u2 this_class_index = cfs->get_u2_fast();
++  check_property(
++    valid_cp_range(this_class_index, cp_size) &&
++      cp->tag_at(this_class_index).is_unresolved_klass(),
++    "Invalid this class index %u in constant pool in class file %s",
++    this_class_index, CHECK);
++
++  Symbol* class_name = cp->unresolved_klass_at(this_class_index);
++  assert(class_name != NULL, "class_name can't be null");
++
++  // Update _class_name which could be null previously to be class_name
++  _class_name = class_name;
++
++  // DCEVM: DO NOT release all handles when parsing is done
++  { // HandleMark hm(THREAD);
++
++    // Checks if name in class file matches requested name
++    if (name != NULL && class_name != name) {
++      return;
++    }
++
++    u2 super_class_index = cfs->get_u2_fast();
++
++    if (super_class_index != 0) {
++      Symbol* super_class = cp->klass_name_at(super_class_index);
++      handles.append(super_class);
++    } else {
++      // DCEVM: This redefinition must be for the Object class.
++    }
++
++    // Interfaces
++    u2 itfs_len = cfs->get_u2_fast();
++    objArrayHandle local_interfaces;
++    if (itfs_len == 0) {
++      local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
++    } else {
++      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK);
++    }
++
++    for (int i = 0; i < local_interfaces->length(); i++) {
++      oop o = local_interfaces->obj_at(i);
++      Symbol* interface_handle = ((klassOop)o)->klass_part()->name();
++      handles.append(interface_handle);
++    }
++  }
++}
++
+ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
+                                                     Handle class_loader,
+                                                     Handle protection_domain,
++                                                    KlassHandle old_klass,
+                                                     KlassHandle host_klass,
+                                                     GrowableArray<Handle>* cp_patches,
+                                                     TempNewSymbol& parsed_name,
+@@ -2688,10 +2864,13 @@
+   unsigned char* ptr = cfs->buffer();
+   unsigned char* end_ptr = cfs->buffer() + cfs->length();
+
++  bool pretend_new_universe = Thread::current()->pretend_new_universe();
++  Thread::current()->set_pretend_new_universe(false);
+   JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain,
+                                          &ptr, &end_ptr,
+                                          &cached_class_file_bytes,
+                                          &cached_class_file_length);
++  Thread::current()->set_pretend_new_universe(pretend_new_universe);
+
+   if (ptr != cfs->buffer()) {
+     // JVMTI agent has modified class file data.
+@@ -2847,7 +3026,11 @@
+     // However, make sure it is not an array type.
+     bool is_array = false;
+     if (cp->tag_at(super_class_index).is_klass()) {
+-      super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index));
++      klassOop resolved_klass = cp->resolved_klass_at(super_class_index);
++      if (!old_klass.is_null()) {
++        resolved_klass = resolved_klass->klass_part()->newest_version();
++      }
++      super_klass = instanceKlassHandle(THREAD, resolved_klass);
+       if (_need_verify)
+         is_array = super_klass->oop_is_array();
+     } else if (_need_verify) {
+@@ -2865,7 +3048,7 @@
+     if (itfs_len == 0) {
+       local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array());
+     } else {
+-      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle));
++      local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_(nullHandle));
+     }
+
+     u2 java_fields_count = 0;
+@@ -2912,7 +3095,9 @@
+                                                            protection_domain,
+                                                            true,
+                                                            CHECK_(nullHandle));
+-
++      if (!old_klass.is_null()) {
++        k = k->klass_part()->newest_version();
++      }
+       KlassHandle kh (THREAD, k);
+       super_klass = instanceKlassHandle(THREAD, kh());
+       if (LinkWellKnownClasses) // my super class is well known to me
+@@ -3303,6 +3488,19 @@
+       rt = REF_NONE;
+     } else {
+       rt = super_klass->reference_type();
++
++      // DCEVM: With class redefinition, it can also happen that special classes are loaded.
++      if (name == vmSymbols::java_lang_ref_Reference()) {
++        rt = REF_OTHER;
++      } else if (name == vmSymbols::java_lang_ref_SoftReference()) {
++        rt = REF_SOFT;
++      } else if (name == vmSymbols::java_lang_ref_WeakReference()) {
++        rt = REF_WEAK;
++      } else if (name == vmSymbols::java_lang_ref_FinalReference()) {
++        rt = REF_FINAL;
++      } else if (name == vmSymbols::java_lang_ref_PhantomReference()) {
++        rt = REF_PHANTOM;
++      }
+     }
+
+     // We can now create the basic klassOop for this klass
+@@ -3402,7 +3600,7 @@
+     // Do final class setup
+     fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);
+
+-    set_precomputed_flags(this_klass);
++    set_precomputed_flags(this_klass, old_klass);
+
+     // reinitialize modifiers, using the InnerClasses attribute
+     int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle));
+@@ -3425,6 +3623,10 @@
+     // Allocate mirror and initialize static fields
+     java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
+
++    if (rt == REF_OTHER) {
++      instanceRefKlass::update_nonstatic_oop_maps(ik);
++    }
++
+     ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()),
+                                              false /* not shared class */);
+
+@@ -3567,7 +3769,7 @@
+ }
+
+
+-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) {
++void ClassFileParser::set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass) {
+   klassOop super = k->super();
+
+   // Check if this klass has an empty finalize method (i.e. one with return bytecode only),
+   if (!_has_empty_finalizer) {
+     if (_has_finalizer ||
+         (super != NULL && super->klass_part()->has_finalizer())) {
+-      k->set_has_finalizer();
++      if (old_klass.is_null() || old_klass->has_finalizer()) {
++        k->set_has_finalizer();
++      }
+     }
+   }
+
+@@ -3591,7 +3795,7 @@
+
+   // Check if this klass supports the java.lang.Cloneable interface
+   if (SystemDictionary::Cloneable_klass_loaded()) {
+-    if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) {
++    if (k->is_subtype_of(SystemDictionary::Cloneable_klass()) || k->is_subtype_of(SystemDictionary::Cloneable_klass()->klass_part()->newest_version())) {
+       k->set_is_cloneable();
+     }
+   }
+diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp
+--- a/src/share/vm/classfile/classFileParser.hpp
++++ b/src/share/vm/classfile/classFileParser.hpp
+@@ -78,6 +78,7 @@
+                                 Handle class_loader,
+                                 Handle protection_domain,
+                                 Symbol* class_name,
++                                KlassHandle old_klass,
+                                 TRAPS);
+
+   // Field parsing
+@@ -151,7 +152,7 @@
+                      unsigned int nonstatic_oop_map_count,
+                      int* nonstatic_oop_offsets,
+                      unsigned int* nonstatic_oop_counts);
+-  void set_precomputed_flags(instanceKlassHandle k);
++  void set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass);
+   objArrayHandle compute_transitive_interfaces(instanceKlassHandle super,
+                                                objArrayHandle local_ifs, TRAPS);
+
+@@ -263,21 +264,33 @@
+   instanceKlassHandle parseClassFile(Symbol* name,
+                                      Handle class_loader,
+                                      Handle protection_domain,
++                                     KlassHandle old_klass,
+                                      TempNewSymbol& parsed_name,
+                                      bool verify,
+                                      TRAPS) {
+     KlassHandle no_host_klass;
+-    return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD);
++    return parseClassFile(name, class_loader, protection_domain, old_klass, no_host_klass, NULL, parsed_name, verify, THREAD);
+   }
+   instanceKlassHandle parseClassFile(Symbol* name,
+                                      Handle class_loader,
+                                      Handle protection_domain,
++                                     KlassHandle old_klass,
+                                      KlassHandle host_klass,
+                                     GrowableArray<Handle>* cp_patches,
+                                     TempNewSymbol& parsed_name,
+                                     bool verify,
+                                     TRAPS);
+
++  static void initialize_static_field(fieldDescriptor* fd, TRAPS);
++
++  // DCEVM: Creates symbol handles for the super class and the interfaces
++  void findSuperSymbols(Symbol* name,
++                        Handle class_loader,
++                        Handle protection_domain,
++                        KlassHandle old_klass,
++                        GrowableArray<Symbol*> &handles,
++                        TRAPS);
++
+   // Verifier checks
+   static void check_super_class_access(instanceKlassHandle this_klass, TRAPS);
+   static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS);
+diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp
+--- a/src/share/vm/classfile/classLoader.cpp
++++ b/src/share/vm/classfile/classLoader.cpp
+@@ -915,6 +915,7 @@
+     instanceKlassHandle result = parser.parseClassFile(h_name,
+                                                        class_loader,
+                                                        protection_domain,
++                                                       KlassHandle(),
+                                                        parsed_name,
+                                                        false,
+                                                        CHECK_(h));
+diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp
+--- a/src/share/vm/classfile/dictionary.cpp
++++ b/src/share/vm/classfile/dictionary.cpp
+@@ -326,6 +326,21 @@
+   }
+ }
+
++
++// DCEVM: Just the classes from defining class loaders
++void Dictionary::classes_do(ObjectClosure *closure) {
++  for (int index = 0; index < table_size(); index++) {
++    for (DictionaryEntry* probe = bucket(index);
++                          probe != NULL;
++                          probe = probe->next()) {
++      klassOop k = probe->klass();
++      if (probe->loader() == instanceKlass::cast(k)->class_loader()) {
++        closure->do_object(k);
++      }
++    }
++  }
++}
++
+ // Added for initialize_itable_for_klass to handle exceptions
+ // Just the classes from defining class loaders
+ void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) {
+@@ -433,6 +448,33 @@
+   add_entry(index, entry);
+ }
+
++// DCEVM: Updates the klass entry to point to the new klassOop. Necessary only for class redefinition.
++bool Dictionary::update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class) {
++
++  // There are several entries for the same class in the dictionary: One extra entry for each parent classloader of the classloader of the class.
++  bool found = false;
++  for (int index = 0; index < table_size(); index++) {
++    for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) {
++      if (entry->klass() == old_class()) {
++        entry->set_literal(k());
++        found = true;
++      }
++    }
++  }
++
++  return found;
++}
++
++// DCEVM: Undo previous updates to the system dictionary
++void Dictionary::rollback_redefinition() {
++  for (int index = 0; index < table_size(); index++) {
++    for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) {
++      if (entry->klass()->klass_part()->is_redefining()) {
++        entry->set_literal(entry->klass()->klass_part()->old_version());
++      }
++    }
++  }
++}
+
+ // This routine does not lock the system dictionary.
+ //
+@@ -459,12 +501,22 @@
+   return NULL;
+ }
+
++// DCEVM: return old version if we are not in the new universe?
++klassOop Dictionary::intercept_for_version(klassOop k) {
++  if (k == NULL) return k;
++
++  if (k->klass_part()->is_redefining() && !Thread::current()->pretend_new_universe()) {
++    return k->klass_part()->old_version();
++  }
++
++  return k;
++}
+
+ klassOop Dictionary::find(int index, unsigned int hash, Symbol* name,
+                           Handle loader, Handle protection_domain, TRAPS) {
+   DictionaryEntry* entry = get_entry(index, hash, name, loader);
+   if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) {
+-    return entry->klass();
++    return intercept_for_version(entry->klass());
+   } else {
+     return NULL;
+   }
+@@ -477,7 +529,7 @@
+   assert (index == index_for(name, loader), "incorrect index?");
+
+   DictionaryEntry* entry = get_entry(index, hash, name, loader);
+-  return (entry != NULL) ? entry->klass() : (klassOop)NULL;
++  return intercept_for_version((entry != NULL) ? entry->klass() : (klassOop)NULL);
+ }
+
+
+@@ -489,7 +541,7 @@
+   assert (index == index_for(name, Handle()), "incorrect index?");
+
+   DictionaryEntry* entry = get_entry(index, hash, name, Handle());
+-  return (entry != NULL) ? entry->klass() : (klassOop)NULL;
++  return intercept_for_version((entry != NULL) ? entry->klass() : (klassOop)NULL);
+ }
+
+
+diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dictionary.hpp
+--- a/src/share/vm/classfile/dictionary.hpp
++++ b/src/share/vm/classfile/dictionary.hpp
+@@ -73,6 +73,10 @@
+
+   void add_klass(Symbol* class_name, Handle class_loader, KlassHandle obj);
+
++  bool update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class);
++
++  void rollback_redefinition();
++
+   klassOop find_class(int index, unsigned int hash,
+                       Symbol* name, Handle loader);
+
+@@ -89,6 +93,7 @@
+   void classes_do(void f(klassOop, TRAPS), TRAPS);
+   void classes_do(void f(klassOop, oop));
+   void classes_do(void f(klassOop, oop, TRAPS), TRAPS);
++  void classes_do(ObjectClosure *closure);
+
+   void methods_do(void f(methodOop));
+
+@@ -105,6 +110,7 @@
+   bool do_unloading(BoolObjectClosure* is_alive);
+
+   // Protection domains
++  static klassOop intercept_for_version(klassOop k);
+   klassOop find(int index, unsigned int hash, Symbol* name,
+                 Handle loader, Handle protection_domain, TRAPS);
+   bool is_valid_protection_domain(int index, unsigned int hash,
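The dictionary changes above maintain an invariant worth spelling out: `update_klass` swaps which klassOop a dictionary entry points to, while `intercept_for_version` hides the in-flight new version from threads that are not yet "pretending" the new universe. From Java code, class identity is stable across the swap, just as with standard JVMTI redefinition. A hypothetical check (class name illustrative only, and the redefinition trigger is assumed to come from an agent):

```java
public class IdentityCheck {
    public static void main(String[] args) throws Exception {
        Class<?> before = Class.forName("com.example.Greeter"); // hypothetical class
        // ... trigger a redefinition of com.example.Greeter here, e.g. via an agent ...
        Class<?> after = Class.forName("com.example.Greeter");
        System.out.println(before == after); // expected: true — same Class object
    }
}
```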
+diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
+--- a/src/share/vm/classfile/javaClasses.cpp
++++ b/src/share/vm/classfile/javaClasses.cpp
+@@ -1783,7 +1783,7 @@
+   klassOop klass = SystemDictionary::reflect_Method_klass();
+   // This class is eagerly initialized during VM initialization, since we keep a reference
+   // to one of the methods
+-  assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");
++  assert(instanceKlass::cast(klass)->is_initialized() || klass->klass_part()->old_version() != NULL, "must be initialized");
+   return instanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+ }
+
+diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
+--- a/src/share/vm/classfile/javaClasses.hpp
++++ b/src/share/vm/classfile/javaClasses.hpp
+@@ -213,7 +213,6 @@
+
+ class java_lang_Class : AllStatic {
+   friend class VMStructs;
+-
+  private:
+   // The fake offsets are added by the class loader when java.lang.Class is loaded
+
+diff --git a/src/share/vm/classfile/loaderConstraints.cpp b/src/share/vm/classfile/loaderConstraints.cpp
+--- a/src/share/vm/classfile/loaderConstraints.cpp
++++ b/src/share/vm/classfile/loaderConstraints.cpp
+@@ -449,7 +449,7 @@
+     if (k != NULL) {
+       // We found the class in the system dictionary, so we should
+       // make sure that the klassOop matches what we already have.
+-      guarantee(k == probe->klass(), "klass should be in dictionary");
++      guarantee(k == probe->klass()->klass_part()->newest_version(), "klass should be in dictionary");
+     } else {
+       // If we don't find the class in the system dictionary, it
+       // has to be in the placeholders table.
+diff --git a/src/share/vm/classfile/loaderConstraints.hpp b/src/share/vm/classfile/loaderConstraints.hpp
+--- a/src/share/vm/classfile/loaderConstraints.hpp
++++ b/src/share/vm/classfile/loaderConstraints.hpp
+@@ -106,7 +106,7 @@
+
+   klassOop klass() { return literal(); }
+   klassOop* klass_addr() { return literal_addr(); }
+-  void set_klass(klassOop k) { set_literal(k); }
++  void set_klass(klassOop k) { set_literal(k); assert(k == NULL || !k->klass_part()->is_redefining(), "just checking"); }
+
+   LoaderConstraintEntry* next() {
+     return (LoaderConstraintEntry*)HashtableEntry<klassOop>::next();
+diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp
+--- a/src/share/vm/classfile/systemDictionary.cpp
++++ b/src/share/vm/classfile/systemDictionary.cpp
+@@ -151,6 +151,7 @@
+     // can return a null klass
+     klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD);
+   }
++  assert(klass == NULL || klass->klass_part()->is_newest_version() || klass->klass_part()->newest_version()->klass_part()->is_redefining(), "must be");
+   return klass;
+ }
+
+@@ -193,7 +194,8 @@
+ // Forwards to resolve_instance_class_or_null
+
+ klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) {
+-  assert(!THREAD->is_Compiler_thread(), "Can not load classes with the Compiler thread");
++  // DCEVM: Check if this relaxing of the condition is correct? Test case hs203t004 failing otherwise.
++  assert(!THREAD->is_Compiler_thread() || JvmtiThreadState::state_for(JavaThread::current())->get_class_being_redefined() != NULL, "Can not load classes with the Compiler thread");
+   if (FieldType::is_array(class_name)) {
+     return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+   } else if (FieldType::is_obj(class_name)) {
+@@ -997,6 +999,7 @@
+   instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
+                                                              class_loader,
+                                                              protection_domain,
++                                                             KlassHandle(),
+                                                              host_klass,
+                                                              cp_patches,
+                                                              parsed_name,
+@@ -1056,8 +1059,15 @@
+                                              Handle protection_domain,
+                                              ClassFileStream* st,
+                                              bool verify,
++                                             KlassHandle old_class,
+                                              TRAPS) {
+
++  bool redefine_classes_locked = false;
++  if (!Thread::current()->redefine_classes_mutex()->owned_by_self()) {
++    Thread::current()->redefine_classes_mutex()->lock();
++    redefine_classes_locked = true;
++  }
++
+   // Classloaders that support parallelism, e.g. bootstrap classloader,
+   // or all classloaders with UnsyncloadClass do not acquire lock here
+   bool DoObjectLock = true;
+@@ -1085,9 +1095,14 @@
+   instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
+                                                              class_loader,
+                                                              protection_domain,
++                                                             old_class,
+                                                              parsed_name,
+                                                              verify,
+                                                              THREAD);
++  if (!old_class.is_null() && !k.is_null()) {
++    k->set_redefining(true);
++    k->set_old_version(old_class());
++  }
+
+   const char* pkg = "java/";
+   if (!HAS_PENDING_EXCEPTION &&
+@@ -1122,13 +1137,18 @@
+     // Add class just loaded
+     // If a class loader supports parallel classloading handle parallel define requests
+     // find_or_define_instance_class may return a different instanceKlass
+-    if (is_parallelCapable(class_loader)) {
++    // (tw) TODO: for class redefinition the parallel version does not work, check if this is a problem?
++    if (is_parallelCapable(class_loader) && old_class.is_null()) {
+       k = find_or_define_instance_class(class_name, class_loader, k, THREAD);
+     } else {
+-      define_instance_class(k, THREAD);
++      define_instance_class(k, old_class, THREAD);
+     }
+   }
+
++  if (redefine_classes_locked) {
++    Thread::current()->redefine_classes_mutex()->unlock();
++  }
++
+   // If parsing the class file or define_instance_class failed, we
+   // need to remove the placeholder added on our behalf. But we
+   // must make sure parsed_name is valid first (it won't be if we had
+@@ -1157,7 +1177,7 @@
+     MutexLocker mu(SystemDictionary_lock, THREAD);
+
+     klassOop check = find_class(parsed_name, class_loader);
+-    assert(check == k(), "should be present in the dictionary");
++    assert((check == k() && !k->is_redefining()) || (k->is_redefining() && check == k->old_version()), "should be present in the dictionary");
+
+     klassOop check2 = find_class(h_name, h_loader);
+     assert(check == check2, "name inconsistancy in SystemDictionary");
+@@ -1453,7 +1473,11 @@
+   }
+ }
+
+-void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
++void SystemDictionary::rollback_redefinition() {
++  dictionary()->rollback_redefinition();
++}
++
++void SystemDictionary::define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS) {
+
+   Handle class_loader_h(THREAD, k->class_loader());
+
+@@ -1480,13 +1504,23 @@
+   Symbol* name_h = k->name();
+   unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h);
+   int d_index = dictionary()->hash_to_index(d_hash);
+-  check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK);
++
++  // DCEVM: Update version of the klassOop in the system dictionary
++  // TODO: Check for thread safety!
++  if (!old_class.is_null()) {
++    bool ok = dictionary()->update_klass(d_index, d_hash, name_h, class_loader_h, k, old_class);
++    assert (ok, "must have found old class and updated!");
++  }
++  check_constraints(d_index, d_hash, k, class_loader_h, old_class.is_null(), CHECK);
++
++  if (!old_class.is_null() && TraceRedefineClasses >= 3) { tty->print_cr("Class has been updated!"); }
+
+   // Register class just loaded with class loader (placed in Vector)
+   // Note we do this before updating the dictionary, as this can
+   // fail with an OutOfMemoryError (if it does, we will *not* put this
+   // class in the dictionary and will not update the class hierarchy).
+   // (tw) Only register if not redefining a class.
+-  if (k->class_loader() != NULL) {
++  if (k->class_loader() != NULL && old_class.is_null()) {
+     methodHandle m(THREAD, Universe::loader_addClass_method());
+     JavaValue result(T_VOID);
+     JavaCallArguments args(class_loader_h);
+@@ -1512,8 +1546,9 @@
+   }
+   k->eager_initialize(THREAD);
+
++  // (tw) Only notify jvmti if not redefining a class.
+   // notify jvmti
+-  if (JvmtiExport::should_post_class_load()) {
++  if (JvmtiExport::should_post_class_load() && old_class.is_null()) {
+       assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
+       JvmtiExport::post_class_load((JavaThread *) THREAD, k());
+
+@@ -1586,7 +1621,7 @@
+     }
+   }
+
+-  define_instance_class(k, THREAD);
++  define_instance_class(k, KlassHandle(), THREAD);
+
+   Handle linkage_exception = Handle(); // null handle
+
+@@ -1716,6 +1751,14 @@
+   Universe::flush_dependents_on(k);
+ }
+
++// (tw) Remove from hierarchy - Undo add_to_hierarchy.
++void SystemDictionary::remove_from_hierarchy(instanceKlassHandle k) {
++  assert(k.not_null(), "just checking");
++
++  k->remove_from_sibling_list();
++
++  // TODO: Remove from interfaces.
++}
+
+ // ----------------------------------------------------------------------------
+ // GC support
+@@ -1804,7 +1847,8 @@
+ }
+
+
+-void SystemDictionary::preloaded_oops_do(OopClosure* f) {
++// (tw) Iterate over all pre-loaded classes in the dictionary.
++void SystemDictionary::preloaded_classes_do(OopClosure *f) {
+   for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) {
+     f->do_oop((oop*) &_well_known_klasses[k]);
+   }
+@@ -1818,6 +1862,23 @@
+     }
+   }
+
++  // TODO: Check if we need to call FilterFieldsMap
++}
++
++void SystemDictionary::preloaded_oops_do(OopClosure* f) {
++  for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) {
++    f->do_oop((oop*) &_well_known_klasses[k]);
++  }
++
++  {
++    for (int i = 0; i < T_VOID+1; i++) {
++      if (_box_klasses[i] != NULL) {
++        assert(i >= T_BOOLEAN, "checking");
++        f->do_oop((oop*) &_box_klasses[i]);
++      }
++    }
++  }
++
+   // The basic type mirrors would have already been processed in
+   // Universe::oops_do(), via a call to shared_oops_do(), so should
+   // not be processed again.
+@@ -1836,6 +1897,11 @@
+   dictionary()->classes_do(f);
+ }
+
++// (tw) Iterate over all classes in the dictionary.
++void SystemDictionary::classes_do(ObjectClosure *closure) {
++  dictionary()->classes_do(closure);
++}
++
+ // Added for initialize_itable_for_klass
+ // Just the classes from defining class loaders
+ // Don't iterate over placeholders
+@@ -1989,7 +2055,9 @@
+
+   // Preload ref klasses and set reference types
+   instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
+-  instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
++
++  // (tw) This is now done in parseClassFile in order to support class redefinition
++  // instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
+
+   initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
+   instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
+@@ -2081,7 +2149,11 @@
+     // also holds array classes
+
+     assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary");
+-    if ((defining == true) || (k() != check)) {
++    if ((defining == true) && ((k() != check) && k->old_version() != check)) {
++      ResourceMark rm(Thread::current());
++      tty->print_cr("(%d / %d) (%s/%s)", k->revision_number(), check->klass_part()->revision_number(), k->name()->as_C_string(), check->klass_part()->name()->as_C_string());
++      k()->print();
++      check->print();
+       linkage_error = "loader (instance of %s): attempted duplicate class "
+                       "definition for name: \"%s\"";
+     } else {
+diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
+--- a/src/share/vm/classfile/systemDictionary.hpp
++++ b/src/share/vm/classfile/systemDictionary.hpp
+@@ -276,7 +276,7 @@
+   // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
+   static klassOop resolve_from_stream(Symbol* class_name, Handle class_loader,
+                                       Handle protection_domain,
+-                                      ClassFileStream* st, bool verify, TRAPS);
++                                      ClassFileStream* st, bool verify, KlassHandle old_class, TRAPS);
+
+   // Lookup an already loaded class. If not found NULL is returned.
+   static klassOop find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
+@@ -320,6 +320,8 @@
+   // Iterate over all klasses in dictionary
+   // Just the classes from defining class loaders
+   static void classes_do(void f(klassOop));
++  static void classes_do(ObjectClosure *closure);
++  static void preloaded_classes_do(OopClosure *closure);
+   // Added for initialize_itable_for_klass to handle exceptions
+   static void classes_do(void f(klassOop, TRAPS), TRAPS);
+   // All classes, and their class loaders
+@@ -427,6 +429,8 @@
+     initialize_wk_klasses_until((WKID) limit, start_id, THREAD);
+   }
+
++  static void rollback_redefinition();
++
+ public:
+ #define WK_KLASS_DECLARE(name, symbol, option) \
+   static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); }
+@@ -608,7 +612,7 @@
+   // after waiting, but before reentering SystemDictionary_lock
+   // to preserve lock order semantics.
+   static void double_lock_wait(Handle lockObject, TRAPS);
+-  static void define_instance_class(instanceKlassHandle k, TRAPS);
++  static void define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS);
+   static instanceKlassHandle find_or_define_instance_class(Symbol* class_name,
+                                                            Handle class_loader,
+                                                            instanceKlassHandle k, TRAPS);
+@@ -627,6 +631,11 @@
+   // Setup link to hierarchy
+   static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
+
++public:
++
++  // Remove link to hierarchy
++  static void remove_from_hierarchy(instanceKlassHandle k);
++
+ private:
+   // We pass in the hashtable index so we can calculate it outside of
+   // the SystemDictionary_lock.
+diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp
+--- a/src/share/vm/classfile/verifier.cpp
++++ b/src/share/vm/classfile/verifier.cpp
+@@ -103,7 +103,7 @@
+   return !need_verify;
+ }
+
+-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) {
++bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS) {
+   HandleMark hm;
+   ResourceMark rm(THREAD);
+
+@@ -127,17 +127,19 @@
+     split_verifier.verify_class(THREAD);
+     exception_name = split_verifier.result();
+     if (klass->major_version() < NOFAILOVER_MAJOR_VERSION &&
+-        FailOverToOldVerifier && !HAS_PENDING_EXCEPTION &&
++        FailOverToOldVerifier && may_use_old_verifier && !HAS_PENDING_EXCEPTION &&
+         (exception_name == vmSymbols::java_lang_VerifyError() ||
+          exception_name == vmSymbols::java_lang_ClassFormatError())) {
+       if (TraceClassInitialization) {
+         tty->print_cr(
+           "Fail over class verification to old verifier for: %s", klassName);
+       }
++      assert(may_use_old_verifier, "");
+       exception_name = inference_verify(
+         klass, message_buffer, message_buffer_len, THREAD);
+     }
+   } else {
++    assert(may_use_old_verifier, "");
+     exception_name = inference_verify(
+       klass, message_buffer, message_buffer_len, THREAD);
+   }
+@@ -152,6 +154,9 @@
+     }
+     tty->print_cr("End class verification for: %s", klassName);
+   }
++  } else if (TraceClassInitialization) {
++    // (tw) Output not verified classes
++    tty->print_cr("Class %s was not verified", klassName);
+   }
+
+   if (HAS_PENDING_EXCEPTION) {
+@@ -203,7 +208,7 @@
+     // NOTE: this is called too early in the bootstrapping process to be
+     // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
+     (refl_magic_klass == NULL ||
+-     !klass->is_subtype_of(refl_magic_klass) ||
++     !(klass->is_subtype_of(refl_magic_klass) || klass->is_subtype_of(refl_magic_klass->klass_part()->newest_version())) ||
+      VerifyReflectionBytecodes)
+   );
+ }
+@@ -272,7 +277,7 @@
+ ClassVerifier::ClassVerifier(
+     instanceKlassHandle klass, char* msg, size_t msg_len, TRAPS)
+     : _thread(THREAD), _exception_type(NULL), _message(msg),
+-      _message_buffer_len(msg_len), _klass(klass) {
++      _message_buffer_len(msg_len), _klass(klass->newest_version()), _klass_to_verify(klass) {
+   _this_type = VerificationType::reference_type(klass->name());
+   // Create list to hold symbols in reference area.
+ _symbols = new GrowableArray<Symbol*>(100, 0, NULL); +@@ -296,7 +301,7 @@ + _klass->external_name()); + } + +- objArrayHandle methods(THREAD, _klass->methods()); ++ objArrayHandle methods(THREAD, _klass_to_verify->methods()); + int num_methods = methods->length(); + + for (int index = 0; index < num_methods; index++) { +@@ -2081,7 +2086,10 @@ + VerificationType stack_object_type = + current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this)); + if (current_type() != stack_object_type) { +- assert(cp->cache() == NULL, "not rewritten yet"); ++ ++ // (tw) TODO: Check if relaxing the following assertion is correct. For class redefinition we might call the verifier twice. ++ //assert(cp->cache() == NULL, "not rewritten yet"); ++ + Symbol* ref_class_name = + cp->klass_name_at(cp->klass_ref_index_at(index)); + // See the comments in verify_field_instructions() for +diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp +--- a/src/share/vm/classfile/verifier.hpp ++++ b/src/share/vm/classfile/verifier.hpp +@@ -47,7 +47,7 @@ + * Otherwise, no exception is thrown and the return indicates the + * error. + */ +- static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS); ++ static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS); + + // Return false if the class is loaded by the bootstrap loader, + // or if defineClass was called requesting skipping verification +@@ -97,7 +97,10 @@ + size_t _message_buffer_len; + GrowableArray<Symbol*>* _symbols; // keep a list of symbols created + ++public: + void verify_method(methodHandle method, TRAPS); ++ ++private: + char* generate_code_data(methodHandle m, u4 code_length, TRAPS); + void verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS); + void verify_local_variable_table(u4 code_length, char* code_data, TRAPS); +@@ -168,6 +171,7 @@ + + VerificationType object_type() const; + ++ instanceKlassHandle _klass_to_verify; + instanceKlassHandle _klass; // the class being verified + methodHandle _method; // current method being verified + VerificationType _this_type; // the verification type of the current class +diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp +--- a/src/share/vm/classfile/vmSymbols.hpp ++++ b/src/share/vm/classfile/vmSymbols.hpp +@@ -131,6 +131,10 @@ + template(tag_annotation_default, "AnnotationDefault") \ + template(tag_enclosing_method, "EnclosingMethod") \ + template(tag_bootstrap_methods, "BootstrapMethods") \ ++ template(tag_static_field_redefinition_policy, "StaticFieldRedefinitionPolicy") \ ++ template(tag_field_redefinition_policy, "FieldRedefinitionPolicy") \ ++ template(tag_method_redefinition_policy, "MethodRedefinitionPolicy") \ ++ template(tag_code_sections, "CodeSections") \ + \ + /* exception klasses: at least all exceptions thrown by the VM have entries here */ \ + template(java_lang_ArithmeticException, "java/lang/ArithmeticException") \ +@@ -376,6 +380,10 @@ + template(oop_size_name, "oop_size") \ + template(static_oop_field_count_name, "static_oop_field_count") \ + \ ++ /* mutator in case of class redefinition */ \ ++ template(static_transformer_name, "$staticTransformer") \ ++ template(transformer_name, "$transformer") \ ++ \ + /* non-intrinsic name/signature pairs: */ \ + template(register_method_name, "register") \ + do_alias(register_method_signature, object_void_signature) \ +diff --git 
a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp +--- a/src/share/vm/compiler/compileBroker.cpp ++++ b/src/share/vm/compiler/compileBroker.cpp +@@ -1166,6 +1166,14 @@ + int comp_level, + methodHandle hot_method, int hot_count, + const char* comment, Thread* THREAD) { ++ JavaThread* thread = JavaThread::current(); ++ if (thread->is_Compiler_thread() && thread->as_CompilerThread()->should_bailout()) { ++ return NULL; // FIXME: DCEVM: should we do something else? ++ } ++ if (instanceKlass::cast(method->method_holder())->is_not_initialized()) { ++ return NULL; // FIXME: DCEVM: how should we avoid this? ++ } ++ + // make sure arguments make sense + assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method"); + assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); +@@ -1245,6 +1253,7 @@ + } + + // RedefineClasses() has replaced this method; just return ++ // (tw) This is important for the new version of hotswapping: Old code will only execute properly in the interpreter! + if (method->is_old()) { + return NULL; + } +@@ -1576,6 +1585,8 @@ + + // Never compile a method if breakpoints are present in it + if (method()->number_of_breakpoints() == 0) { ++ thread->compilation_mutex()->lock(); ++ thread->set_should_bailout(false); + // Compile the method. + if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { + #ifdef COMPILER1 +@@ -1599,6 +1610,7 @@ + // After compilation is disabled, remove remaining methods from queue + method->clear_queued_for_compilation(); + } ++ thread->compilation_mutex()->unlock(); + } + } + } +@@ -2127,3 +2139,15 @@ + st->cr(); + #endif + } ++ ++// (tw) Clean up compiler interface after a class redefinition step ++void CompileBroker::cleanup_after_redefinition() { ++ int num_threads = _method_threads->length(); ++ ++ ciObjectFactory::sort_ci_objects(ciObjectFactory::_shared_ci_objects); ++ for (int i=0; i<num_threads; i++) { ++ if (_method_threads->at(i)->env() != NULL && _method_threads->at(i)->env() != (ciEnv *)badAddress) { ++ _method_threads->at(i)->env()->cleanup_after_redefinition(); ++ } ++ } ++} +diff --git a/src/share/vm/compiler/compileBroker.hpp b/src/share/vm/compiler/compileBroker.hpp +--- a/src/share/vm/compiler/compileBroker.hpp ++++ b/src/share/vm/compiler/compileBroker.hpp +@@ -407,6 +407,8 @@ + static void print_last_compile(); + + static void print_compiler_threads_on(outputStream* st); ++ ++ static void cleanup_after_redefinition(); + }; + + #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +@@ -157,6 +157,13 @@ + } + } + ++ ++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ ShouldNotReachHere(); ++ return NULL; ++} ++ + // Like CompactibleSpace forward() but always calls cross_threshold() to + // update the block offset table. Removed initialize_threshold call because + // CFLS does not use a block offset array for contiguous spaces. 
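The forward_compact_top() hook stubbed out above splits forwarding into two steps: compute where an object of a given size would land (switching compaction spaces if necessary), then decide whether the forwarding pointer may actually be installed. The redefinition-aware prepare_for_compaction() later in this patch combines the two roughly as in this sketch (illustrative only, written against the patch's own APIs; the CompactPoint bookkeeping and the lazy creation of the rescue list are elided):

    // Plan one live object q during the forwarding phase. forward_size may
    // differ from q's current size when q's class has a new version with a
    // changed field layout.
    HeapWord* plan_object(CompactibleSpace* space, CompactPoint* cp,
                          oop q, size_t forward_size, HeapWord* compact_top) {
      // Step 1: compute the destination without touching q.
      compact_top = space->forward_compact_top(forward_size, cp, compact_top);
      // Step 2: an object whose copy could overlap a not-yet-moved object
      // is queued for rescue instead of being forwarded in place.
      if (space->must_rescue(q, oop(compact_top))) {
        MarkSweep::_rescued_oops->append(q);   // copied back after compaction
      } else {
        compact_top = space->forward(q, forward_size, cp, compact_top);
      }
      return compact_top;
    }

For the CMS free-list space the hook is deliberately unreachable, presumably because the enhanced-redefinition compaction never runs in that space.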
+diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +@@ -149,6 +149,7 @@ + + // Support for compacting cms + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); ++ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); + HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); + + // Initialization helpers. +diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp +--- a/src/share/vm/gc_implementation/shared/markSweep.cpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp +@@ -30,6 +30,8 @@ + #include "oops/objArrayKlass.inline.hpp" + #include "oops/oop.inline.hpp" + ++GrowableArray<oop>* MarkSweep::_rescued_oops = NULL; ++ + Stack<oop> MarkSweep::_marking_stack; + Stack<DataLayout*> MarkSweep::_revisit_mdo_stack; + Stack<Klass*> MarkSweep::_revisit_klass_stack; +@@ -350,3 +352,86 @@ + } + + #endif ++ ++// (tw) Copy the rescued objects to their destination address after compaction. ++void MarkSweep::copy_rescued_objects_back() { ++ ++ if (_rescued_oops != NULL) { ++ ++ for (int i=0; i<_rescued_oops->length(); i++) { ++ oop rescued_obj = _rescued_oops->at(i); ++ ++ int size = rescued_obj->size(); ++ oop new_obj = rescued_obj->forwardee(); ++ ++ if (rescued_obj->blueprint()->new_version() != NULL) { ++ MarkSweep::update_fields(rescued_obj, new_obj); ++ } else { ++ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); ++ } ++ ++ FREE_RESOURCE_ARRAY(HeapWord, rescued_obj, size); ++ ++ new_obj->init_mark(); ++ assert(new_obj->is_oop(), "must be a valid oop"); ++ } ++ _rescued_oops->clear(); ++ _rescued_oops = NULL; ++ } ++} ++ ++// (tw) Update instances of a class whose fields changed. 
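update_fields() below consumes the new class version's update_information() array. The loop implies this encoding: a positive entry is a byte count to copy, immediately followed by the source offset to copy it from; a negative entry -n asks for n bytes of the new layout to be zero-filled (a newly added field); a 0 entry terminates the list. A standalone rendering of that walk; the example array values are hypothetical, not taken from this patch:

    #include <cstring>

    // Hypothetical update info: copy 16 bytes from offset 0, zero-fill
    // 4 bytes for a newly added int field, copy 8 bytes from offset 16.
    static const int update_information[] = { 16, 0, -4, 8, 16, 0 };

    void apply_update_information(const char* old_obj, char* new_obj,
                                  const int* cur) {
      int dest_offset = 0;
      while (*cur != 0) {
        if (*cur > 0) {
          int size   = *cur++;   // byte count preserved from the old layout
          int offset = *cur++;   // where those bytes live in the old object
          memcpy(new_obj + dest_offset, old_obj + offset, size);
          dest_offset += size;
        } else {
          int skip = -*cur++;    // bytes introduced by the new layout
          memset(new_obj + dest_offset, 0, skip);
          dest_offset += skip;
        }
      }
    }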
++void MarkSweep::update_fields(oop q, oop new_location) { ++ ++ assert(q->blueprint()->new_version() != NULL, "class of old object must have new version"); ++ ++ klassOop old_klass_oop = q->klass(); ++ klassOop new_klass_oop = q->blueprint()->new_version(); ++ ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = instanceKlass::cast(new_klass_oop); ++ ++ int size = q->size_given_klass(old_klass); ++ int new_size = q->size_given_klass(new_klass); ++ ++ oop tmp_obj = q; ++ ++ if (new_klass_oop->klass_part()->is_copying_backwards()) { ++ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) || ++ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) { ++ tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size); ++ } ++ } ++ ++ int *cur = new_klass_oop->klass_part()->update_information(); ++ ++ tmp_obj->set_klass_no_check(new_klass_oop); ++ ++ if (cur == NULL) { ++ assert(size == new_size, "just checking"); ++ Copy::conjoint_words(((HeapWord *)tmp_obj), ((HeapWord *)new_location), size); ++ } else { ++ int destOffset = 0; ++ while (*cur != 0) { ++ if (*cur > 0) { ++ int size = *cur; ++ cur++; ++ int offset = *cur; ++ Copy::conjoint_jbytes(((char *)tmp_obj) + offset, ((char *)new_location) + destOffset, size); ++ destOffset += size; ++ cur++; ++ } else { ++ assert(*cur < 0, ""); ++ int skip = -*cur; ++ Copy::fill_to_bytes(((char*)new_location) + destOffset, skip, 0); ++ destOffset += skip; ++ cur++; ++ } ++ } ++ } ++ ++ if (tmp_obj != q) { ++ FREE_RESOURCE_ARRAY(HeapWord, tmp_obj, size); ++ } ++} +diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp +--- a/src/share/vm/gc_implementation/shared/markSweep.hpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp +@@ -115,8 +115,12 @@ + friend class AdjustPointerClosure; + friend class KeepAliveClosure; + friend class VM_MarkSweep; ++ friend class GenMarkSweep; + friend void marksweep_init(); + ++public: ++ static GrowableArray<oop>* _rescued_oops; ++ + // + // Vars + // +@@ -200,6 +204,8 @@ + template <class T> static inline void mark_and_push(T* p); + static inline void push_objarray(oop obj, size_t index); + ++ static void copy_rescued_objects_back(); ++ static void update_fields(oop q, oop new_location); + static void follow_stack(); // Empty marking stack. + + static void preserve_mark(oop p, markOop mark); +diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp +--- a/src/share/vm/interpreter/interpreterRuntime.cpp ++++ b/src/share/vm/interpreter/interpreterRuntime.cpp +@@ -403,7 +403,7 @@ + assert(h_exception.not_null(), "NULL exceptions should be handled by athrow"); + assert(h_exception->is_oop(), "just checking"); + // Check that exception is a subclass of Throwable, otherwise we have a VerifyError +- if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) { ++ if (!(h_exception->is_a(SystemDictionary::Throwable_klass()->klass_part()->newest_version())) && !(h_exception->is_a(SystemDictionary::Throwable_klass()))) { + if (ExitVMOnVerifyError) vm_exit(-1); + ShouldNotReachHere(); + } +@@ -674,6 +674,82 @@ + JvmtiExport::post_raw_breakpoint(thread, method, bcp); + IRT_END + ++// (tw) Correctly resolve method when running old code. 
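forward_method() below transfers an interpreter frame from an old method version to its forward method when execution reaches a padding NOP. It relies on the per-method code section table added to constMethodOop (three shorts per entry: new start index, original start index, section length; see the accessors in the constMethodOop.hpp hunk further down). A hedged sketch of the bci mapping this layout suggests; the offset-within-section rule is an assumption, since calculate_forward_bci() itself is not part of this excerpt:

    // Hypothetical mirror of the constMethodOop code section entries.
    struct CodeSection { int new_start; int old_start; int length; };

    // True if old_bci lies inside a transferable section of the old method.
    bool in_code_section(const CodeSection* table, int n, int old_bci) {
      for (int i = 0; i < n; i++) {
        if (old_bci >= table[i].old_start &&
            old_bci <  table[i].old_start + table[i].length) {
          return true;
        }
      }
      return false;
    }

    // Map a bci of the old method onto the forward method, assuming
    // sections are copied verbatim and only shifted.
    int forward_bci(const CodeSection* table, int n, int old_bci) {
      for (int i = 0; i < n; i++) {
        if (old_bci >= table[i].old_start &&
            old_bci <  table[i].old_start + table[i].length) {
          return table[i].new_start + (old_bci - table[i].old_start);
        }
      }
      return -1;  // not inside any section: execution cannot be transferred
    }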
++IRT_ENTRY(void, InterpreterRuntime::forward_method(JavaThread *thread))
++  {
++    MonitorLockerEx ml(RedefinitionSync_lock);
++    while (Threads::wait_at_instrumentation_entry()) {
++      ml.wait();
++    }
++  }
++  frame f = last_frame(thread);
++  methodOop m = f.interpreter_frame_method();
++  methodOop forward_method = m->forward_method();
++  if (forward_method != NULL) {
++    int bci = f.interpreter_frame_bci();
++
++    if (TraceRedefineClasses >= 3) {
++      tty->print_cr("Executing NOP in method %s at bci %d %d", m->name()->as_C_string(), bci, m->is_in_code_section(bci + 1));
++    }
++
++    int next_bci = bci - 1;
++    // First try bci before NOP.
++    if (!m->is_in_code_section(next_bci)) {
++      // Try bci after NOP.
++      next_bci = bci + 1;
++      if (!m->is_in_code_section(next_bci)) return;
++    }
++
++    int new_bci = m->calculate_forward_bci(next_bci, forward_method);
++    if (TraceRedefineClasses >= 2) {
++      tty->print_cr("Transferring execution of %s to new method old_bci=%d new_bci=%d", forward_method->name()->as_C_string(), bci, new_bci);
++    }
++    RegisterMap reg_map(thread);
++    vframe* vf = vframe::new_vframe(&f, &reg_map, thread);
++    interpretedVFrame *iframe = (interpretedVFrame *)vf;
++    iframe->set_method(forward_method, new_bci - 1);
++  }
++IRT_END
++
++// (tw) Correctly resolve method when running old code.
++IRT_ENTRY(void, InterpreterRuntime::find_correct_method(JavaThread *thread, oopDesc* receiverOop, int vTableIndex))
++  // extract receiver from the outgoing argument list if necessary
++  Handle receiver(thread, receiverOop);
++
++  // TODO: Check for invokeinterface!
++  Bytecodes::Code bytecode = Bytecodes::_invokevirtual;
++
++  int method_holder_revision_number = method(thread)->method_holder()->klass_part()->revision_number();
++  klassOop klass = receiverOop->klass();
++  while (klass->klass_part()->revision_number() > method_holder_revision_number) {
++    klass = klass->klass_part()->old_version();
++  }
++
++  // TODO: Check for correctness if different vtable indices in different versions?
++
++  methodOop method = ((instanceKlass *)klass->klass_part())->method_at_vtable(vTableIndex);
++  thread->set_vm_result(method);
++IRT_END
++
++// Correctly resolve interface method when running old code.
++IRT_ENTRY(void, InterpreterRuntime::find_correct_interface_method(JavaThread *thread, oopDesc* receiverOop, oopDesc* interface_klass, int vTableIndex))
++
++  // extract receiver from the outgoing argument list if necessary
++  Handle receiver(thread, receiverOop);
++
++  // TODO: Check for invokeinterface!
++  Bytecodes::Code bytecode = Bytecodes::_invokevirtual;
++
++  int method_holder_revision_number = method(thread)->method_holder()->klass_part()->revision_number();
++  klassOop klass = receiverOop->klass();
++  while (klass->klass_part()->revision_number() > method_holder_revision_number) {
++    klass = klass->klass_part()->old_version();
++  }
++
++  methodOop method = ((instanceKlass *)klass->klass_part())->method_at_itable((klassOop)interface_klass, vTableIndex, THREAD);
++  thread->set_vm_result(method);
++IRT_END
++
+ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode))
+   // extract receiver from the outgoing argument list if necessary
+   Handle receiver(thread, NULL);
+@@ -702,6 +778,10 @@
+   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
+     int retry_count = 0;
+     while (info.resolved_method()->is_old()) {
++      // (tw) If we are executing an old method, this is OK!
++ if (method(thread)->is_old()) { ++ break; ++ } + // It is very unlikely that method is redefined more than 100 times + // in the middle of resolve. If it is looping here more than 100 times + // means then there could be a bug here. +diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp +--- a/src/share/vm/interpreter/interpreterRuntime.hpp ++++ b/src/share/vm/interpreter/interpreterRuntime.hpp +@@ -137,6 +137,9 @@ + static void post_method_entry(JavaThread *thread); + static void post_method_exit (JavaThread *thread); + static int interpreter_contains(address pc); ++ static void forward_method(JavaThread *thread); ++ static void find_correct_method(JavaThread *thread, oopDesc* receiver, int vTableIndex); ++ static void find_correct_interface_method(JavaThread *thread, oopDesc* receiver, oopDesc* interface_klass, int vTableIndex); + + // Native signature handlers + static void prepare_native_call(JavaThread* thread, methodOopDesc* method); +diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp +--- a/src/share/vm/interpreter/linkResolver.cpp ++++ b/src/share/vm/interpreter/linkResolver.cpp +@@ -145,8 +145,8 @@ + // Klass resolution + + void LinkResolver::check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS) { +- if (!Reflection::verify_class_access(ref_klass->as_klassOop(), +- sel_klass->as_klassOop(), ++ if (!Reflection::verify_class_access(ref_klass->as_klassOop()->klass_part()->newest_version(), ++ sel_klass->as_klassOop()->klass_part()->newest_version(), + true)) { + ResourceMark rm(THREAD); + Exceptions::fthrow( +@@ -258,7 +258,7 @@ + // We'll check for the method name first, as that's most likely + // to be false (so we'll short-circuit out of these tests). + if (sel_method->name() == vmSymbols::clone_name() && +- sel_klass() == SystemDictionary::Object_klass() && ++ sel_klass()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() && + resolved_klass->oop_is_array()) { + // We need to change "protected" to "public". + assert(flags.is_protected(), "clone not protected?"); +@@ -334,6 +334,149 @@ + } + + ++void LinkResolver::lookup_method(methodHandle& resolved_method, KlassHandle resolved_klass, ++ Symbol* method_name, Symbol* method_signature, bool is_interface, KlassHandle current_klass, TRAPS) { ++ ++ // Interface method lookup? ++ if (is_interface) { ++ ++ // lookup method in this interface or its super, java.lang.Object ++ lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ ++ if (resolved_method.is_null()) { ++ // lookup method in all the super-interfaces ++ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ } ++ ++ // Other methods ++ } else { ++ ++ // 2. lookup method in resolved klass and its super klasses ++ lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ ++ if (resolved_method.is_null()) { // not found in the class hierarchy ++ // 3. lookup method in all the interfaces implemented by the resolved klass ++ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ ++ if (resolved_method.is_null()) { ++ // JSR 292: see if this is an implicitly generated method MethodHandle.invoke(*...) 
++ lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK); ++ } ++ } ++ } ++} ++ ++void LinkResolver::lookup_correct_field(fieldDescriptor &fd, KlassHandle &sel_klass, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* field_name, Symbol* field_sig, bool is_static) { ++ ++ // First attempt unversioned ++ sel_klass = KlassHandle(Thread::current(), instanceKlass::cast(resolved_klass())->find_field(field_name, field_sig, &fd)); ++ ++ ++ if (!current_klass.is_null() && !current_klass->is_newest_version()) { ++ ++ // Look for the policy defined in the new version of the class (_not_ in the newest, but only in the newer relative to current klass). ++ int redefinition_policy = current_klass->new_version()->klass_part()->field_redefinition_policy(); ++ if (is_static) { ++ redefinition_policy = current_klass->new_version()->klass_part()->static_field_redefinition_policy(); ++ } ++ ++ assert(redefinition_policy != Klass::StaticCheck, "if the policy is static check, then we can never reach here"); ++ ++ if (redefinition_policy != Klass::DynamicCheck) { ++ ++ if (redefinition_policy == Klass::AccessOldMembers) { ++ // Forget looked up fields ++ sel_klass = KlassHandle(Thread::current(), (oop)NULL); ++ } ++ ++ assert(redefinition_policy == Klass::AccessOldMembers || redefinition_policy == Klass::AccessDeletedMembers, ""); ++ ++ if (sel_klass.is_null() || fd.is_static() != is_static /* access old static field field is changed from static to non-static */) { ++ ++ // Select correct version for resolved klass. ++ find_correct_resolved_klass(resolved_klass, current_klass); ++ ++ sel_klass = KlassHandle(Thread::current(), instanceKlass::cast(resolved_klass())->find_field(field_name, field_sig, &fd)); ++ ++ // FIXME: idubrov ++ //if (sel_klass.is_null()) { ++ // TRACE_RC2("Trying to resolve field (%s) in old universe failed => exception is the correct behaviour", field_name->as_C_string()); ++ //} else { ++ // assert(sel_klass->new_version() != NULL, "must be old class!"); ++ // TRACE_RC2("Resolved a field in the old universe (%s)!", field_name->as_C_string()); ++ //} ++ } ++ } ++ } ++} ++ ++void LinkResolver::lookup_correct_method(methodHandle& resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, ++ Symbol* method_name, Symbol* method_signature, bool is_interface, TRAPS) { ++ ++ // First attempt unversioned ++ lookup_method(resolved_method, resolved_klass, method_name, method_signature, is_interface, current_klass, CHECK); ++ ++ // (tw) Are we in an old method that wants to see a different view on the world? ++ if (!current_klass.is_null() && !current_klass->is_newest_version()) { ++ ++ // Look for the policy defined in the new version of the class (_not_ in the newest, but only in the newer relative to current klass). ++ int method_redefinition_policy = current_klass->new_version()->klass_part()->method_redefinition_policy(); ++ assert(method_redefinition_policy != Klass::StaticCheck, "if the policy is static check, then we can never reach here"); ++ ++ if (method_redefinition_policy != Klass::DynamicCheck) { ++ ++ // We do not throw the exception ++ if (method_redefinition_policy == Klass::AccessOldMembers) { ++ // Forget any new member lookup ++ resolved_method = methodHandle(THREAD, NULL); ++ } ++ ++ assert(method_redefinition_policy == Klass::AccessOldMembers || method_redefinition_policy == Klass::AccessDeletedMembers, ""); ++ ++ if (resolved_method.is_null()) { ++ ++ // Select correct version for resolved klass. 
++ find_correct_resolved_klass(resolved_klass, current_klass); ++ ++ // Now do the lookup in a second attempt with a different resolved klass. ++ lookup_method(resolved_method, resolved_klass, method_name, method_signature, is_interface, current_klass, CHECK); ++ ++ // FIXME: idubrov ++ //IF_TRACE_RC2 { ++ // ResourceMark rm(THREAD); ++ // if (resolved_method.is_null()) { ++ // TRACE_RC2("Trying to resolve method (%s) in old universe failed => exception is the correct behaviour", method_name->as_C_string()); ++ // } else { ++ // assert(resolved_method->is_old(), "must be old method!"); ++ // TRACE_RC2("Resolved a method in the old universe (%s)!", resolved_method->name()->as_C_string()); ++ // } ++ //} ++ } ++ } ++ } ++ ++ if (resolved_method.is_null()) { ++ // no method found ++ ResourceMark rm(THREAD); ++ THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), ++ methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), ++ method_name, ++ method_signature)); ++ } ++} ++ ++void LinkResolver::find_correct_resolved_klass(KlassHandle &resolved_klass, KlassHandle ¤t_klass) { ++ int current_klass_revision = current_klass->revision_number(); ++ int resolved_klass_revision = resolved_klass->revision_number(); ++ // FIXME: idubrov ++ //TRACE_RC2("The two different revision numbers for interfaces: current=%d / resolved_callee=%d", current_klass_revision, resolved_klass_revision); ++ ++ while (resolved_klass->revision_number() > current_klass_revision) { ++ assert(resolved_klass->old_version(), "must have old version"); ++ resolved_klass = KlassHandle(Thread::current(), resolved_klass->old_version()); ++ } ++} ++ + void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass, + Symbol* method_name, Symbol* method_signature, + KlassHandle current_klass, bool check_access, TRAPS) { +@@ -346,27 +489,8 @@ + THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf); + } + +- // 2. lookup method in resolved klass and its super klasses +- lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- +- if (resolved_method.is_null()) { // not found in the class hierarchy +- // 3. lookup method in all the interfaces implemented by the resolved klass +- lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- +- if (resolved_method.is_null()) { +- // JSR 292: see if this is an implicitly generated method MethodHandle.invoke(*...) +- lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK); +- } +- +- if (resolved_method.is_null()) { +- // 4. method lookup failed +- ResourceMark rm(THREAD); +- THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), +- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), +- method_name, +- method_signature)); +- } +- } ++ // 2. and 3. and 4. lookup method in resolved klass and its super klasses ++ lookup_correct_method(resolved_method, resolved_klass, current_klass, method_name, method_signature, false, CHECK); + + // 5. 
check if method is concrete + if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) { +@@ -434,20 +558,7 @@ + } + + // lookup method in this interface or its super, java.lang.Object +- lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- +- if (resolved_method.is_null()) { +- // lookup method in all the super-interfaces +- lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- if (resolved_method.is_null()) { +- // no method found +- ResourceMark rm(THREAD); +- THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), +- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), +- method_name, +- method_signature)); +- } +- } ++ lookup_correct_method(resolved_method, resolved_klass, current_klass, method_name, method_signature, true, CHECK); + + if (check_access) { + HandleMark hm(THREAD); +@@ -534,9 +645,14 @@ + THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string()); + } + ++ KlassHandle ref_klass(THREAD, pool->pool_holder()->klass_part()); ++ + // Resolve instance field + fieldDescriptor fd; // find_field initializes fd if found +- KlassHandle sel_klass(THREAD, instanceKlass::cast(resolved_klass())->find_field(field, sig, &fd)); ++ ++ KlassHandle sel_klass; ++ lookup_correct_field(fd, sel_klass, resolved_klass, ref_klass, field, sig, is_static); ++ + // check if field exists; i.e., if a klass containing the field def has been selected + if (sel_klass.is_null()){ + ResourceMark rm(THREAD); +@@ -544,7 +660,6 @@ + } + + // check access +- KlassHandle ref_klass(THREAD, pool->pool_holder()); + check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK); + + // check for errors +@@ -556,7 +671,7 @@ + } + + // Final fields can only be accessed from its own class. +- if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) { ++ if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()->klass_part()->active_version() && sel_klass() != pool->pool_holder()) { + THROW(vmSymbols::java_lang_IllegalAccessError()); + } + +@@ -761,7 +876,7 @@ + bool check_access, bool check_null_and_abstract, TRAPS) { + methodHandle resolved_method; + linktime_resolve_virtual_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK); +- runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, check_null_and_abstract, CHECK); ++ runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, current_klass, check_null_and_abstract, CHECK); + } + + // throws linktime exceptions +@@ -791,6 +906,7 @@ + KlassHandle resolved_klass, + Handle recv, + KlassHandle recv_klass, ++ KlassHandle current_klass, + bool check_null_and_abstract, + TRAPS) { + +@@ -839,7 +955,40 @@ + // recv_klass might be an arrayKlassOop but all vtables start at + // the same place. The cast is to avoid virtual call and assertion. + instanceKlass* inst = (instanceKlass*)recv_klass()->klass_part(); ++ ++ // (tw) The type of the virtual method call and the type of the receiver do not need to ++ // have anything in common, as the receiver type could've been hotswapped. ++ // Does not always work (method could be resolved with correct dynamic type and later ++ // be called at the same place with a wrong dynamic type). ++ // (tw) TODO: Need to handle the static type vs dynamic type issue more generally. 
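Both find_correct_method() and find_correct_interface_method() in the interpreter runtime above, and the vtable selection that follows, apply the same rule: walk the receiver's class back along its version chain until its revision is no newer than that of the resolved method's holder. A standalone sketch of that selection, reduced to the two fields the rule needs:

    // Minimal model: each redefined class version links to its predecessor.
    struct KlassVersion {
      int           revision;
      KlassVersion* old_version;
    };

    // Old code must dispatch through the receiver's class *as of* the
    // world view of the calling method's holder.
    KlassVersion* select_receiver_view(KlassVersion* receiver_klass,
                                       int method_holder_revision) {
      KlassVersion* k = receiver_klass;
      while (k->revision > method_holder_revision) {
        k = k->old_version;  // step back one redefinition
      }
      return k;
    }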
++ ++ // The vTable must be based on the view of the world of the resolved method ++ klassOop method_holder = resolved_method->method_holder(); ++ ++ if (method_holder->klass_part()->new_version() != NULL) { ++ // We are executing in old code ++ // FIXME: idubrov ++ //TRACE_RC2("Calling a method in old code"); ++ while (method_holder->klass_part()->revision_number() < inst->revision_number()) { ++ inst = (instanceKlass *)(inst->old_version()->klass_part()); ++ } ++ } ++ ++ if (inst->is_subtype_of(method_holder)) { + selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index)); ++ } else { ++ ++ tty->print_cr("Failure:"); ++ inst->as_klassOop()->print(); ++ inst->super()->print(); ++ juint off = inst->super_check_offset(); ++ klassOop sup = *(klassOop*)( (address)inst->as_klassOop() + off ); ++ sup->print(); ++ method_holder->print(); ++ ++ bool b = inst->is_subtype_of(method_holder); ++ THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), "(tw) A virtual method was called, but the type of the receiver is not related with the type of the class of the called method!"); ++ } + } + } + +diff --git a/src/share/vm/interpreter/linkResolver.hpp b/src/share/vm/interpreter/linkResolver.hpp +--- a/src/share/vm/interpreter/linkResolver.hpp ++++ b/src/share/vm/interpreter/linkResolver.hpp +@@ -106,7 +106,11 @@ + // It does all necessary link-time checks & throws exceptions if necessary. + + class LinkResolver: AllStatic { +- private: ++private: ++ static void lookup_method (methodHandle& result, KlassHandle resolved_klass, Symbol* name, Symbol* signature, bool is_interface, KlassHandle current_klass, TRAPS); ++ static void lookup_correct_field (fieldDescriptor &fd, KlassHandle &sel_klass, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* field_name, Symbol* field_sig, bool is_static); ++ static void lookup_correct_method (methodHandle& result, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* name, Symbol* signature, bool is_interface, TRAPS); ++ static void find_correct_resolved_klass (KlassHandle &resolved_klass, KlassHandle ¤t_klass); + static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); +@@ -129,7 +133,7 @@ + static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); + + static void runtime_resolve_special_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS); +- static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); ++ static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, KlassHandle current_klass, bool check_null_and_abstract, TRAPS); + static void runtime_resolve_interface_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); + + static void check_field_accessability 
(KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS);
+diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
+--- a/src/share/vm/interpreter/templateTable.hpp
++++ b/src/share/vm/interpreter/templateTable.hpp
+@@ -328,8 +328,8 @@
+   static void shouldnotreachhere();
+
+   // jvmti support
+-  static void jvmti_post_field_access(Register cache, Register index, bool is_static, bool has_tos);
+-  static void jvmti_post_field_mod(Register cache, Register index, bool is_static);
++  static void jvmti_post_field_access(Register cache, Register index, int byte_no, bool is_static, bool has_tos);
++  static void jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static);
+   static void jvmti_post_fast_field_mod();
+
+   // debugging of TemplateGenerator
+diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
+--- a/src/share/vm/memory/genMarkSweep.cpp
++++ b/src/share/vm/memory/genMarkSweep.cpp
+@@ -409,6 +409,7 @@
+   // in the same order in phase2, phase3 and phase4. We don't quite do that
+   // here (perm_gen first rather than last), so we tell the validate code
+   // to use a higher index (saved from phase2) when verifying perm_gen.
++  assert(_rescued_oops == NULL, "must be empty before processing");
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+   Generation* pg = gch->perm_gen();
+
+@@ -421,10 +422,14 @@
+
+   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
+
++  MarkSweep::copy_rescued_objects_back();
++
+   GenCompactClosure blk;
+   gch->generation_iterate(&blk, true);
+
+   VALIDATE_MARK_SWEEP_ONLY(compaction_complete());
+
++  MarkSweep::copy_rescued_objects_back();
++
+   pg->post_compact(); // Shared spaces verification.
+ }
+diff --git a/src/share/vm/memory/permGen.cpp b/src/share/vm/memory/permGen.cpp
+--- a/src/share/vm/memory/permGen.cpp
++++ b/src/share/vm/memory/permGen.cpp
+@@ -57,7 +57,12 @@
+
+   for (;;) {
+     {
+-      MutexLocker ml(Heap_lock);
++      // (tw) Only lock when not at a safepoint (necessary to use the split verifier from the VmThread)
++      Monitor *lock = Heap_lock;
++      if (SafepointSynchronize::is_at_safepoint()) {
++        lock = NULL;
++      }
++      MutexLockerEx ml(lock);
+       if ((obj = gen->allocate(size, false)) != NULL) {
+         return obj;
+       }
+diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
+--- a/src/share/vm/memory/space.cpp
++++ b/src/share/vm/memory/space.cpp
+@@ -378,6 +378,31 @@
+   _compaction_top = bottom();
+ }
+
++// (tw) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
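The definition that follows, like the redefinition branches of prepare_for_compaction(), adjust_pointers() and compact() further down, is only taken when the VM has flagged the current collection as a redefinition run. A hedged sketch of how the flag added to Universe later in this patch is presumably toggled; the actual call site is outside this excerpt:

    // Assumed driver code (not shown in this patch excerpt): flag the run,
    // force a full mark-compact collection so the phases in this file take
    // the redefinition-aware paths, then restore the normal fast paths.
    Universe::set_redefining_gc_run(true);
    GenCollectedHeap::heap()->collect(GCCause::_jvmti_force_gc);
    Universe::set_redefining_gc_run(false);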
++HeapWord* CompactibleSpace::forward_compact_top(size_t size, ++CompactPoint* cp, HeapWord* compact_top) { ++ // First check if we should switch compaction space ++ assert(this == cp->space, "'this' should be current compaction space."); ++ size_t compaction_max_size = pointer_delta(end(), compact_top); ++ while (size > compaction_max_size) { ++ // switch to next compaction space ++ cp->space->set_compaction_top(compact_top); ++ cp->space = cp->space->next_compaction_space(); ++ if (cp->space == NULL) { ++ cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); ++ assert(cp->gen != NULL, "compaction must succeed"); ++ cp->space = cp->gen->first_compaction_space(); ++ assert(cp->space != NULL, "generation must have a first compaction space"); ++ } ++ compact_top = cp->space->bottom(); ++ cp->space->set_compaction_top(compact_top); ++ cp->threshold = cp->space->initialize_threshold(); ++ compaction_max_size = pointer_delta(cp->space->end(), compact_top); ++ } ++ ++ return compact_top; ++} ++ + HeapWord* CompactibleSpace::forward(oop q, size_t size, + CompactPoint* cp, HeapWord* compact_top) { + // q is alive +@@ -401,7 +426,7 @@ + } + + // store the forwarding pointer into the mark word +- if ((HeapWord*)q != compact_top) { ++ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) { + q->forward_to(oop(compact_top)); + assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); + } else { +@@ -449,7 +474,208 @@ + + // Faster object search. + void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ if (!Universe::is_redefining_gc_run()) { ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ return; ++ } ++ ++ /* Compute the new addresses for the live objects and store it in the mark ++ * Used by universe::mark_sweep_phase2() ++ */ ++ HeapWord* compact_top; /* This is where we are currently compacting to. */ ++ ++ /* We're sure to be here before any objects are compacted into this ++ * space, so this is a good time to initialize this: ++ */ ++ set_compaction_top(bottom()); ++ ++ if (cp->space == NULL) { ++ assert(cp->gen != NULL, "need a generation"); ++ assert(cp->threshold == NULL, "just checking"); ++ assert(cp->gen->first_compaction_space() == this, "just checking"); ++ cp->space = cp->gen->first_compaction_space(); ++ compact_top = cp->space->bottom(); ++ cp->space->set_compaction_top(compact_top); ++ cp->threshold = cp->space->initialize_threshold(); ++ } else { ++ compact_top = cp->space->compaction_top(); ++ } ++ ++ /* We allow some amount of garbage towards the bottom of the space, so ++ * we don't start compacting before there is a significant gain to be made. ++ * Occasionally, we want to ensure a full compaction, which is determined ++ * by the MarkSweepAlwaysCompactCount parameter. ++ */ ++ int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations; ++ bool skip_dead = (MarkSweepAlwaysCompactCount < 1) ++ ||((invocations % MarkSweepAlwaysCompactCount) != 0); ++ ++ size_t allowed_deadspace = 0; ++ if (skip_dead) { ++ int ratio = (int)allowed_dead_ratio(); ++ allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; ++ } ++ ++ HeapWord* q = bottom(); ++ HeapWord* t = end(); ++ ++ HeapWord* end_of_live= q; /* One byte beyond the last byte of the last ++ live object. */ ++ HeapWord* first_dead = end();/* The first dead object. */ ++ LiveRange* liveRange = NULL; /* The current live range, recorded in the ++ first header of preceding free area. 
*/ ++ _first_dead = first_dead; ++ ++ const intx interval = PrefetchScanIntervalInBytes; ++ ++ while (q < t) { ++ assert(!block_is_obj(q) || ++ oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || ++ oop(q)->mark()->has_bias_pattern(), ++ "these are the only valid states during a mark sweep"); ++ if (block_is_obj(q) && oop(q)->is_gc_marked()) { ++ /* prefetch beyond q */ ++ Prefetch::write(q, interval); ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size(q); ++ ++ // DCEVM: begin ++ ////////////////////////////////////////////////////////////////////////// ++ size_t forward_size = size; ++ ++ // Compute the forward sizes and leave out objects whose position could ++ // possibly overlap other objects. ++ ++ // DCEVM: There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL && oop(q)->blueprint()->new_version()->klass_part()->update_information() != NULL) { ++ ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ forward_size = new_size; ++ } ++ ++ compact_top = cp->space->forward_compact_top(forward_size, cp, compact_top); ++ ++ bool rescueing = false; ++ if (rescueing = must_rescue(oop(q), oop(compact_top))) { ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ // FIXME: idubrov ++ //TRACE_RC5("rescue obj %d klass=%s", MarkSweep::_rescued_oops->length(), oop(q)->klass()->klass_part()->name()->as_C_string()); ++ MarkSweep::_rescued_oops->append(oop(q)); ++ } else { ++ compact_top = cp->space->forward(oop(q), forward_size, cp, compact_top); ++ } ++ ++ if ((size != forward_size || rescueing) && q < first_dead) { ++ // (tw) This object moves => first_dead must be set to here! ++ first_dead = q; ++ } ++ ////////////////////////////////////////////////////////////////////////// ++ q += size; ++ end_of_live = q; ++ } else { ++ /* run over all the contiguous dead objects */ ++ HeapWord* end = q; ++ do { ++ /* prefetch beyond end */ ++ Prefetch::write(end, interval); ++ end += block_size(end); ++ } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked())); ++ ++ /* see if we might want to pretend this object is alive so that ++ * we don't have to compact quite as often. ++ */ ++ if (allowed_deadspace > 0 && q == compact_top) { ++ size_t sz = pointer_delta(end, q); ++ if (insert_deadspace(allowed_deadspace, q, sz)) { ++ compact_top = cp->space->forward(oop(q), sz, cp, compact_top); ++ q = end; ++ end_of_live = end; ++ continue; ++ } ++ } ++ ++ /* otherwise, it really is a free region. */ ++ ++ /* for the previous LiveRange, record the end of the live objects. */ ++ if (liveRange) { ++ liveRange->set_end(q); ++ } ++ ++ /* record the current LiveRange object. ++ * liveRange->start() is overlaid on the mark word. ++ */ ++ liveRange = (LiveRange*)q; ++ liveRange->set_start(end); ++ liveRange->set_end(end); ++ ++ /* see if this is the first dead region. */ ++ if (q < first_dead) { ++ first_dead = q; ++ } ++ ++ /* move on to the next object */ ++ q = end; ++ } ++ } ++ ++ ////////////////////////////////////////////////////////////////////////// ++ // Compute the forwarding addresses for the objects that need to be ++ // rescued. ++ // TODO: empty the _rescued_oops after ALL spaces are compacted! 
++ if (MarkSweep::_rescued_oops != NULL) { ++ // FIXME: idubrov ++ //TRACE_RC2("Calculating new forward sizes for %d objects!", MarkSweep::_rescued_oops->length()); ++ ++ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { ++ oop q = MarkSweep::_rescued_oops->at(i); ++ ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size((HeapWord*)q); ++ ++ size_t forward_size = size; ++ ++ // (tw) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ forward_size = new_size; ++ } ++ ++ compact_top = cp->space->forward(oop(q), forward_size, cp, compact_top); ++ assert(compact_top <= t, "must not write over end of space!"); ++ } ++ MarkSweep::_rescued_oops->clear(); ++ MarkSweep::_rescued_oops = NULL; ++ } ++ ////////////////////////////////////////////////////////////////////////// ++ ++ assert(q == t, "just checking"); ++ if (liveRange != NULL) { ++ liveRange->set_end(q); ++ } ++ _end_of_live = end_of_live; ++ if (end_of_live < first_dead) { ++ first_dead = end_of_live; ++ } ++ _first_dead = first_dead; ++ ++// FIXME: idubrov ++// if (_first_dead > top()) { ++// _first_dead = top(); ++// } ++// ++// if (_end_of_live > top()) { ++// _end_of_live = top(); ++// } ++ assert(_first_dead <= top(), "Must be smaller equal"); ++ assert(_end_of_live <= top(), "Must be smaller equal"); ++ ++ /* save the compaction_top of the compaction space. */ ++ cp->space->set_compaction_top(compact_top); + } + + void Space::adjust_pointers() { +@@ -490,17 +716,313 @@ + assert(q == t, "just checking"); + } + ++ ++#ifdef ASSERT ++ ++int CompactibleSpace::space_index(oop obj) { ++ GenCollectedHeap* heap = GenCollectedHeap::heap(); ++ ++ if (heap->is_in_permanent(obj)) { ++ return -1; ++ } ++ ++ int index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ if (space->is_in_reserved(obj)) { ++ return index; ++ } ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ tty->print_cr("could not compute space_index for %08xh", obj); ++ index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); ++ ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ ShouldNotReachHere(); ++ return 0; ++} ++#endif ++ ++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { ++ ++ assert(is_in_reserved(old_obj), "old_obj must be in this space"); ++ ++ if (old_obj->is_perm()) { ++ // This object is in perm gen; check for invariant obj->klass() <= obj ++ if (oop(old_obj)->blueprint()->new_version() != NULL) { ++ return true; ++ } ++ } ++ ++ int size = old_obj->size(); ++ int original_size = size; ++ if (oop(old_obj)->blueprint()->is_redefining()) { ++ assert(oop(old_obj)->blueprint()->old_version() != NULL, "must not be null"); ++ original_size = oop(old_obj)->size_given_klass(oop(old_obj)->blueprint()->old_version()->klass_part()); ++ } 
else if (oop(old_obj)->blueprint()->new_version() != NULL) { ++ size = oop(old_obj)->size_given_klass(oop(old_obj)->blueprint()->new_version()->klass_part()); ++ } ++ ++ bool normalComparison = (old_obj + original_size < new_obj + size); ++ ++ if (is_in_reserved(new_obj)) { ++ // Old and new address are in same space, so just compare the address. ++ // Must rescue if object moves towards the top of the space. ++ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); ++ return normalComparison; ++ ++ } else { ++ ++ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); ++ ++ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); ++ if (tenured_gen->is_in_reserved(new_obj)) { ++ // Must never rescue when moving from the new into the old generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) > space_index(new_obj), "must be"); ++ return false; ++ ++ } else if (tenured_gen->is_in_reserved(old_obj)) { ++ // Must always rescue when moving from the old into the new generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) < space_index(new_obj), "must be"); ++ return true; ++ ++ } else { ++ // In the new generation, eden is located before the from space, so a ++ // simple pointer comparison is sufficient. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert((normalComparison) == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); ++ return normalComparison; ++ } ++ } ++} ++ ++oop CompactibleSpace::rescue(oop old_obj) { ++ assert(must_rescue(old_obj, old_obj->forwardee()), "do not call otherwise"); ++ ++ int size = old_obj->size(); ++ oop rescued_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)old_obj, (HeapWord*)rescued_obj, size); ++ ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ ++ MarkSweep::_rescued_oops->append(rescued_obj); ++ return rescued_obj; ++} ++ + void CompactibleSpace::adjust_pointers() { + // Check first is there is any work to do. + if (used() == 0) { + return; // Nothing to do. + } ++ /* adjust all the interior pointers to point at the new locations of objects ++ * Used by MarkSweep::mark_sweep_phase3() */ + +- SCAN_AND_ADJUST_POINTERS(adjust_obj_size); ++ HeapWord* q = bottom(); ++ HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ ++ ++ assert(_first_dead <= _end_of_live, "Stands to reason, no?"); ++ ++ debug_only(HeapWord* prev_q = NULL); ++ debug_only(HeapWord* prev_prev_q = NULL); ++ debug_only(HeapWord* prev_prev_prev_q = NULL); ++ if (q < t && _first_dead > q && ++ !oop(q)->is_gc_marked()) { ++ /* we have a chunk of the space which hasn't moved and we've ++ * reinitialized the mark word during the previous pass, so we can't ++ * use is_gc_marked for the traversal. 
*/ ++ HeapWord* end = _first_dead; ++ ++ while (q < end) { ++ /* I originally tried to conjoin "block_start(q) == q" to the ++ * assertion below, but that doesn't work, because you can't ++ * accurately traverse previous objects to get to the current one ++ * after their pointers (including pointers into permGen) have been ++ * updated, until the actual compaction is done. dld, 4/00 */ ++ assert(block_is_obj(q), ++ "should be at block boundaries, and should be looking at objs"); ++ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); ++ ++ /* point all the oops to the new location */ ++ size_t size = oop(q)->adjust_pointers(); ++ size = adjust_obj_size(size); ++ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); ++ ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q += size; ++ } ++ ++ // (tw) first_dead can be live object! ++ q = _first_dead; ++ ++// if (_first_dead == t) { ++// q = t; ++// } else { ++// /* $$$ This is funky. Using this to read the previously written ++// * LiveRange. See also use below. */ ++// q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); ++// } ++ } ++ ++ const intx interval = PrefetchScanIntervalInBytes; ++ ++ debug_only(prev_q = NULL); ++ debug_only(prev_prev_q = NULL); ++ debug_only(prev_prev_prev_q = NULL); ++ while (q < t) { ++ /* prefetch beyond q */ ++ Prefetch::write(q, interval); ++ if (oop(q)->is_gc_marked()) { ++ /* q is alive */ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); ++ /* point all the oops to the new location */ ++ size_t size = oop(q)->adjust_pointers(); ++ size = adjust_obj_size(size); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q += size; ++ } else { ++ /* q is not a live object, so its mark should point at the next ++ * live object */ ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q = (HeapWord*) oop(q)->mark()->decode_pointer(); ++ assert(q > prev_q, "we should be moving forward through memory"); ++ } ++ } ++ ++ assert(q == t, "just checking"); + } + + void CompactibleSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ ++ if(!Universe::is_redefining_gc_run()) { ++ SCAN_AND_COMPACT(obj_size); ++ return; ++ } ++ ++ /* Copy all live objects to their new location ++ * Used by MarkSweep::mark_sweep_phase4() */ ++ ++ HeapWord* q = bottom(); ++ HeapWord* const t = _end_of_live; ++ debug_only(HeapWord* prev_q = NULL); ++ ++ if (q < t && _first_dead > q && ++ !oop(q)->is_gc_marked()) { ++ debug_only( ++ /* we have a chunk of the space which hasn't moved and we've reinitialized ++ * the mark word during the previous pass, so we can't use is_gc_marked for ++ * the traversal. */ ++ HeapWord* const end = _first_dead; ++ ++ while (q < end) { ++ size_t size = obj_size(q); // FIXME: idubrov oop(q)->size(); ++ assert(!oop(q)->is_gc_marked(), ++ "should be unmarked (special dense prefix handling)"); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); ++ debug_only(prev_q = q); ++ q += size; ++ } ++ ) /* debug_only */ ++ // (tw) first_dead can be live object! 
++ q = _first_dead; ++ ++ //if (_first_dead == t) { ++ // q = t; ++ //} else { ++ ///* $$$ Funky */ ++ //q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); ++ //} ++ } ++ ++ const intx scan_interval = PrefetchScanIntervalInBytes; ++ const intx copy_interval = PrefetchCopyIntervalInBytes; ++ while (q < t) { ++ if (!oop(q)->is_gc_marked()) { ++ /* mark is pointer to next marked oop */ ++ debug_only(prev_q = q); ++ q = (HeapWord*) oop(q)->mark()->decode_pointer(); ++ assert(q > prev_q, "we should be moving forward through memory"); ++ } else { ++ /* prefetch beyond q */ ++ Prefetch::read(q, scan_interval); ++ ++ /* size and destination */ ++ size_t size = obj_size(q); ++ HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); ++ ++ if (must_rescue(oop(q), oop(q)->forwardee())) { ++ oop dest_obj = rescue(oop(q)); ++ debug_only(Copy::fill_to_words(q, size, 0)); ++ } else { ++ ++ /* prefetch beyond compaction_top */ ++ Prefetch::write(compaction_top, copy_interval); ++ ++ /* copy object and reinit its mark */ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, ++ compaction_top)); ++ assert(q != compaction_top || oop(q)->blueprint()->new_version() != NULL, "everything in this pass should be moving"); ++ ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ MarkSweep::update_fields(oop(q), oop(compaction_top)); ++ } else { ++ Copy::aligned_conjoint_words(q, compaction_top, size); ++ } ++ oop(compaction_top)->init_mark(); ++ assert(oop(compaction_top)->klass() != NULL, "should have a class"); ++ } ++ ++ debug_only(prev_q = q); ++ q += size; ++ } ++ } ++ ++ /* Let's remember if we were empty before we did the compaction. */ ++ bool was_empty = used_region().is_empty(); ++ /* Reset space after compaction is complete */ ++ reset_after_compaction(); ++ /* We do this clear, below, since it has overloaded meanings for some */ ++ /* space subtypes. For example, OffsetTableContigSpace's that were */ ++ /* compacted into will have had their offset table thresholds updated */ ++ /* continuously, but those that weren't need to have their thresholds */ ++ /* re-initialized. Also mangles unused area for debugging. */ ++ if (used_region().is_empty()) { ++ if (!was_empty) clear(SpaceDecorator::Mangle); ++ } else { ++ if (ZapUnusedHeapArea) mangle_unused_area(); ++ } ++ ++ //SCAN_AND_COMPACT(obj_size); + } + + void Space::print_short() const { print_short_on(tty); } +diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp +--- a/src/share/vm/memory/space.hpp ++++ b/src/share/vm/memory/space.hpp +@@ -445,6 +445,9 @@ + // indicates when the next such action should be taken. + virtual void prepare_for_compaction(CompactPoint* cp); + // MarkSweep support phase3 ++ DEBUG_ONLY(int space_index(oop obj)); ++ bool must_rescue(oop old_obj, oop new_obj); ++ oop rescue(oop old_obj); + virtual void adjust_pointers(); + // MarkSweep support phase4 + virtual void compact(); +@@ -475,6 +478,10 @@ + virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, + HeapWord* compact_top); + ++ // (tw) ++ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, ++ HeapWord* compact_top); ++ + // Return a size with adjusments as required of the space. 
+ virtual size_t adjust_object_size_v(size_t size) const { return size; } + +diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp +--- a/src/share/vm/memory/universe.cpp ++++ b/src/share/vm/memory/universe.cpp +@@ -100,6 +100,8 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" + #endif + ++bool Universe::_is_redefining_gc_run = false; ++ + // Known objects + klassOop Universe::_boolArrayKlassObj = NULL; + klassOop Universe::_byteArrayKlassObj = NULL; +@@ -203,6 +205,38 @@ + f(systemObjArrayKlassObj()); + } + ++// DCEVM: This method should iterate all pointers that are not within heap objects. ++void Universe::root_oops_do(OopClosure *oopClosure) { ++ ++ class AlwaysTrueClosure: public BoolObjectClosure { ++ public: ++ void do_object(oop p) { ShouldNotReachHere(); } ++ bool do_object_b(oop p) { return true; } ++ }; ++ AlwaysTrueClosure always_true; ++ ++ // General strong roots ++ Universe::oops_do(oopClosure); ++ JNIHandles::oops_do(oopClosure); ++ Threads::oops_do(oopClosure, NULL); ++ ObjectSynchronizer::oops_do(oopClosure); ++ FlatProfiler::oops_do(oopClosure); ++ //Management::oops_do(oopClosure); // DCEVM: TODO: Check if this is correct? ++ JvmtiExport::oops_do(oopClosure); ++ // SO_AllClasses ++ SystemDictionary::oops_do(oopClosure); ++ ++ // Now adjust pointers in remaining weak roots. (All of which should ++ // have been cleared if they pointed to non-surviving objects.) ++ // Global (weak) JNI handles ++ JNIHandles::weak_oops_do(&always_true, oopClosure); ++ ++ CodeCache::oops_do(oopClosure); ++ StringTable::oops_do(oopClosure); ++ //ref_processor()->weak_oops_do(&oopClosure); // DCEVM: TODO: Check if this is correct? ++ //PSScavenge::reference_processor()->weak_oops_do(&oopClosure); // DCEVM: TODO: Check if this is correct? ++} ++ + void Universe::oops_do(OopClosure* f, bool do_all) { + + f->do_oop((oop*) &_int_mirror); +diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp +--- a/src/share/vm/memory/universe.hpp ++++ b/src/share/vm/memory/universe.hpp +@@ -127,6 +127,8 @@ + friend class SystemDictionary; + friend class VMStructs; + friend class CompactingPermGenGen; ++ friend class Space; ++ friend class ContiguousSpace; + friend class VM_PopulateDumpSharedSpace; + + friend jint universe_init(); +@@ -257,7 +259,18 @@ + + static void compute_verify_oop_data(); + ++ static bool _is_redefining_gc_run; ++ + public: ++ ++ static bool is_redefining_gc_run() { ++ return _is_redefining_gc_run; ++ } ++ ++ static void set_redefining_gc_run(bool b) { ++ _is_redefining_gc_run = b; ++ } ++ + // Known classes in the VM + static klassOop boolArrayKlassObj() { return _boolArrayKlassObj; } + static klassOop byteArrayKlassObj() { return _byteArrayKlassObj; } +@@ -396,6 +409,8 @@ + + // Iteration + ++ static void root_oops_do(OopClosure *f); ++ + // Apply "f" to the addresses of all the direct heap pointers maintained + // as static fields of "Universe". 
+ static void oops_do(OopClosure* f, bool do_all = false);
+@@ -412,6 +427,7 @@
+
+ // Debugging
+ static bool verify_in_progress() { return _verify_in_progress; }
++ static void set_verify_in_progress(bool b) { _verify_in_progress = b; }
+ static void verify(bool allow_dirty = true, bool silent = false,
+ VerifyOption option = VerifyOption_Default );
+ static int verify_count() { return _verify_count; }
+diff --git a/src/share/vm/oops/arrayKlass.cpp b/src/share/vm/oops/arrayKlass.cpp
+--- a/src/share/vm/oops/arrayKlass.cpp
++++ b/src/share/vm/oops/arrayKlass.cpp
+@@ -129,9 +129,9 @@
+
+ bool arrayKlass::compute_is_subtype_of(klassOop k) {
+ // An array is a subtype of Serializable, Cloneable, and Object
+- return k == SystemDictionary::Object_klass()
+- || k == SystemDictionary::Cloneable_klass()
+- || k == SystemDictionary::Serializable_klass();
++ return k->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version()
++ || k->klass_part()->newest_version() == SystemDictionary::Cloneable_klass()->klass_part()->newest_version()
++ || k->klass_part()->newest_version() == SystemDictionary::Serializable_klass()->klass_part()->newest_version();
+ }
+
+
+diff --git a/src/share/vm/oops/constMethodKlass.cpp b/src/share/vm/oops/constMethodKlass.cpp
+--- a/src/share/vm/oops/constMethodKlass.cpp
++++ b/src/share/vm/oops/constMethodKlass.cpp
+@@ -101,6 +101,7 @@
+ MarkSweep::mark_and_push(cm->adr_method());
+ MarkSweep::mark_and_push(cm->adr_stackmap_data());
+ MarkSweep::mark_and_push(cm->adr_exception_table());
++ MarkSweep::mark_and_push(cm->adr_code_section_table());
+ // Performance tweak: We skip iterating over the klass pointer since we
+ // know that Universe::constMethodKlassObj never moves.
+ }
+@@ -113,6 +114,7 @@
+ PSParallelCompact::mark_and_push(cm, cm_oop->adr_method());
+ PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data());
+ PSParallelCompact::mark_and_push(cm, cm_oop->adr_exception_table());
++ PSParallelCompact::mark_and_push(cm, cm_oop->adr_code_section_table());
+ // Performance tweak: We skip iterating over the klass pointer since we
+ // know that Universe::constMethodKlassObj never moves.
+ }
+@@ -124,6 +126,7 @@
+ blk->do_oop(cm->adr_method());
+ blk->do_oop(cm->adr_stackmap_data());
+ blk->do_oop(cm->adr_exception_table());
++ blk->do_oop(cm->adr_code_section_table());
+ // Get size before changing pointers.
+ // Don't call size() or oop_size() since that is a virtual call.
+ int size = cm->object_size();
+@@ -141,6 +144,8 @@
+ if (mr.contains(adr)) blk->do_oop(adr);
+ adr = cm->adr_exception_table();
+ if (mr.contains(adr)) blk->do_oop(adr);
++ adr = cm->adr_code_section_table();
++ if (mr.contains(adr)) blk->do_oop(adr);
+ // Get size before changing pointers.
+ // Don't call size() or oop_size() since that is a virtual call.
+ int size = cm->object_size();
+@@ -156,6 +161,7 @@
+ MarkSweep::adjust_pointer(cm->adr_method());
+ MarkSweep::adjust_pointer(cm->adr_stackmap_data());
+ MarkSweep::adjust_pointer(cm->adr_exception_table());
++ MarkSweep::adjust_pointer(cm->adr_code_section_table());
+ // Get size before changing pointers.
+ // Don't call size() or oop_size() since that is a virtual call.
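++ // Note: adjust_pointer above now also covers _code_section_table; the same
++ // oop is visited in the mark-and-push, parallel-compact and iterate paths
++ // earlier in this file, since missing any one GC path would leave a stale
++ // pointer after compaction.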
+ int size = cm->object_size();
+diff --git a/src/share/vm/oops/constMethodOop.hpp b/src/share/vm/oops/constMethodOop.hpp
+--- a/src/share/vm/oops/constMethodOop.hpp
++++ b/src/share/vm/oops/constMethodOop.hpp
+@@ -114,7 +114,7 @@
+
+ public:
+ oop* oop_block_beg() const { return adr_method(); }
+- oop* oop_block_end() const { return adr_exception_table() + 1; }
++ oop* oop_block_end() const { return adr_code_section_table() + 1; }
+
+ private:
+ //
+@@ -132,6 +132,10 @@
+ // table is pointing to Universe::the_empty_int_array
+ typeArrayOop _exception_table;
+
++
++ // (tw) Table mapping code sections for method forward points.
++ typeArrayOop _code_section_table;
++
+ //
+ // End of the oop block.
+ //
+@@ -184,6 +188,28 @@
+ void set_exception_table(typeArrayOop e) { oop_store_without_check((oop*) &_exception_table, (oop) e); }
+ bool has_exception_handler() const { return exception_table() != NULL && exception_table()->length() > 0; }
+
++ // code section table
++ typeArrayOop code_section_table() const { return _code_section_table; }
++ void set_code_section_table(typeArrayOop e) { oop_store_without_check((oop*) &_code_section_table, (oop) e); }
++ bool has_code_section_table() const { return code_section_table() != NULL && code_section_table()->length() > 0; }
++ static const int ValuesPerCodeSectionEntry = 3;
++ int code_section_entries() const {
++ if (!has_code_section_table()) return 0;
++ return _code_section_table->length() / ValuesPerCodeSectionEntry;
++ }
++
++ int code_section_new_index_at(int index) const {
++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry);
++ }
++
++ int code_section_original_index_at(int index) const {
++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry + 1);
++ }
++
++ int code_section_length_at(int index) const {
++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry + 2);
++ }
++
+ void init_fingerprint() {
+ const uint64_t initval = CONST64(0x8000000000000000);
+ _fingerprint = initval;
+@@ -285,6 +311,7 @@
+ oop* adr_method() const { return (oop*)&_method; }
+ oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; }
+ oop* adr_exception_table() const { return (oop*)&_exception_table; }
++ oop* adr_code_section_table() const { return (oop*)&_code_section_table; }
+ bool is_conc_safe() { return _is_conc_safe; }
+ void set_is_conc_safe(bool v) { _is_conc_safe = v; }
+
+diff --git a/src/share/vm/oops/cpCacheOop.cpp b/src/share/vm/oops/cpCacheOop.cpp
+--- a/src/share/vm/oops/cpCacheOop.cpp
++++ b/src/share/vm/oops/cpCacheOop.cpp
+@@ -36,9 +36,15 @@
+
+ // Implementation of ConstantPoolCacheEntry
+
++void ConstantPoolCacheEntry::copy_from(ConstantPoolCacheEntry *other) {
++ _flags = other->_flags; // only the flags word is carried over
++}
++
+ void ConstantPoolCacheEntry::initialize_entry(int index) {
+ assert(0 < index && index < 0x10000, "sanity check");
+ _indices = index;
++ _f1 = NULL;
++ _f2 = 0;
+ assert(constant_pool_index() == index, "");
+ }
+
+@@ -50,7 +56,7 @@
+
+ int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
+ bool is_vfinal, bool is_volatile,
+- bool is_method_interface, bool is_method) {
++ bool is_method_interface, bool is_method, bool is_old_method) {
+ int f = state;
+
+ assert( state < number_of_states, "Invalid state in as_flags");
+@@ -65,7 +71,9 @@
+ if (is_method_interface) f |= 1;
+ f <<= 1;
+ if (is_method) f |= 1;
+- f <<= ConstantPoolCacheEntry::hotSwapBit;
++ f <<= 1;
++ if (is_old_method) f |= 1;
++ f <<= ConstantPoolCacheEntry::oldMethodBit;
+ // Preserve
existing flag bit values + #ifdef ASSERT + int old_state = ((_flags >> tosBits) & 0x0F); +@@ -137,7 +145,7 @@ + set_f2(field_offset); + assert(field_index <= field_index_mask, + "field index does not fit in low flag bits"); +- set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) | ++ set_flags(as_flags(field_type, is_final, false, is_volatile, false, false, false) | + (field_index & field_index_mask)); + set_bytecode_1(get_code); + set_bytecode_2(put_code); +@@ -153,7 +161,8 @@ + int vtable_index) { + assert(!is_secondary_entry(), ""); + assert(method->interpreter_entry() != NULL, "should have been set at this point"); +- assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); ++ // (tw) No longer valid assert ++ //assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); + bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface); + + int byte_no = -1; +@@ -167,6 +176,9 @@ + } else { + assert(vtable_index >= 0, "valid index"); + set_f2(vtable_index); ++ ++ // (tw) save method holder in f1 for virtual calls ++ set_f1(method()); + } + byte_no = 2; + break; +@@ -212,7 +224,7 @@ + needs_vfinal_flag, + false, + change_to_virtual, +- true)| ++ true, method->is_old())| + method()->size_of_parameters()); + + // Note: byte_no also appears in TemplateTable::resolve. +@@ -252,7 +264,7 @@ + assert(instanceKlass::cast(interf)->is_interface(), "must be an interface"); + set_f1(interf); + set_f2(index); +- set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters()); ++ set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true, method->is_old()) | method()->size_of_parameters()); + set_bytecode_1(Bytecodes::_invokeinterface); + } + +@@ -282,7 +294,7 @@ + param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic + bool is_final = true; + assert(signature_invoker->is_final_method(), "is_final"); +- int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size; ++ int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true, false) | param_size; + assert(_flags == 0 || _flags == flags, "flags should be the same"); + set_flags(flags); + // do not do set_bytecode on a secondary CP cache entry +@@ -416,26 +428,13 @@ + // If this constantPoolCacheEntry refers to old_method then update it + // to refer to new_method. 
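++ // The rewrite below drops the per-pair old/new method matching: any entry
++ // whose resolved method belongs to a class that has a newer version is
++ // simply reset via initialize_entry(), forcing re-resolution on next use.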
+ bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, +- methodOop new_method, bool * trace_name_printed) { ++ methodOop new_method) { + + if (is_vfinal()) { ++ + // virtual and final so f2() contains method ptr instead of vtable index +- if (f2() == (intptr_t)old_method) { +- // match old_method so need an update +- _f2 = (intptr_t)new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- ++ if((methodOop)f2() != NULL && ((methodOop)f2())->method_holder()->klass_part()->new_version()) { ++ initialize_entry(constant_pool_index()); + return true; + } + +@@ -443,65 +442,28 @@ + return false; + } + +- if ((oop)_f1 == NULL) { +- // NULL f1() means this is a virtual entry so bail out +- // We are assuming that the vtable index does not need change. ++ // (tw) check how to update interface methods! ++ if (bytecode_1() == Bytecodes::_invokevirtual || bytecode_2() == Bytecodes::_invokevirtual) { ++ ++ if(((methodOop)f1())->method_holder()->klass_part()->new_version()) { ++ initialize_entry(constant_pool_index()); ++ return true; ++ } ++ + return false; + } + + if ((oop)_f1 == old_method) { + _f1 = new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- ++ return true; ++ } else if(_f1 != NULL && (bytecode_1() != Bytecodes::_invokeinterface && ((methodOop)f1())->method_holder()->klass_part()->new_version())) { ++ initialize_entry(constant_pool_index()); + return true; + } + + return false; + } + +-bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { +- if (!is_method_entry()) { +- // not a method entry so not interesting by default +- return false; +- } +- +- methodOop m = NULL; +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- m = (methodOop)_f2; +- } else if ((oop)_f1 == NULL) { +- // NULL _f1 means this is a virtual entry so also not interesting +- return false; +- } else { +- if (!((oop)_f1)->is_method()) { +- // _f1 can also contain a klassOop for an interface +- return false; +- } +- m = (methodOop)_f1; +- } +- +- assert(m != NULL && m->is_method(), "sanity check"); +- if (m == NULL || !m->is_method() || m->method_holder() != k) { +- // robustness for above sanity checks or method is not in +- // the interesting class +- return false; +- } +- +- // the method is in the interesting class so the entry is interesting +- return true; +-} +- + void ConstantPoolCacheEntry::print(outputStream* st, int index) const { + // print separator + if (index == 0) tty->print_cr(" -------------"); +@@ -542,38 +504,18 @@ + // RedefineClasses() API support: + // If any entry of this constantPoolCache points to any of + // old_methods, replace it with the corresponding new_method. 
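++ // The replacement, adjust_entries, also resets every field entry, since
++ // field offsets may have moved in the redefined class (see the (tw) TODO
++ // below about updating only the affected offsets).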
+-void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed) { +- +- if (methods_length == 0) { +- // nothing to do if there are no methods +- return; +- } +- +- // get shorthand for the interesting class +- klassOop old_holder = old_methods[0]->method_holder(); ++void constantPoolCacheOopDesc::adjust_entries(methodOop* old_methods, methodOop* new_methods, ++ int methods_length) { + + for (int i = 0; i < length(); i++) { +- if (!entry_at(i)->is_interesting_method_entry(old_holder)) { +- // skip uninteresting methods +- continue; +- } ++ if (entry_at(i)->is_field_entry()) { + +- // The constantPoolCache contains entries for several different +- // things, but we only care about methods. In fact, we only care +- // about methods in the same class as the one that contains the +- // old_methods. At this point, we have an interesting entry. +- +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; +- methodOop new_method = new_methods[j]; +- +- if (entry_at(i)->adjust_method_entry(old_method, new_method, +- trace_name_printed)) { +- // current old_method matched this entry and we updated it so +- // break out and get to the next interesting entry if there one +- break; +- } ++ // (tw) TODO: Update only field offsets and modify only constant pool entries that ++ // point to changed fields ++ entry_at(i)->initialize_entry(entry_at(i)->constant_pool_index()); ++ ++ } else if(entry_at(i)->is_method_entry()) { ++ entry_at(i)->adjust_method_entry(NULL, NULL); + } + } + } +diff --git a/src/share/vm/oops/cpCacheOop.hpp b/src/share/vm/oops/cpCacheOop.hpp +--- a/src/share/vm/oops/cpCacheOop.hpp ++++ b/src/share/vm/oops/cpCacheOop.hpp +@@ -135,19 +135,24 @@ + void set_bytecode_2(Bytecodes::Code code); + void set_f1(oop f1) { + oop existing_f1 = _f1; // read once +- assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); ++ // (tw) need to relax assertion for redefinition ++ // assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); + oop_store(&_f1, f1); + } + void set_f1_if_null_atomic(oop f1); +- void set_f2(intx f2) { assert(_f2 == 0 || _f2 == f2, "illegal field change"); _f2 = f2; } ++ void set_f2(intx f2) { ++ // (tw) need to relax assertion for redefinition ++ // assert(_f2 == 0 || _f2 == f2, "illegal field change"); ++ _f2 = f2; } + int as_flags(TosState state, bool is_final, bool is_vfinal, bool is_volatile, +- bool is_method_interface, bool is_method); ++ bool is_method_interface, bool is_method, bool is_old_method); + void set_flags(intx flags) { _flags = flags; } + + public: + // specific bit values in flag field + // Note: the interpreter knows this layout! + enum FlagBitValues { ++ oldMethodBit = 22, + hotSwapBit = 23, + methodInterface = 24, + volatileField = 25, +@@ -167,6 +172,8 @@ + void initialize_entry(int original_index); // initialize primary entry + void initialize_secondary_entry(int main_index); // initialize secondary entry + ++ void copy_from(ConstantPoolCacheEntry *other); ++ + void set_field( // sets entry to resolved field state + Bytecodes::Code get_code, // the bytecode used for reading the field + Bytecodes::Code put_code, // the bytecode used for writing the field +@@ -296,9 +303,7 @@ + // trace_name_printed is set to true if the current call has + // printed the klass name so that other routines in the adjust_* + // group don't print the klass name. 
+- bool adjust_method_entry(methodOop old_method, methodOop new_method, +- bool * trace_name_printed); +- bool is_interesting_method_entry(klassOop k); ++ bool adjust_method_entry(methodOop old_method, methodOop new_method); + bool is_field_entry() const { return (_flags & (1 << hotSwapBit)) == 0; } + bool is_method_entry() const { return (_flags & (1 << hotSwapBit)) != 0; } + +@@ -397,14 +402,9 @@ + return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index); + } + +- // RedefineClasses() API support: +- // If any entry of this constantPoolCache points to any of +- // old_methods, replace it with the corresponding new_method. +- // trace_name_printed is set to true if the current call has +- // printed the klass name so that other routines in the adjust_* +- // group don't print the klass name. +- void adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed); ++ // (tw) Update method and field references ++ void adjust_entries(methodOop* old_methods, methodOop* new_methods, ++ int methods_length); + }; + + #endif // SHARE_VM_OOPS_CPCACHEOOP_HPP +diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp +--- a/src/share/vm/oops/instanceKlass.cpp ++++ b/src/share/vm/oops/instanceKlass.cpp +@@ -248,12 +248,118 @@ + } + + ++void instanceKlass::initialize_redefined_class() { ++ RC_TRACE(0x00000400, ("initializing redefined class %s", ++ name()->as_C_string())); ++ ++ assert(!is_initialized(), ""); ++ assert(this->old_version() != NULL, ""); ++ assert(is_linked(), "must be linked before"); ++ ++ ++ instanceKlassHandle this_oop(Thread::current(), this->as_klassOop()); ++ class UpdateStaticFieldClosure : public FieldClosure { ++ ++ private: ++ instanceKlassHandle this_oop; ++ ++ public: ++ UpdateStaticFieldClosure(instanceKlassHandle this_oop) { ++ this->this_oop = this_oop; ++ } ++ ++ virtual void do_field(fieldDescriptor* fd) { ++ fieldDescriptor result; ++ bool found = ((instanceKlass *)(this_oop->old_version()->klass_part()))->find_local_field(fd->name(), fd->signature(), &result); ++ ++ if (found && result.is_static()) { ++ int old_offset = result.offset(); ++ assert(result.field_type() == fd->field_type(), "Old and new field type does not match"); ++ ++ oop new_location = this_oop()->java_mirror(); ++ oop old_location = this_oop->old_version()->java_mirror(); ++ int offset = fd->offset(); ++ RC_TRACE(0x00000400, ("Copying static field value for field '%s' old_offset=%d new_offset=%d", ++ fd->name()->as_C_string(), old_offset, offset)); ++ ++ oop cur_oop; ++ ++ switch(result.field_type()) { ++ ++ // Found static field with same name and type in the old klass => copy value from old to new klass ++ ++ case T_BOOLEAN: ++ new_location->bool_field_put(offset, old_location->bool_field(old_offset)); ++ DEBUG_ONLY(old_location->byte_field_put(old_offset, 0)); ++ break; ++ ++ case T_CHAR: ++ new_location->char_field_put(offset, old_location->char_field(old_offset)); ++ DEBUG_ONLY(old_location->char_field_put(old_offset, 0)); ++ break; ++ ++ case T_FLOAT: ++ new_location->float_field_put(offset, old_location->float_field(old_offset)); ++ DEBUG_ONLY(old_location->float_field_put(old_offset, 0)); ++ break; ++ ++ case T_DOUBLE: ++ new_location->double_field_put(offset, old_location->double_field(old_offset)); ++ DEBUG_ONLY(old_location->double_field_put(old_offset, 0)); ++ break; ++ ++ case T_BYTE: ++ new_location->byte_field_put(offset, old_location->byte_field(old_offset)); ++ 
DEBUG_ONLY(old_location->byte_field_put(old_offset, 0)); ++ break; ++ ++ case T_SHORT: ++ new_location->short_field_put(offset, old_location->short_field(old_offset)); ++ DEBUG_ONLY(old_location->short_field_put(old_offset, 0)); ++ break; ++ ++ case T_INT: ++ new_location->int_field_put(offset, old_location->int_field(old_offset)); ++ DEBUG_ONLY(old_location->int_field_put(old_offset, 0)); ++ break; ++ ++ case T_LONG: ++ new_location->long_field_put(offset, old_location->long_field(old_offset)); ++ DEBUG_ONLY(old_location->long_field_put(old_offset, 0)); ++ break; ++ ++ case T_OBJECT: ++ case T_ARRAY: ++ cur_oop = old_location->obj_field(old_offset); ++ new_location->obj_field_put_raw(offset, cur_oop); ++ old_location->obj_field_put_raw(old_offset, NULL); ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ } else { ++ RC_TRACE(0x00000200, ("New static field %s has_initial_value=%d", ++ fd->name()->as_C_string(), (int)(fd->has_initial_value()))); ++ // field not found ++ // (tw) TODO: Probably this call is not necessary here! ++ // FIXME: idubrov ++ //ClassFileParser::initialize_static_field(fd, Thread::current()); ++ } ++ } ++ }; ++ ++ UpdateStaticFieldClosure cl(this_oop); ++ this->do_local_static_fields(&cl); ++} ++ ++ + bool instanceKlass::verify_code( + instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) { + // 1) Verify the bytecodes + Verifier::Mode mode = + throw_verifyerror ? Verifier::ThrowException : Verifier::NoException; +- return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false); ++ return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), true, CHECK_false); + } + + +@@ -360,7 +466,13 @@ + jt->get_thread_stat()->perf_recursion_counts_addr(), + jt->get_thread_stat()->perf_timers_addr(), + PerfClassTraceTime::CLASS_VERIFY); +- bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(true); ++ } ++ bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(false); ++ } + if (!verify_ok) { + return false; + } +@@ -398,7 +510,8 @@ + } + #endif + this_oop->set_init_state(linked); +- if (JvmtiExport::should_post_class_prepare()) { ++ // (tw) Must check for old version in order to prevent infinite loops. 
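++ // The old_version() == NULL guard below skips the event for redefined
++ // classes; per the inline note, posting it again would risk a JVMTI
++ // deadlock or an endless prepare loop.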
++ if (JvmtiExport::should_post_class_prepare() && this_oop->old_version() == NULL /* JVMTI deadlock otherwise */) { + Thread *thread = THREAD; + assert(thread->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop()); +@@ -665,6 +778,18 @@ + return false; + } + ++bool instanceKlass::implements_interface_any_version(klassOop k) const { ++ k = k->klass_part()->newest_version(); ++ if (this->newest_version() == k) return true; ++ assert(Klass::cast(k)->is_interface(), "should be an interface class"); ++ for (int i = 0; i < transitive_interfaces()->length(); i++) { ++ if (((klassOop)transitive_interfaces()->obj_at(i))->klass_part()->newest_version() == k) { ++ return true; ++ } ++ } ++ return false; ++} ++ + objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) { + if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException()); + if (length > arrayOopDesc::max_array_length(T_OBJECT)) { +@@ -793,7 +918,25 @@ + } + + void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) { ++ ++ ResourceMark rm(THREAD); + methodHandle h_method(THREAD, this_oop->class_initializer()); ++ ++ if (this_oop->revision_number() != -1){ ++ methodOop m = NULL; ++ if (AllowAdvancedClassRedefinition) { ++ m = this_oop->find_method(vmSymbols::static_transformer_name(), vmSymbols::void_method_signature()); ++ } ++ methodHandle method(m); ++ if (method() != NULL && method()->is_static()) { ++ RC_TRACE(0x00000200, ("Calling static transformer instead of static initializer")); ++ h_method = method; ++ } else if (!((instanceKlass*)this_oop->old_version()->klass_part())->is_not_initialized()) { ++ // Only execute the static initializer, if it was not yet executed for the old version of the class. 
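++ // Presumably re-running <clinit> here would clobber static state that
++ // initialize_redefined_class already copied over from the old version.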
++ return; ++ } ++ } ++ + assert(!this_oop->is_initialized(), "we cannot initialize twice"); + if (TraceClassInitialization) { + tty->print("%d Initializing ", call_class_initializer_impl_counter++); +@@ -942,6 +1085,137 @@ + } + } + ++void instanceKlass::store_update_information(GrowableArray<int> &values) { ++ int *arr = NEW_C_HEAP_ARRAY(int, values.length()); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_update_information(arr); ++} ++ ++void instanceKlass::clear_update_information() { ++ FREE_C_HEAP_ARRAY(int, update_information()); ++ set_update_information(NULL); ++} ++ ++typedef Pair<int, klassOop> typeInfoPair; ++ ++void instanceKlass::store_type_check_information(GrowableArray< Pair<int, klassOop> > &values) { ++ Pair<int, klassOop> *arr = NEW_C_HEAP_ARRAY(typeInfoPair, values.length()); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_type_check_information(arr); ++} ++ ++void instanceKlass::clear_type_check_information() { ++ FREE_C_HEAP_ARRAY(typeInfoPair, type_check_information()); ++ set_type_check_information(NULL); ++} ++ ++void instanceKlass::do_fields_evolution(FieldEvolutionClosure* cl) { ++ ++ assert (old_version() != NULL, "must have old version!"); ++ ++ klassOop old_klass_oop = old_version(); ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = this; ++ ++ fieldDescriptor fd; ++ fieldDescriptor old_fd; ++ ++ instanceKlass *cur_new_klass = new_klass; ++ klassOop cur_new_klass_oop = this->as_klassOop(); ++ ++ if (_fields_not_changed) { ++ ++ class MyFieldClosure : public FieldClosure { ++ ++ FieldEvolutionClosure *_cl; ++ public: ++ MyFieldClosure(FieldEvolutionClosure *cl) {_cl = cl; } ++ virtual void do_field(fieldDescriptor* fd) { ++ _cl->do_changed_field(fd, fd); ++ } ++ }; ++ ++ MyFieldClosure mfc(cl); ++ do_nonstatic_fields(&mfc); ++ } else { ++ ++ _fields_not_changed = true; ++ GrowableArray<fieldDescriptor> fds; ++ while (true) { ++ for (JavaFieldStream fs(cur_new_klass); !fs.done(); fs.next()) { ++ fd.initialize(cur_new_klass_oop, fs.index()); ++ if (fd.is_static()) { ++ continue; ++ } ++ fds.append(fd); ++ } ++ ++ if (cur_new_klass->super() != NULL) { ++ cur_new_klass_oop = cur_new_klass->super(); ++ cur_new_klass = instanceKlass::cast(cur_new_klass_oop); ++ } else { ++ break; ++ } ++ } ++ ++ GrowableArray<fieldDescriptor> sortedFds; ++ while (fds.length() > 0) { ++ int minOffset = 0x7fffffff; ++ int minIndex = -1; ++ for (int i=0; i<fds.length(); i++) { ++ int curOffset = fds.adr_at(i)->offset(); ++ if (curOffset < minOffset) { ++ minOffset = curOffset; ++ minIndex = i; ++ } ++ } ++ ++ sortedFds.append(fds.at(minIndex)); ++ fds.remove_at(minIndex); ++ } ++ ++ ++ for (int i=0; i<sortedFds.length(); i++) { ++ fieldDescriptor &fd = *sortedFds.adr_at(i); ++ ++ char found = 0; ++ instanceKlass *cur_old_klass = old_klass; ++ klassOop cur_old_klass_oop = old_klass_oop; ++ while (true) { ++ for (JavaFieldStream fs(cur_old_klass); !fs.done(); fs.next()) { ++ old_fd.initialize(cur_old_klass_oop, fs.index()); ++ if (old_fd.is_static()) { ++ continue; ++ } ++ if (old_fd.name() == fd.name() && old_fd.signature() == fd.signature()) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found && cur_old_klass->super()) { ++ cur_old_klass_oop = cur_old_klass->super(); ++ cur_old_klass = instanceKlass::cast(cur_old_klass_oop); ++ } else { ++ break; ++ } ++ } ++ ++ if (found) { ++ if (old_fd.offset() != fd.offset()) { ++ _fields_not_changed = false; ++ } ++ 
cl->do_changed_field(&old_fd, &fd); ++ } else { ++ _fields_not_changed = false; ++ cl->do_new_field(&fd); ++ } ++ } ++ } ++} + + void instanceKlass::do_local_static_fields(FieldClosure* cl) { + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { +@@ -1331,6 +1605,20 @@ + return id; + } + ++bool instanceKlass::update_jmethod_id(methodOop method, jmethodID newMethodID) { ++ size_t idnum = (size_t)method->method_idnum(); ++ jmethodID* jmeths = methods_jmethod_ids_acquire(); ++ size_t length; // length assigned as debugging crumb ++ jmethodID id = NULL; ++ if (jmeths != NULL && // If there is a cache ++ (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, ++ jmeths[idnum+1] = newMethodID; // Set the id (may be NULL) ++ return true; ++ } ++ ++ return false; ++} ++ + + // Cache an itable index + void instanceKlass::set_cached_itable_index(size_t idnum, int index) { +@@ -1490,6 +1778,13 @@ + last = b; + b = b->next(); + } ++ ++ // (tw) Hack as dependencies get wrong version of klassOop ++ if(this->old_version() != NULL) { ++ ((instanceKlass *)this->old_version()->klass_part())->remove_dependent_nmethod(nm); ++ return; ++ } ++ + #ifdef ASSERT + tty->print_cr("### %s can't find dependent nmethod:", this->external_name()); + nm->print(); +@@ -2337,6 +2632,9 @@ + klassOop mirrored_klass = java_lang_Class::as_klassOop(obj); + st->print(BULLET"fake entry for mirror: "); + mirrored_klass->print_value_on(st); ++ if (mirrored_klass != NULL) { ++ st->print_cr("revision: %d (oldest=%d, newest=%d)", mirrored_klass->klass_part()->revision_number(), mirrored_klass->klass_part()->oldest_version()->klass_part()->revision_number(), mirrored_klass->klass_part()->newest_version()->klass_part()->revision_number()); ++ } + st->cr(); + st->print(BULLET"fake entry resolved_constructor: "); + methodOop ctor = java_lang_Class::resolved_constructor(obj); +diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp +--- a/src/share/vm/oops/instanceKlass.hpp ++++ b/src/share/vm/oops/instanceKlass.hpp +@@ -99,6 +99,22 @@ + virtual void do_field(fieldDescriptor* fd) = 0; + }; + ++// (tw) Iterates over the fields of the old and new class ++class FieldEvolutionClosure : public StackObj { ++public: ++ virtual void do_new_field(fieldDescriptor* fd) = 0; ++ virtual void do_old_field(fieldDescriptor* fd) = 0; ++ virtual void do_changed_field(fieldDescriptor* old_fd, fieldDescriptor *new_fd) = 0; ++}; ++ ++// (tw) Iterates over the methods of the old and new class ++class MethodEvolutionClosure : public StackObj { ++public: ++ virtual void do_new_method(methodOop oop) = 0; ++ virtual void do_old_method(methodOop oop) = 0; ++ virtual void do_changed_method(methodOop oldOop, methodOop newOop) = 0; ++}; ++ + #ifndef PRODUCT + // Print fields. + // If "obj" argument to constructor is NULL, prints static fields, otherwise prints non-static fields. +@@ -264,6 +280,11 @@ + // _idnum_allocated_count. + u1 _init_state; // state of class + ++ // (tw) Field that allows for a short-path when calculating updated fields for the second time and ++ // no fields changed. Testing performance impact with this, can be removed later when the update ++ // information is cached. 
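++ // do_fields_evolution (instanceKlass.cpp) sets this flag at the start of a
++ // full pass and clears it whenever a field has moved or is new, so
++ // subsequent passes can take the short path.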
++ bool _fields_not_changed; ++ + u1 _reference_type; // reference type + + // embedded Java vtable follows here +@@ -397,6 +418,7 @@ + // initialization (virtuals from Klass) + bool should_be_initialized() const; // means that initialize should be called + void initialize(TRAPS); ++ void initialize_redefined_class(); + void link_class(TRAPS); + bool link_class_or_fail(TRAPS); // returns false on failure + void unlink_class(); +@@ -549,6 +571,7 @@ + static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum, + size_t *length_p, jmethodID* id_p); + jmethodID jmethod_id_or_null(methodOop method); ++ bool update_jmethod_id(methodOop method, jmethodID newMethodID); + + // cached itable index support + void set_cached_itable_index(size_t idnum, int index); +@@ -630,6 +653,7 @@ + + // subclass/subinterface checks + bool implements_interface(klassOop k) const; ++ bool implements_interface_any_version(klassOop k) const; + + // Access to implementors of an interface. We only store the count + // of implementors, and in case, there are only a few +@@ -659,6 +683,12 @@ + void do_local_static_fields(FieldClosure* cl); + void do_nonstatic_fields(FieldClosure* cl); // including inherited fields + void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS); ++ void do_fields_evolution(FieldEvolutionClosure *cl); ++ void store_update_information(GrowableArray<int> &values); ++ void clear_update_information(); ++ void store_type_check_information(GrowableArray< Pair<int, klassOop> > &values); ++ void clear_type_check_information(); ++ + + void methods_do(void f(methodOop method)); + void array_klasses_do(void f(klassOop k)); +diff --git a/src/share/vm/oops/instanceKlassKlass.cpp b/src/share/vm/oops/instanceKlassKlass.cpp +--- a/src/share/vm/oops/instanceKlassKlass.cpp ++++ b/src/share/vm/oops/instanceKlassKlass.cpp +@@ -451,6 +451,28 @@ + instanceKlass* ik = instanceKlass::cast(klassOop(obj)); + klassKlass::oop_print_on(obj, st); + ++ // (tw) Output revision number and revision numbers of older / newer and oldest / newest version of this class. 
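++ // (newer/newest and old/oldest can differ once more than two versions of
++ // the class are chained together, hence the separate printouts below.)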
++ ++ st->print(BULLET"revision: %d", ik->revision_number()); ++ ++ if (ik->new_version() != NULL) { ++ st->print(" (newer=%d)", ik->new_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->newest_version() != ik->new_version() && ik->newest_version() != obj) { ++ st->print(" (newest=%d)", ik->newest_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->old_version() != NULL) { ++ st->print(" (old=%d)", ik->old_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->oldest_version() != ik->old_version() && ik->oldest_version() != obj) { ++ st->print(" (oldest=%d)", ik->oldest_version()->klass_part()->revision_number()); ++ } ++ ++ st->cr(); ++ + st->print(BULLET"instance size: %d", ik->size_helper()); st->cr(); + st->print(BULLET"klass size: %d", ik->object_size()); st->cr(); + st->print(BULLET"access: "); ik->access_flags().print_on(st); st->cr(); +@@ -636,7 +658,7 @@ + } + guarantee(sib->as_klassOop()->is_klass(), "should be klass"); + guarantee(sib->as_klassOop()->is_perm(), "should be in permspace"); +- guarantee(sib->super() == super, "siblings should have same superklass"); ++ guarantee(sib->super() == super || super->klass_part()->newest_version() == SystemDictionary::Object_klass(), "siblings should have same superklass"); + sib = sib->next_sibling(); + } + +diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp +--- a/src/share/vm/oops/instanceRefKlass.cpp ++++ b/src/share/vm/oops/instanceRefKlass.cpp +@@ -455,10 +455,13 @@ + instanceKlass* ik = instanceKlass::cast(k); + + // Check that we have the right class +- debug_only(static bool first_time = true); +- assert(k == SystemDictionary::Reference_klass() && first_time, +- "Invalid update of maps"); +- debug_only(first_time = false); ++ ++ // (tw) Asserts no longer valid for class redefinition ++ // debug_only(static bool first_time = true); ++ ++ //assert(k == SystemDictionary::Reference_klass() && first_time, ++ // "Invalid update of maps"); ++ //debug_only(first_time = false); + assert(ik->nonstatic_oop_map_count() == 1, "just checking"); + + OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); +diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp +--- a/src/share/vm/oops/klass.cpp ++++ b/src/share/vm/oops/klass.cpp +@@ -54,6 +54,26 @@ + return false; + } + ++void Klass::update_supers_to_newest_version() { ++ ++ if (super() != NULL) set_super(super()->klass_part()->newest_version()); ++ ++ for (uint i=0; i<primary_super_limit(); i++) { ++ klassOop cur = _primary_supers[i]; ++ if (cur != NULL) { ++ _primary_supers[i] = cur->klass_part()->newest_version(); ++ } ++ } ++ ++ // Scan the array-of-objects ++ int cnt = secondary_supers()->length(); ++ for (int i = 0; i < cnt; i++) { ++ klassOop cur = (klassOop)secondary_supers()->obj_at(i); ++ if (cur != NULL) { ++ secondary_supers()->obj_at_put(i, cur->klass_part()->newest_version()); ++ } ++ } ++} + bool Klass::search_secondary_supers(klassOop k) const { + // Put some extra logic here out-of-line, before the search proper. + // This cuts down the size of the inline method. 
+@@ -160,6 +180,16 @@ + kl->set_alloc_size(0); + TRACE_SET_KLASS_TRACE_ID(kl, 0); + ++ kl->set_redefinition_flags(Klass::NoRedefinition); ++ kl->set_redefining(false); ++ kl->set_new_version(NULL); ++ kl->set_old_version(NULL); ++ kl->set_redefinition_index(-1); ++ kl->set_revision_number(-1); ++ kl->set_field_redefinition_policy(DynamicCheck); ++ kl->set_static_field_redefinition_policy(AccessDeletedMembers); ++ kl->set_method_redefinition_policy(AccessDeletedMembers); ++ + kl->set_prototype_header(markOopDesc::prototype()); + kl->set_biased_lock_revocation_count(0); + kl->set_last_biased_lock_bulk_revocation_time(0); +@@ -232,7 +262,7 @@ + set_super(NULL); + oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop()); + assert(super_depth() == 0, "Object must already be initialized properly"); +- } else if (k != super() || k == SystemDictionary::Object_klass()) { ++ } else if (k != super() || k->klass_part()->super() == NULL) { + assert(super() == NULL || super() == SystemDictionary::Object_klass(), + "initialize this only once to a non-trivial value"); + set_super(k); +diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp +--- a/src/share/vm/oops/klass.hpp ++++ b/src/share/vm/oops/klass.hpp +@@ -170,6 +170,7 @@ + void* operator new(size_t ignored, KlassHandle& klass, int size, TRAPS); + }; + ++template<class L, class R> class Pair; + + class Klass : public Klass_vtbl { + friend class VMStructs; +@@ -222,6 +223,39 @@ + oop* oop_block_beg() const { return adr_secondary_super_cache(); } + oop* oop_block_end() const { return adr_next_sibling() + 1; } + ++ // (tw) Different class redefinition flags of code evolution. ++ enum RedefinitionFlags { ++ ++ // This class is not redefined at all! ++ NoRedefinition, ++ ++ // There are changes to the class meta data. ++ ModifyClass = 1, ++ ++ // The size of the class meta data changes. ++ ModifyClassSize = ModifyClass << 1, ++ ++ // There are change to the instance format. ++ ModifyInstances = ModifyClassSize << 1, ++ ++ // The size of instances changes. ++ ModifyInstanceSize = ModifyInstances << 1, ++ ++ // A super type of this class is removed. ++ RemoveSuperType = ModifyInstanceSize << 1, ++ ++ // This class (or one of its super classes) has an instance transformer method. ++ HasInstanceTransformer = RemoveSuperType << 1, ++ }; ++ ++ // (tw) Different policies dealing with deleted fields / methods in old code. ++ enum RedefinitionPolicy { ++ StaticCheck, ++ DynamicCheck, ++ AccessDeletedMembers, ++ AccessOldMembers ++ }; ++ + protected: + // + // The oop block. All oop fields must be declared here and only oop fields +@@ -241,6 +275,10 @@ + oop _java_mirror; + // Superclass + klassOop _super; ++ // Old class ++ klassOop _old_version; ++ // New class ++ klassOop _new_version; + // First subclass (NULL if none); _subklass->next_sibling() is next one + klassOop _subklass; + // Sibling link (or NULL); links all subklasses of a klass +@@ -253,6 +291,19 @@ + jint _modifier_flags; // Processed access flags, for use by Class.getModifiers. + AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here. + ++ // (tw) Non-oop fields for enhanced class redefinition ++ jint _revision_number; // The revision number for redefined classes ++ jint _redefinition_index; // Index of this class when performing the redefinition ++ bool _subtype_changed; ++ int _redefinition_flags; // Level of class redefinition ++ bool _is_copying_backwards; // Does the class need to copy fields backwards? 
=> possibly overwrite itself? ++ int * _update_information; // Update information ++ Pair<int, klassOop> * _type_check_information; // Offsets of object fields that need a type check ++ char _method_redefinition_policy; ++ char _field_redefinition_policy; ++ char _static_field_redefinition_policy; ++ bool _is_redefining; ++ + #ifndef PRODUCT + int _verify_count; // to avoid redundant verifies + #endif +@@ -301,6 +352,99 @@ + klassOop secondary_super_cache() const { return _secondary_super_cache; } + void set_secondary_super_cache(klassOop k) { oop_store_without_check((oop*) &_secondary_super_cache, (oop) k); } + ++ // BEGIN class redefinition utilities ++ ++ // double links between new and old version of a class ++ klassOop old_version() const { return _old_version; } ++ void set_old_version(klassOop klass) { assert(_old_version == NULL || klass == NULL, "Can only be set once!"); _old_version = klass; } ++ klassOop new_version() const { return _new_version; } ++ void set_new_version(klassOop klass) { assert(_new_version == NULL || klass == NULL, "Can only be set once!"); _new_version = klass; } ++ ++ // A subtype of this class is no longer a subtype ++ bool has_subtype_changed() const { return _subtype_changed; } ++ void set_subtype_changed(bool b) { assert(is_newest_version() || new_version()->klass_part()->is_newest_version(), "must be newest or second newest version"); ++ _subtype_changed = b; } ++ // state of being redefined ++ int redefinition_index() const { return _redefinition_index; } ++ void set_redefinition_index(int index) { _redefinition_index = index; } ++ void set_redefining(bool b) { _is_redefining = b; } ++ bool is_redefining() const { return _is_redefining; } ++ int redefinition_flags() const { return _redefinition_flags; } ++ bool check_redefinition_flag(int flags) const { return (_redefinition_flags & flags) != 0; } ++ void set_redefinition_flags(int flags) { _redefinition_flags = flags; } ++ bool is_copying_backwards() const { return _is_copying_backwards; } ++ void set_copying_backwards(bool b) { _is_copying_backwards = b; } ++ ++ // update information ++ int *update_information() const { return _update_information; } ++ void set_update_information(int *info) { _update_information = info; } ++ Pair<int, klassOop> *type_check_information() const { return _type_check_information; } ++ void set_type_check_information(Pair<int, klassOop> *info) { _type_check_information = info; } ++ ++ bool is_same_or_older_version(klassOop klass) const { ++ if (Klass::cast(klass) == this) { return true; } ++ else if (_old_version == NULL) { return false; } ++ else { return _old_version->klass_part()->is_same_or_older_version(klass); } ++ } ++ ++ // Revision number for redefined classes, -1 for originally loaded classes ++ jint revision_number() const { ++ return _revision_number; ++ } ++ ++ bool was_redefined() const { ++ return _revision_number != -1; ++ } ++ ++ void set_revision_number(jint number) { ++ _revision_number = number; ++ } ++ ++ char method_redefinition_policy() { ++ return _method_redefinition_policy; ++ } ++ ++ void set_method_redefinition_policy(char v) { ++ _method_redefinition_policy = v; ++ } ++ ++ char field_redefinition_policy() { ++ return _field_redefinition_policy; ++ } ++ ++ void set_field_redefinition_policy(char v) { ++ _field_redefinition_policy = v; ++ } ++ ++ char static_field_redefinition_policy() { ++ return _static_field_redefinition_policy; ++ } ++ ++ void set_static_field_redefinition_policy(char v) { ++ _static_field_redefinition_policy = v; ++ } ++ 
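++ // Class versions form a doubly linked chain through _old_version and
++ // _new_version; the helpers below walk recursively to either end.
++ // E.g. for v1 <-> v2 <-> v3: v2->oldest_version() == v1 and
++ // v2->newest_version() == v3 (version numbers here are illustrative).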
++ klassOop oldest_version() const { ++ if (_old_version == NULL) { return this->as_klassOop(); } ++ else { return _old_version->klass_part()->oldest_version(); }; ++ } ++ ++ klassOop newest_version() const { ++ if (_new_version == NULL) { return this->as_klassOop(); } ++ else { return _new_version->klass_part()->newest_version(); }; ++ } ++ ++ klassOop active_version() const { ++ if (_new_version == NULL || _new_version->klass_part()->is_redefining()) { return this->as_klassOop(); assert(!this->is_redefining(), "just checking"); } ++ else { return _new_version->klass_part()->active_version(); }; ++ } ++ ++ bool is_newest_version() const { ++ return _new_version == NULL; ++ } ++ ++ // END class redefinition utilities ++ + objArrayOop secondary_supers() const { return _secondary_supers; } + void set_secondary_supers(objArrayOop k) { oop_store_without_check((oop*) &_secondary_supers, (oop) k); } + +@@ -361,6 +505,8 @@ + void set_next_sibling(klassOop s); + + oop* adr_super() const { return (oop*)&_super; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } + oop* adr_primary_supers() const { return (oop*)&_primary_supers[0]; } + oop* adr_secondary_super_cache() const { return (oop*)&_secondary_super_cache; } + oop* adr_secondary_supers()const { return (oop*)&_secondary_supers; } +@@ -490,6 +636,7 @@ + return search_secondary_supers(k); + } + } ++ void update_supers_to_newest_version(); + bool search_secondary_supers(klassOop k) const; + + // Find LCA in class hierarchy +@@ -818,6 +965,8 @@ + + + inline oop klassOopDesc::java_mirror() const { return klass_part()->java_mirror(); } ++inline klassOop klassOopDesc::old_version() const { return klass_part()->old_version(); } ++inline klassOop klassOopDesc::new_version() const { return klass_part()->new_version(); } + + + #endif // SHARE_VM_OOPS_KLASS_HPP +diff --git a/src/share/vm/oops/klassKlass.cpp b/src/share/vm/oops/klassKlass.cpp +--- a/src/share/vm/oops/klassKlass.cpp ++++ b/src/share/vm/oops/klassKlass.cpp +@@ -68,6 +68,8 @@ + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + MarkSweep::mark_and_push(k->adr_super()); ++ MarkSweep::mark_and_push(k->adr_old_version()); ++ MarkSweep::mark_and_push(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::mark_and_push(k->adr_primary_supers()+i); + MarkSweep::mark_and_push(k->adr_secondary_super_cache()); +@@ -87,6 +89,8 @@ + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + PSParallelCompact::mark_and_push(cm, k->adr_super()); ++ PSParallelCompact::mark_and_push(cm, k->adr_old_version()); ++ PSParallelCompact::mark_and_push(cm, k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + PSParallelCompact::mark_and_push(cm, k->adr_primary_supers()+i); + PSParallelCompact::mark_and_push(cm, k->adr_secondary_super_cache()); +@@ -106,6 +110,8 @@ + int size = oop_size(obj); + Klass* k = Klass::cast(klassOop(obj)); + blk->do_oop(k->adr_super()); ++ blk->do_oop(k->adr_old_version()); ++ blk->do_oop(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + blk->do_oop(k->adr_primary_supers()+i); + blk->do_oop(k->adr_secondary_super_cache()); +@@ -134,6 +140,10 @@ + oop* adr; + adr = k->adr_super(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_old_version(); ++ if (mr.contains(adr)) 
blk->do_oop(adr); ++ adr = k->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + for (juint i = 0; i < Klass::primary_super_limit(); i++) { + adr = k->adr_primary_supers()+i; + if (mr.contains(adr)) blk->do_oop(adr); +@@ -147,6 +157,8 @@ + // The following are "weak links" in the perm gen and are + // treated specially in a later phase of a perm gen collection. + assert(oop(k)->is_perm(), "should be in perm"); ++ assert(oop(k->adr_old_version())->is_perm(), "should be in perm"); ++ assert(oop(k->adr_new_version())->is_perm(), "should be in perm"); + assert(oop(k->adr_subklass())->is_perm(), "should be in perm"); + assert(oop(k->adr_next_sibling())->is_perm(), "should be in perm"); + if (blk->should_remember_klasses() +@@ -167,6 +179,8 @@ + Klass* k = Klass::cast(klassOop(obj)); + + MarkSweep::adjust_pointer(k->adr_super()); ++ MarkSweep::adjust_pointer(k->adr_new_version()); ++ MarkSweep::adjust_pointer(k->adr_old_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::adjust_pointer(k->adr_primary_supers()+i); + MarkSweep::adjust_pointer(k->adr_secondary_super_cache()); +diff --git a/src/share/vm/oops/klassOop.hpp b/src/share/vm/oops/klassOop.hpp +--- a/src/share/vm/oops/klassOop.hpp ++++ b/src/share/vm/oops/klassOop.hpp +@@ -41,8 +41,10 @@ + // returns the Klass part containing dispatching behavior + Klass* klass_part() const { return (Klass*)((address)this + sizeof(klassOopDesc)); } + +- // Convenience wrapper ++ // Convenience wrappers + inline oop java_mirror() const; ++ inline klassOop old_version() const; ++ inline klassOop new_version() const; + + private: + // These have no implementation since klassOop should never be accessed in this fashion +diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp +--- a/src/share/vm/oops/klassVtable.cpp ++++ b/src/share/vm/oops/klassVtable.cpp +@@ -97,7 +97,8 @@ + vtable_length = Universe::base_vtable_size(); + } + +- if (super == NULL && !Universe::is_bootstrapping() && ++ // (tw) TODO: Check if we can relax the condition on a fixed base vtable size ++ /*if (super == NULL && !Universe::is_bootstrapping() && + vtable_length != Universe::base_vtable_size()) { + // Someone is attempting to redefine java.lang.Object incorrectly. The + // only way this should happen is from +@@ -107,9 +108,9 @@ + vtable_length = Universe::base_vtable_size(); + } + assert(super != NULL || vtable_length == Universe::base_vtable_size(), +- "bad vtable size for class Object"); ++ "bad vtable size for class Object");*/ + assert(vtable_length % vtableEntry::size() == 0, "bad vtable length"); +- assert(vtable_length >= Universe::base_vtable_size(), "vtable too small"); ++ //assert(vtable_length >= Universe::base_vtable_size(), "vtable too small"); + } + + int klassVtable::index_of(methodOop m, int len) const { +@@ -1186,6 +1187,7 @@ + + void klassVtable::verify_against(outputStream* st, klassVtable* vt, int index) { + vtableEntry* vte = &vt->table()[index]; ++ if (vte->method() == NULL || table()[index].method() == NULL) return; + if (vte->method()->name() != table()[index].method()->name() || + vte->method()->signature() != table()[index].method()->signature()) { + fatal("mismatched name/signature of vtable entries"); +@@ -1205,6 +1207,8 @@ + + void vtableEntry::verify(klassVtable* vt, outputStream* st) { + NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true)); ++ // (tw) TODO: Check: Does not hold? 
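++ // Old class versions may leave vtable slots with a NULL method, so the
++ // checks below now tolerate empty entries (verify_against and print were
++ // relaxed the same way).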
++ if (method() != NULL) { + assert(method() != NULL, "must have set method"); + method()->verify(); + // we sub_type, because it could be a miranda method +@@ -1212,7 +1216,13 @@ + #ifndef PRODUCT + print(); + #endif +- fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this)); ++ klassOop first_klass = vt->klass()(); ++ klassOop second_klass = method()->method_holder(); ++ // (tw) the following fatal does not work for old versions of classes ++ if (first_klass->klass_part()->is_newest_version()) { ++ //fatal1("vtableEntry %#lx: method is from subclass", this); ++ } ++ } + } + } + +@@ -1220,7 +1230,7 @@ + + void vtableEntry::print() { + ResourceMark rm; +- tty->print("vtableEntry %s: ", method()->name()->as_C_string()); ++ tty->print("vtableEntry %s: ", (method() == NULL) ? "null" : method()->name()->as_C_string()); + if (Verbose) { + tty->print("m %#lx ", (address)method()); + } +@@ -1292,7 +1302,7 @@ + for (int i = 0; i < length(); i++) { + methodOop m = unchecked_method_at(i); + if (m != NULL) { +- if (m->is_old()) { ++ if (m->is_old() || !m->method_holder()->klass_part()->is_newest_version()) { + return false; + } + } +diff --git a/src/share/vm/oops/methodKlass.cpp b/src/share/vm/oops/methodKlass.cpp +--- a/src/share/vm/oops/methodKlass.cpp ++++ b/src/share/vm/oops/methodKlass.cpp +@@ -93,6 +93,10 @@ + m->set_adapter_entry(NULL); + m->clear_code(); // from_c/from_i get set to c2i/i2i + ++ m->set_forward_method(NULL); ++ m->set_new_version(NULL); ++ m->set_old_version(NULL); ++ + if (access_flags.is_native()) { + m->clear_native_function(); + m->set_signature_handler(NULL); +@@ -127,6 +131,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::mark_and_push(m->adr_constMethod()); ++ MarkSweep::mark_and_push(m->adr_forward_method()); ++ MarkSweep::mark_and_push(m->adr_new_version()); ++ MarkSweep::mark_and_push(m->adr_old_version()); + MarkSweep::mark_and_push(m->adr_constants()); + if (m->method_data() != NULL) { + MarkSweep::mark_and_push(m->adr_method_data()); +@@ -141,6 +148,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. 
+ PSParallelCompact::mark_and_push(cm, m->adr_constMethod()); ++ PSParallelCompact::mark_and_push(cm, m->adr_forward_method()); ++ PSParallelCompact::mark_and_push(cm, m->adr_new_version()); ++ PSParallelCompact::mark_and_push(cm, m->adr_old_version()); + PSParallelCompact::mark_and_push(cm, m->adr_constants()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { +@@ -159,6 +169,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves + blk->do_oop(m->adr_constMethod()); ++ blk->do_oop(m->adr_forward_method()); ++ blk->do_oop(m->adr_new_version()); ++ blk->do_oop(m->adr_old_version()); + blk->do_oop(m->adr_constants()); + if (m->method_data() != NULL) { + blk->do_oop(m->adr_method_data()); +@@ -178,6 +191,12 @@ + oop* adr; + adr = m->adr_constMethod(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_forward_method(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + adr = m->adr_constants(); + if (mr.contains(adr)) blk->do_oop(adr); + if (m->method_data() != NULL) { +@@ -197,6 +216,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::adjust_pointer(m->adr_constMethod()); ++ MarkSweep::adjust_pointer(m->adr_forward_method()); ++ MarkSweep::adjust_pointer(m->adr_new_version()); ++ MarkSweep::adjust_pointer(m->adr_old_version()); + MarkSweep::adjust_pointer(m->adr_constants()); + if (m->method_data() != NULL) { + MarkSweep::adjust_pointer(m->adr_method_data()); +@@ -213,6 +235,9 @@ + assert(obj->is_method(), "should be method"); + methodOop m = methodOop(obj); + PSParallelCompact::adjust_pointer(m->adr_constMethod()); ++ PSParallelCompact::adjust_pointer(m->adr_forward_method()); ++ PSParallelCompact::adjust_pointer(m->adr_new_version()); ++ PSParallelCompact::adjust_pointer(m->adr_old_version()); + PSParallelCompact::adjust_pointer(m->adr_constants()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { +@@ -234,7 +259,18 @@ + methodOop m = methodOop(obj); + // get the effect of PrintOopAddress, always, for methods: + st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m); +- st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr(); ++ st->print (" - method holder: "); m->method_holder()->print_value_on(st); ++ ++ if (m->method_holder()->klass_part()->new_version() != NULL) { ++ st->print(" (old)"); ++ } ++ st->cr(); ++ ++ st->print_cr(" - is obsolete: %d", (int)(m->is_obsolete())); ++ st->print_cr(" - is old: %d", (int)(m->is_old())); ++ st->print_cr(" - new version: "INTPTR_FORMAT" ", (address)(m->new_version())); ++ st->print_cr(" - old version: "INTPTR_FORMAT" ", (address)(m->old_version())); ++ st->print_cr(" - holder revision: %d", m->method_holder()->klass_part()->revision_number()); + st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants()); + m->constants()->print_value_on(st); st->cr(); + st->print (" - access: 0x%x ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr(); +diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp +--- a/src/share/vm/oops/methodOop.cpp ++++ b/src/share/vm/oops/methodOop.cpp +@@ -328,6 +328,70 @@ + } + + ++bool methodOopDesc::is_in_code_section(int bci) { ++ // There is no table => every bci is in the code section table. 
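++ // The code section table stores short triples (new_index, original_index,
++ // length); a bci in [new_index, new_index + length) lies in a mapped
++ // section. E.g. with an (illustrative) entry (10, 4, 6), new bci 12 maps
++ // back to original bci 12 - 10 + 4 = 6 in calculate_forward_bci below.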
++ if (!constMethod()->has_code_section_table()) return true; ++ ++ constMethodOop m = constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (bci >= new_index && bci < new_index + length) { ++ // We are in a specified code section. ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++int methodOopDesc::calculate_forward_bci(int bci, methodOop new_method) { ++ int original_bci = -1; ++ if (constMethod()->has_code_section_table()) { ++ assert(is_in_code_section(bci), "can only forward in section"); ++ // First calculate back to original bci. ++ constMethodOop m = constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 original_index = m->code_section_original_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (bci >= new_index && bci < new_index + length) { ++ // We are in a specified code section. ++ original_bci = bci - new_index + original_index; ++ break; ++ } ++ } ++ assert (original_bci != -1, "must have been in code section"); ++ } else { ++ // No code sections specified => we are in an original method. ++ original_bci = bci; ++ } ++ ++ // We know the original bci => match to new method. ++ int new_bci = -1; ++ if (new_method->constMethod()->has_code_section_table()) { ++ // Map to new bci. ++ constMethodOop m = new_method->constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 original_index = m->code_section_original_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (original_bci >= original_index && original_bci < original_index + length) { ++ new_bci = original_bci - original_index + new_index; ++ break; ++ } ++ } ++ assert (new_bci != -1, "must have found new code section"); ++ ++ } else { ++ // We are in an original method. ++ new_bci = original_bci; ++ } ++ ++ return new_bci; ++} ++ ++ + int methodOopDesc::extra_stack_words() { + // not an inline function, to avoid a header dependency on Interpreter + return extra_stack_entries() * Interpreter::stackElementSize; +@@ -1079,6 +1143,9 @@ + // Reset correct method/const method, method size, and parameter info + newcm->set_method(newm()); + newm->set_constMethod(newcm); ++ newm->set_forward_method(newm->forward_method()); ++ newm->set_new_version(newm->new_version()); ++ newm->set_old_version(newm->old_version()); + assert(newcm->method() == newm(), "check"); + newm->constMethod()->set_code_size(new_code_length); + newm->constMethod()->set_constMethod_size(new_const_method_size); +diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp +--- a/src/share/vm/oops/methodOop.hpp ++++ b/src/share/vm/oops/methodOop.hpp +@@ -116,6 +116,11 @@ + AccessFlags _access_flags; // Access flags + int _vtable_index; // vtable index of this method (see VtableIndexFlag) + // note: can have vtables with >2**16 elements (because of inheritance) ++ // (tw) Newer version of method available? 
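++ // All three links below are ordinary oops; methodKlass.cpp (above) was
++ // extended to visit adr_forward_method/adr_new_version/adr_old_version in
++ // every GC marking, iteration and pointer-adjustment path.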
++ methodOop _forward_method; ++ methodOop _new_version; ++ methodOop _old_version; ++ + #ifdef CC_INTERP + int _result_index; // C++ interpreter needs for converting results to/from stack + #endif +@@ -174,6 +179,32 @@ + int name_index() const { return constMethod()->name_index(); } + void set_name_index(int index) { constMethod()->set_name_index(index); } + ++ methodOop forward_method() const {return _forward_method; } ++ void set_forward_method(methodOop m) { _forward_method = m; } ++ bool has_forward_method() const { return forward_method() != NULL; } ++ methodOop new_version() const {return _new_version; } ++ void set_new_version(methodOop m) { _new_version = m; } ++ methodOop newest_version() { if(_new_version == NULL) return this; else return new_version()->newest_version(); } ++ ++ methodOop old_version() const {return _old_version; }; ++ void set_old_version(methodOop m) { ++ if (m == NULL) { ++ _old_version = NULL; ++ return; ++ } ++ ++ assert(_old_version == NULL, "may only be set once"); ++ assert(this->code_size() == m->code_size(), "must have same code length"); ++ _old_version = m; ++ } ++ ++ methodOop oldest_version() const { ++ if(_old_version == NULL) return (methodOop)this; ++ else { ++ return old_version()->oldest_version(); ++ } ++ } ++ + // signature + Symbol* signature() const { return _constants->symbol_at(signature_index()); } + int signature_index() const { return constMethod()->signature_index(); } +@@ -670,6 +701,10 @@ + // Inline cache support + void cleanup_inline_caches(); + ++ // (tw) Method forwarding support. ++ bool is_in_code_section(int bci); ++ int calculate_forward_bci(int bci, methodOop new_method); ++ + // Find if klass for method is loaded + bool is_klass_loaded_by_klass_index(int klass_index) const; + bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const; +@@ -723,6 +758,9 @@ + + // Garbage collection support + oop* adr_constMethod() const { return (oop*)&_constMethod; } ++ oop* adr_forward_method() const { return (oop*)&_forward_method; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } + oop* adr_constants() const { return (oop*)&_constants; } + oop* adr_method_data() const { return (oop*)&_method_data; } + }; +diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp +--- a/src/share/vm/oops/oop.hpp ++++ b/src/share/vm/oops/oop.hpp +@@ -94,6 +94,7 @@ + narrowOop* compressed_klass_addr(); + + void set_klass(klassOop k); ++ void set_klass_no_check(klassOop k); + + // For klass field compression + int klass_gap() const; +@@ -134,6 +135,7 @@ + bool is_array() const; + bool is_objArray() const; + bool is_klass() const; ++ bool is_instanceKlass() const; + bool is_thread() const; + bool is_method() const; + bool is_constMethod() const; +diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp +--- a/src/share/vm/oops/oop.inline.hpp ++++ b/src/share/vm/oops/oop.inline.hpp +@@ -115,6 +115,14 @@ + } + } + ++inline void oopDesc::set_klass_no_check(klassOop k) { ++ if (UseCompressedOops) { ++ oop_store_without_check(compressed_klass_addr(), (oop)k); ++ } else { ++ oop_store_without_check(klass_addr(), (oop) k); ++ } ++} ++ + inline int oopDesc::klass_gap() const { + return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()); + } +@@ -148,6 +156,7 @@ + inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); } + inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); 
} + inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); } ++inline bool oopDesc::is_instanceKlass() const { return blueprint()->oop_is_instanceKlass(); } + inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); } + inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); } + inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); } +diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp +--- a/src/share/vm/prims/jni.cpp ++++ b/src/share/vm/prims/jni.cpp +@@ -405,7 +405,7 @@ + } + } + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, +- Handle(), &st, true, ++ Handle(), &st, true, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp +--- a/src/share/vm/prims/jvm.cpp ++++ b/src/share/vm/prims/jvm.cpp +@@ -866,7 +866,7 @@ + Handle protection_domain (THREAD, JNIHandles::resolve(pd)); + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, + protection_domain, &st, +- verify != 0, ++ verify != 0, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvmtiEnv.cpp b/src/share/vm/prims/jvmtiEnv.cpp +--- a/src/share/vm/prims/jvmtiEnv.cpp ++++ b/src/share/vm/prims/jvmtiEnv.cpp +@@ -290,7 +290,10 @@ + class_definitions[index].klass = jcls; + } + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RetransformClasses */ + +@@ -299,9 +302,12 @@ + // class_definitions - pre-checked for NULL + jvmtiError + JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) { +-//TODO: add locking ++ + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RedefineClasses */ + +diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp +--- a/src/share/vm/prims/jvmtiExport.cpp ++++ b/src/share/vm/prims/jvmtiExport.cpp +@@ -2307,7 +2307,7 @@ + // iterate over any code blob descriptors collected and post a + // DYNAMIC_CODE_GENERATED event to the profiler. + JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() { +- assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events"); ++ assert(!JavaThread::current()->owns_locks_but_redefine_classes_lock(), "all locks must be released to post deferred events"); + // iterate over any code blob descriptors that we collected + if (_code_blobs != NULL) { + for (int i=0; i<_code_blobs->length(); i++) { +diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp +--- a/src/share/vm/prims/jvmtiImpl.cpp ++++ b/src/share/vm/prims/jvmtiImpl.cpp +@@ -286,6 +286,8 @@ + void JvmtiBreakpoint::each_method_version_do(method_action meth_act) { + ((methodOopDesc*)_method->*meth_act)(_bci); + ++ // DCEVM: TODO: Check how we can implement this differently here! ++ + // add/remove breakpoint to/from versions of the method that + // are EMCP. Directly or transitively obsolete methods are + // not saved in the PreviousVersionInfo. 
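An aside on the methodOop.cpp hunk above: calculate_forward_bci() translates a bytecode index in two steps, first from the rewritten method back to the original numbering via the code section table, then forward into the target method's numbering. The standalone C++ sketch below mirrors that two-step mapping outside the VM; the CodeSection struct, the function names, and the main() harness are illustrative assumptions, not code from the patch.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-in for one row of the patch's code section table:
// a contiguous bci range that was relocated when the method was rewritten.
struct CodeSection {
  uint16_t new_index;      // first bci of the range in the rewritten method
  uint16_t original_index; // first bci of the range in the original method
  uint16_t length;         // number of bcis in the range
};

// First half of the mapping: rewritten bci -> original bci. An empty table
// means the method was never rewritten, so every bci is already original.
int to_original_bci(const std::vector<CodeSection>& table, int bci) {
  if (table.empty()) return bci;
  for (const CodeSection& s : table) {
    if (bci >= s.new_index && bci < s.new_index + s.length)
      return bci - s.new_index + s.original_index;
  }
  return -1; // not in any section; callers check is_in_code_section() first
}

// Second half: original bci -> bci in the target method's numbering.
int to_new_bci(const std::vector<CodeSection>& table, int original_bci) {
  if (table.empty()) return original_bci;
  for (const CodeSection& s : table) {
    if (original_bci >= s.original_index && original_bci < s.original_index + s.length)
      return original_bci - s.original_index + s.new_index;
  }
  return -1;
}

int main() {
  // Original bcis 0..9 were moved to 4..13 in the rewritten method.
  std::vector<CodeSection> table = {{4, 0, 10}};
  int original = to_original_bci(table, 7);   // rewritten bci 7 -> original 3
  assert(original == 3);
  assert(to_new_bci(table, original) == 7);   // and back again
  return 0;
}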
+diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp +--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp +@@ -30,491 +30,637 @@ + #include "interpreter/rewriter.hpp" + #include "memory/gcLocker.hpp" + #include "memory/universe.inline.hpp" ++#include "memory/cardTableRS.hpp" ++#include "oops/klassVtable.hpp" + #include "oops/fieldStreams.hpp" +-#include "oops/klassVtable.hpp" + #include "prims/jvmtiImpl.hpp" + #include "prims/jvmtiRedefineClasses.hpp" ++#include "prims/jvmtiClassFileReconstituter.hpp" + #include "prims/methodComparator.hpp" + #include "runtime/deoptimization.hpp" + #include "runtime/relocator.hpp" + #include "utilities/bitMap.inline.hpp" ++#include "compiler/compileBroker.hpp" + + + objArrayOop VM_RedefineClasses::_old_methods = NULL; + objArrayOop VM_RedefineClasses::_new_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_old_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_new_methods = NULL; +-methodOop* VM_RedefineClasses::_deleted_methods = NULL; +-methodOop* VM_RedefineClasses::_added_methods = NULL; ++int* VM_RedefineClasses::_matching_old_methods = NULL; ++int* VM_RedefineClasses::_matching_new_methods = NULL; ++int* VM_RedefineClasses::_deleted_methods = NULL; ++int* VM_RedefineClasses::_added_methods = NULL; + int VM_RedefineClasses::_matching_methods_length = 0; + int VM_RedefineClasses::_deleted_methods_length = 0; + int VM_RedefineClasses::_added_methods_length = 0; + klassOop VM_RedefineClasses::_the_class_oop = NULL; + +- +-VM_RedefineClasses::VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind) { ++// Holds the revision number of the current class redefinition ++int VM_RedefineClasses::_revision_number = -1; ++ ++VM_RedefineClasses::VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind) ++ : VM_GC_Operation(Universe::heap()->total_full_collections(), GCCause::_jvmti_force_gc) { ++ RC_TIMER_START(_timer_total); + _class_count = class_count; + _class_defs = class_defs; + _class_load_kind = class_load_kind; +- _res = JVMTI_ERROR_NONE; ++ _updated_oops = NULL; ++ _result = JVMTI_ERROR_NONE; + } + ++VM_RedefineClasses::~VM_RedefineClasses() { ++ { ++ MonitorLockerEx ml(RedefinitionSync_lock); ++ Threads::set_wait_at_instrumentation_entry(false); ++ ml.notify_all(); ++ } ++ ++ unlock_threads(); ++ RC_TIMER_STOP(_timer_total); ++ ++ if (TimeRedefineClasses) { ++ tty->print_cr("Timing Prologue: %d", _timer_prologue.milliseconds()); ++ tty->print_cr("Timing Class Loading: %d", _timer_class_loading.milliseconds()); ++ tty->print_cr("Timing Waiting for Lock: %d", _timer_wait_for_locks.milliseconds()); ++ tty->print_cr("Timing Class Linking: %d", _timer_class_linking.milliseconds()); ++ tty->print_cr("Timing Check Type: %d", _timer_check_type.milliseconds()); ++ tty->print_cr("Timing Prepare Redefinition: %d", _timer_prepare_redefinition.milliseconds()); ++ tty->print_cr("Timing Redefinition GC: %d", _timer_redefinition.milliseconds()); ++ tty->print_cr("Timing Epilogue: %d", _timer_vm_op_epilogue.milliseconds()); ++ tty->print_cr("------------------------------------------------------------------"); ++ tty->print_cr("Total Time: %d", _timer_total.milliseconds()); ++ } ++} ++ ++// Searches for all affected classes and performs a sorting such that a supertype is always before a subtype. 
++jvmtiError VM_RedefineClasses::find_sorted_affected_classes(GrowableArray<instanceKlassHandle> *all_affected_klasses) { ++ ++ // Create array with all classes for which the redefine command was given ++ GrowableArray<instanceKlassHandle> klasses_to_redefine; ++ for (int i=0; i<_class_count; i++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); ++ instanceKlassHandle klass_handle(Thread::current(), java_lang_Class::as_klassOop(mirror)); ++ klasses_to_redefine.append(klass_handle); ++ assert(klass_handle->new_version() == NULL, "Must be new class"); ++ } ++ ++ // Find classes not directly redefined, but affected by a redefinition (because one of its supertypes is redefined) ++ GrowableArray<instanceKlassHandle> affected_classes; ++ FindAffectedKlassesClosure closure(&klasses_to_redefine, &affected_classes); ++ ++ // Trace affected classes ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ RC_TRACE(0x00000001, ("Klasses affected: %d", ++ affected_classes.length())); ++ for (int i=0; i<affected_classes.length(); i++) { ++ RC_TRACE(0x00000001, ("%s", ++ affected_classes.at(i)->name()->as_C_string())); ++ } ++ } ++ ++ // Add the array of affected classes and the array of redefined classes to get a list of all classes that need a redefinition ++ all_affected_klasses->appendAll(&klasses_to_redefine); ++ all_affected_klasses->appendAll(&affected_classes); ++ ++ // Sort the affected klasses such that a supertype is always on a smaller array index than its subtype. ++ jvmtiError result = do_topological_class_sorting(_class_defs, _class_count, &affected_classes, all_affected_klasses, Thread::current()); ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ RC_TRACE(0x00000001, ("Redefine order: ")); ++ for (int i=0; i<all_affected_klasses->length(); i++) { ++ RC_TRACE(0x00000001, ("%s", ++ all_affected_klasses->at(i)->name()->as_C_string())); ++ } ++ } ++ ++ return result; ++} ++ ++// Searches for the class bytes of the given class and returns them as a byte array. ++jvmtiError VM_RedefineClasses::find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed) { ++ ++ *not_changed = false; ++ ++ // Search for the index in the redefinition array that corresponds to the current class ++ int j; ++ for (j=0; j<_class_count; j++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[j].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ if (the_class_oop == the_class()) { ++ break; ++ } ++ } ++ ++ if (j == _class_count) { ++ ++ *not_changed = true; ++ ++ // Redefine with same bytecodes. This is a class that is only indirectly affected by redefinition, ++ // so the user did not specify a different bytecode for that class. 
++
++ if (the_class->get_cached_class_file_bytes() == NULL) {
++ // not cached, we need to reconstitute the class file from VM representation
++ constantPoolHandle constants(Thread::current(), the_class->constants());
++ ObjectLocker ol(constants, Thread::current()); // lock constant pool while we query it
++
++ JvmtiClassFileReconstituter reconstituter(the_class);
++ if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
++ return reconstituter.get_error();
++ }
++
++ *class_byte_count = (jint)reconstituter.class_file_size();
++ *class_bytes = (unsigned char*)reconstituter.class_file_bytes();
++
++ } else {
++
++ // it is cached, get it from the cache
++ *class_byte_count = the_class->get_cached_class_file_len();
++ *class_bytes = the_class->get_cached_class_file_bytes();
++ }
++
++ } else {
++
++ // Redefine with bytecodes at index j
++ *class_bytes = _class_defs[j].class_bytes;
++ *class_byte_count = _class_defs[j].class_byte_count;
++ }
++
++ return JVMTI_ERROR_NONE;
++}
++
++// Prologue of the VM operation, called on the Java thread in parallel to normal program execution.
+ bool VM_RedefineClasses::doit_prologue() {
+- if (_class_count == 0) {
+- _res = JVMTI_ERROR_NONE;
++
++ _revision_number++;
++ RC_TRACE(0x00000001, ("Redefinition with revision number %d started!", _revision_number));
++
++ assert(Thread::current()->is_Java_thread(), "must be Java thread");
++ RC_TIMER_START(_timer_prologue);
++
++ if (!check_arguments()) {
++ RC_TIMER_STOP(_timer_prologue);
+ return false;
+ }
+- if (_class_defs == NULL) {
+- _res = JVMTI_ERROR_NULL_POINTER;
++
++ // We first load new class versions in the prologue, because somewhere down the
++ // call chain it is required that the current thread is a Java thread.
++ _new_classes = new (ResourceObj::C_HEAP) GrowableArray<instanceKlassHandle>(5, true);
++ _result = load_new_class_versions(Thread::current());
++
++ RC_TRACE(0x00000001, ("Loaded new class versions!"));
++ if (_result != JVMTI_ERROR_NONE) {
++ RC_TRACE(0x00000001, ("error occurred: %d!", _result));
++ delete _new_classes;
++ _new_classes = NULL;
++ RC_TIMER_STOP(_timer_prologue);
+ return false;
+ }
++
++ RC_TRACE(0x00000001, ("nearly finished"));
++ VM_GC_Operation::doit_prologue();
++ RC_TIMER_STOP(_timer_prologue);
++ RC_TRACE(0x00000001, ("doit_prologue finished!"));
++ return true;
++}
++
++// Checks basic properties of the arguments of the redefinition command. 
++bool VM_RedefineClasses::check_arguments() { ++ ++ if (_class_count == 0) RC_ABORT(JVMTI_ERROR_NONE); ++ if (_class_defs == NULL) RC_ABORT(JVMTI_ERROR_NULL_POINTER); + for (int i = 0; i < _class_count; i++) { +- if (_class_defs[i].klass == NULL) { +- _res = JVMTI_ERROR_INVALID_CLASS; +- return false; ++ if (_class_defs[i].klass == NULL) RC_ABORT(JVMTI_ERROR_INVALID_CLASS); ++ if (_class_defs[i].class_byte_count == 0) RC_ABORT(JVMTI_ERROR_INVALID_CLASS_FORMAT); ++ if (_class_defs[i].class_bytes == NULL) RC_ABORT(JVMTI_ERROR_NULL_POINTER); ++ } ++ ++ return true; ++} ++ ++jvmtiError VM_RedefineClasses::check_exception() const { ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ RC_TRACE(0x00000001, ("parse_stream exception: '%s'", ++ ex_name->as_C_string())); ++ if (TraceRedefineClasses >= 1) { ++ java_lang_Throwable::print(PENDING_EXCEPTION, tty); ++ tty->print_cr(""); + } +- if (_class_defs[i].class_byte_count == 0) { +- _res = JVMTI_ERROR_INVALID_CLASS_FORMAT; +- return false; +- } +- if (_class_defs[i].class_bytes == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++ CLEAR_PENDING_EXCEPTION; ++ ++ if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { ++ return JVMTI_ERROR_UNSUPPORTED_VERSION; ++ } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { ++ return JVMTI_ERROR_INVALID_CLASS_FORMAT; ++ } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; ++ } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { ++ // The message will be "XXX (wrong name: YYY)" ++ return JVMTI_ERROR_NAMES_DONT_MATCH; ++ } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { ++ return JVMTI_ERROR_OUT_OF_MEMORY; ++ } else { ++ // Just in case more exceptions can be thrown.. ++ return JVMTI_ERROR_FAILS_VERIFICATION; + } + } + +- // Start timer after all the sanity checks; not quite accurate, but +- // better than adding a bunch of stop() calls. +- RC_TIMER_START(_timer_vm_op_prologue); +- +- // We first load new class versions in the prologue, because somewhere down the +- // call chain it is required that the current thread is a Java thread. +- _res = load_new_class_versions(Thread::current()); +- if (_res != JVMTI_ERROR_NONE) { +- // Free os::malloc allocated memory in load_new_class_version. +- os::free(_scratch_classes); +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return false; ++ return JVMTI_ERROR_NONE; ++} ++ ++// Loads all new class versions and stores the instanceKlass handles in an array. 
++jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { ++ ++ ResourceMark rm(THREAD); ++ ++ RC_TRACE(0x00000001, ("===================================================================")); ++ RC_TRACE(0x00000001, ("load new class versions (%d)", ++ _class_count)); ++ ++ // Retrieve an array of all classes that need to be redefined ++ GrowableArray<instanceKlassHandle> all_affected_klasses; ++ jvmtiError err = find_sorted_affected_classes(&all_affected_klasses); ++ if (err != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error finding sorted affected classes: %d", ++ (int)err)); ++ return err; + } + +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return true; +-} +- +-void VM_RedefineClasses::doit() { +- Thread *thread = Thread::current(); +- +- if (UseSharedSpaces) { +- // Sharing is enabled so we remap the shared readonly space to +- // shared readwrite, private just in case we need to redefine +- // a shared class. We do the remap during the doit() phase of +- // the safepoint to be safer. +- if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { +- RC_TRACE_WITH_THREAD(0x00000001, thread, +- ("failed to remap shared readonly space to readwrite, private")); +- _res = JVMTI_ERROR_INTERNAL; +- return; ++ ++ JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); ++ ++ _max_redefinition_flags = Klass::NoRedefinition; ++ jvmtiError result = JVMTI_ERROR_NONE; ++ ++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ RC_TRACE(0x00000002, ("Processing affected class %d of %d", ++ i+1, all_affected_klasses.length())); ++ ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ RC_TRACE(0x00000002, ("name=%s", ++ the_class->name()->as_C_string())); ++ ++ the_class->link_class(THREAD); ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ // Find new class bytes ++ const unsigned char* class_bytes; ++ jint class_byte_count; ++ jvmtiError error; ++ jboolean not_changed; ++ if ((error = find_class_bytes(the_class, &class_bytes, &class_byte_count, ¬_changed)) != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error finding class bytes: %d", ++ (int)error)); ++ result = error; ++ break; ++ } ++ assert(class_bytes != NULL && class_byte_count != 0, "Class bytes defined at this point!"); ++ ++ ++ // Set redefined class handle in JvmtiThreadState class. ++ // This redefined class is sent to agent event handler for class file ++ // load hook event. ++ state->set_class_being_redefined(&the_class, _class_load_kind); ++ ++ RC_TRACE(0x00000002, ("Before resolving from stream")); ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_loading); ++ ++ ++ // Parse the stream. ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ Symbol* the_class_sym = the_class->name(); ++ ClassFileStream st((u1*) class_bytes, class_byte_count, (char *)"__VM_RedefineClasses__"); ++ instanceKlassHandle new_class(THREAD, SystemDictionary::resolve_from_stream(the_class_sym, ++ the_class_loader, ++ protection_domain, ++ &st, ++ true, ++ the_class, ++ THREAD)); ++ ++ not_changed = false; ++ ++ RC_TIMER_STOP(_timer_class_loading); ++ RC_TIMER_START(_timer_prologue); ++ ++ RC_TRACE(0x00000002, ("After resolving class from stream!")); ++ // Clear class_being_redefined just to be sure. 
++ state->clear_class_being_redefined(); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++#ifdef ASSERT ++ ++ assert(new_class() != NULL, "Class could not be loaded!"); ++ assert(new_class() != the_class(), "must be different"); ++ assert(new_class->new_version() == NULL && new_class->old_version() != NULL, ""); ++ ++ ++ objArrayOop k_interfaces = new_class->local_interfaces(); ++ for (int j=0; j<k_interfaces->length(); j++) { ++ assert(((klassOop)k_interfaces->obj_at(j))->klass_part()->is_newest_version(), "just checking"); ++ } ++ ++ if (!THREAD->is_Compiler_thread()) { ++ ++ RC_TRACE(0x00000002, ("name=%s loader="INTPTR_FORMAT" protection_domain="INTPTR_FORMAT" ", ++ the_class->name()->as_C_string(), ++ (address)(the_class->class_loader()), ++ (address)(the_class->protection_domain()))); ++ // If we are on the compiler thread, we must not try to resolve a class. ++ klassOop systemLookup = SystemDictionary::resolve_or_null(the_class->name(), the_class->class_loader(), the_class->protection_domain(), THREAD); ++ ++ if (systemLookup != NULL) { ++ assert(systemLookup == new_class->old_version(), "Old class must be in system dictionary!"); ++ ++ ++ Klass *subklass = new_class()->klass_part()->subklass(); ++ while (subklass != NULL) { ++ assert(subklass->new_version() == NULL, "Most recent version of class!"); ++ subklass = subklass->next_sibling(); ++ } ++ } else { ++ // This can happen for reflection generated classes.. ? ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } ++ ++#endif ++ ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ if (new_class->layout_helper() != the_class->layout_helper()) { ++ RC_TRACE(0x00000001, ("Instance size change for class %s: new=%d old=%d", ++ new_class->name()->as_C_string(), ++ new_class->layout_helper(), ++ the_class->layout_helper())); ++ } ++ } ++ ++ // Set the new version of the class ++ new_class->set_revision_number(_revision_number); ++ new_class->set_redefinition_index(i); ++ the_class->set_new_version(new_class()); ++ _new_classes->append(new_class); ++ ++ assert(new_class->new_version() == NULL, ""); ++ ++ int redefinition_flags = Klass::NoRedefinition; ++ ++ if (not_changed) { ++ redefinition_flags = Klass::NoRedefinition; ++ } else if (AllowAdvancedClassRedefinition) { ++ redefinition_flags = calculate_redefinition_flags(new_class); ++ } else { ++ jvmtiError allowed = check_redefinition_allowed(new_class); ++ if (allowed != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error redefinition not allowed!")); ++ result = allowed; ++ break; ++ } ++ redefinition_flags = Klass::ModifyClass; ++ } ++ ++ if (new_class->super() != NULL) { ++ redefinition_flags = redefinition_flags | new_class->super()->klass_part()->redefinition_flags(); ++ } ++ ++ for (int j=0; j<new_class->local_interfaces()->length(); j++) { ++ redefinition_flags = redefinition_flags | ((klassOop)new_class->local_interfaces()->obj_at(j))->klass_part()->redefinition_flags(); ++ } ++ ++ new_class->set_redefinition_flags(redefinition_flags); ++ ++ _max_redefinition_flags = _max_redefinition_flags | redefinition_flags; ++ ++ if ((redefinition_flags & Klass::ModifyInstances) != 0) { ++ // TODO: Check if watch access flags of static fields are updated correctly. 
++ calculate_instance_update_information(_new_classes->at(i)()); ++ } else { ++ assert(new_class->layout_helper() >> 1 == new_class->old_version()->klass_part()->layout_helper() >> 1, "must be equal"); ++ assert(new_class->fields()->length() == ((instanceKlass*)new_class->old_version()->klass_part())->fields()->length(), "must be equal"); ++ ++ fieldDescriptor fd_new; ++ fieldDescriptor fd_old; ++ for (JavaFieldStream fs(new_class); !fs.done(); fs.next()) { ++ fd_new.initialize(new_class(), fs.index()); ++ fd_old.initialize(new_class->old_version(), fs.index()); ++ transfer_special_access_flags(&fd_old, &fd_new); ++ } ++ } ++ ++ if (RC_TRACE_ENABLED(0x00000008)) { ++ if (new_class->super() != NULL) { ++ RC_TRACE(0x00000008, ("Super class is %s", ++ new_class->super()->klass_part()->name()->as_C_string())); ++ } ++ } ++ ++#ifdef ASSERT ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->new_version() == NULL, "Super klass must be newest version!"); ++ ++ the_class->vtable()->verify(tty); ++ new_class->vtable()->verify(tty); ++#endif ++ ++ RC_TRACE(0x00000002, ("Verification done!")); ++ ++ if (i == all_affected_klasses.length() - 1) { ++ ++ // This was the last class processed => check if additional classes have been loaded in the meantime ++ ++ RC_TIMER_STOP(_timer_prologue); ++ lock_threads(); ++ RC_TIMER_START(_timer_prologue); ++ ++ for (int j=0; j<all_affected_klasses.length(); j++) { ++ ++ klassOop initial_klass = all_affected_klasses.at(j)(); ++ Klass *initial_subklass = initial_klass->klass_part()->subklass(); ++ Klass *cur_klass = initial_subklass; ++ while(cur_klass != NULL) { ++ ++ if(cur_klass->oop_is_instance() && cur_klass->is_newest_version()) { ++ instanceKlassHandle handle(THREAD, cur_klass->as_klassOop()); ++ if (!all_affected_klasses.contains(handle)) { ++ ++ int k = i + 1; ++ for (; k<all_affected_klasses.length(); k++) { ++ if (all_affected_klasses.at(k)->is_subtype_of(cur_klass->as_klassOop())) { ++ break; ++ } ++ } ++ all_affected_klasses.insert_before(k, handle); ++ RC_TRACE(0x00000002, ("Adding newly loaded class to affected classes: %s", ++ cur_klass->name()->as_C_string())); ++ } ++ } ++ ++ cur_klass = cur_klass->next_sibling(); ++ } ++ } ++ ++ int new_count = all_affected_klasses.length() - 1 - i; ++ if (new_count != 0) { ++ ++ unlock_threads(); ++ RC_TRACE(0x00000001, ("Found new number of affected classes: %d", ++ new_count)); ++ } + } + } + +- for (int i = 0; i < _class_count; i++) { +- redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread); ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; + } +- // Disable any dependent concurrent compilations +- SystemDictionary::notice_modification(); +- +- // Set flag indicating that some invariants are no longer true. +- // See jvmtiExport.hpp for detailed explanation. +- JvmtiExport::set_has_redefined_a_class(); ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_linking); ++ // Link and verify new classes _after_ all classes have been updated in the system dictionary! 
++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ instanceKlassHandle new_class(the_class->new_version()); ++ ++ RC_TRACE(0x00000002, ("Linking class %d/%d %s", ++ i, ++ all_affected_klasses.length(), ++ the_class->name()->as_C_string())); ++ new_class->link_class(THREAD); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ } ++ RC_TIMER_STOP(_timer_class_linking); ++ RC_TIMER_START(_timer_prologue); ++ ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } ++ ++ RC_TRACE(0x00000002, ("All classes loaded!")); + + #ifdef ASSERT +- SystemDictionary::classes_do(check_class, thread); ++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ assert(the_class->new_version() != NULL, "Must have been redefined"); ++ instanceKlassHandle new_version = instanceKlassHandle(THREAD, the_class->new_version()); ++ assert(new_version->new_version() == NULL, "Must be newest version"); ++ ++ if (!(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL)) { ++ new_version()->print(); ++ new_version->super()->print(); ++ } ++ assert(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL, "Super class must be newest version"); ++ } ++ ++ SystemDictionary::classes_do(check_class, THREAD); ++ + #endif ++ ++ RC_TRACE(0x00000001, ("Finished verification!")); ++ return JVMTI_ERROR_NONE; + } + +-void VM_RedefineClasses::doit_epilogue() { +- // Free os::malloc allocated memory. +- // The memory allocated in redefine will be free'ed in next VM operation. +- os::free(_scratch_classes); +- +- if (RC_TRACE_ENABLED(0x00000004)) { +- // Used to have separate timers for "doit" and "all", but the timer +- // overhead skewed the measurements. 
+- jlong doit_time = _timer_rsc_phase1.milliseconds() + +- _timer_rsc_phase2.milliseconds(); +- jlong all_time = _timer_vm_op_prologue.milliseconds() + doit_time; +- +- RC_TRACE(0x00000004, ("vm_op: all=" UINT64_FORMAT +- " prologue=" UINT64_FORMAT " doit=" UINT64_FORMAT, all_time, +- _timer_vm_op_prologue.milliseconds(), doit_time)); +- RC_TRACE(0x00000004, +- ("redefine_single_class: phase1=" UINT64_FORMAT " phase2=" UINT64_FORMAT, +- _timer_rsc_phase1.milliseconds(), _timer_rsc_phase2.milliseconds())); ++void VM_RedefineClasses::lock_threads() { ++ ++ RC_TIMER_START(_timer_wait_for_locks); ++ ++ ++ JavaThread *javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->set_should_bailout(true); ++ } ++ javaThread = javaThread->next(); + } ++ ++ int cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->compilation_mutex()->lock(); ++ cnt++; ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ RC_TRACE(0x00000002, ("Locked %d compiler threads", cnt)); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ javaThread->redefine_classes_mutex()->lock(); ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ ++ RC_TRACE(0x00000002, ("Locked %d threads", cnt)); ++ ++ RC_TIMER_STOP(_timer_wait_for_locks); + } + +-bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { +- // classes for primitives cannot be redefined +- if (java_lang_Class::is_primitive(klass_mirror)) { +- return false; ++void VM_RedefineClasses::unlock_threads() { ++ ++ int cnt = 0; ++ JavaThread *javaThread = Threads::first(); ++ Thread *thread = Thread::current(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ if (compilerThread->compilation_mutex()->owned_by_self()) { ++ compilerThread->compilation_mutex()->unlock(); ++ cnt++; ++ } ++ } ++ javaThread = javaThread->next(); + } +- klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror); +- // classes for arrays cannot be redefined +- if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) { +- return false; ++ ++ RC_TRACE(0x00000002, ("Unlocked %d compiler threads", cnt)); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ if (javaThread->redefine_classes_mutex()->owned_by_self()) { ++ javaThread->redefine_classes_mutex()->unlock(); ++ } ++ } ++ javaThread = javaThread->next(); + } +- return true; ++ ++ RC_TRACE(0x00000002, ("Unlocked %d threads", cnt)); + } + +-// Append the current entry at scratch_i in scratch_cp to *merge_cp_p +-// where the end of *merge_cp_p is specified by *merge_cp_length_p. For +-// direct CP entries, there is just the current entry to append. For +-// indirect and double-indirect CP entries, there are zero or more +-// referenced CP entries along with the current entry to append. +-// Indirect and double-indirect CP entries are handled by recursive +-// calls to append_entry() as needed. The referenced CP entries are +-// always appended to *merge_cp_p before the referee CP entry. 
These +-// referenced CP entries may already exist in *merge_cp_p in which case +-// there is nothing extra to append and only the current entry is +-// appended. +-void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp, +- int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, +- TRAPS) { +- +- // append is different depending on entry tag type +- switch (scratch_cp->tag_at(scratch_i).value()) { +- +- // The old verifier is implemented outside the VM. It loads classes, +- // but does not resolve constant pool entries directly so we never +- // see Class entries here with the old verifier. Similarly the old +- // verifier does not like Class entries in the input constant pool. +- // The split-verifier is implemented in the VM so it can optionally +- // and directly resolve constant pool entries to load classes. The +- // split-verifier can accept either Class entries or UnresolvedClass +- // entries in the input constant pool. We revert the appended copy +- // back to UnresolvedClass so that either verifier will be happy +- // with the constant pool entry. +- case JVM_CONSTANT_Class: +- { +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- (*merge_cp_p)->unresolved_klass_at_put(*merge_cp_length_p, +- scratch_cp->klass_name_at(scratch_i)); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // these are direct CP entries so they can be directly appended, +- // but double and long take two constant pool entries +- case JVM_CONSTANT_Double: // fall through +- case JVM_CONSTANT_Long: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p) += 2; +- } break; +- +- // these are direct CP entries so they can be directly appended +- case JVM_CONSTANT_Float: // fall through +- case JVM_CONSTANT_Integer: // fall through +- case JVM_CONSTANT_Utf8: // fall through +- +- // This was an indirect CP entry, but it has been changed into +- // an interned string so this entry can be directly appended. +- case JVM_CONSTANT_String: // fall through +- +- // These were indirect CP entries, but they have been changed into +- // Symbol*s so these entries can be directly appended. +- case JVM_CONSTANT_UnresolvedClass: // fall through +- case JVM_CONSTANT_UnresolvedString: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. 
+- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // this is an indirect CP entry so it needs special handling +- case JVM_CONSTANT_NameAndType: +- { +- int name_ref_i = scratch_cp->name_ref_index_at(scratch_i); +- int new_name_ref_i = 0; +- bool match = (name_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_ref_i, *merge_cp_p, name_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_ref_i = found_i; +- map_index(scratch_cp, name_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_name_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- int signature_ref_i = scratch_cp->signature_ref_index_at(scratch_i); +- int new_signature_ref_i = 0; +- match = (signature_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(signature_ref_i, *merge_cp_p, +- signature_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(signature_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != signature_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_signature_ref_i = found_i; +- map_index(scratch_cp, signature_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, signature_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_signature_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_name_ref_i and new_signature_ref_i will both be 0. +- // In that case, all we are appending is the current entry. +- if (new_name_ref_i == 0) { +- new_name_ref_i = name_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d name_ref_index change: %d to %d", +- *merge_cp_length_p, name_ref_i, new_name_ref_i)); +- } +- if (new_signature_ref_i == 0) { +- new_signature_ref_i = signature_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d signature_ref_index change: %d to %d", +- *merge_cp_length_p, signature_ref_i, new_signature_ref_i)); +- } +- +- (*merge_cp_p)->name_and_type_at_put(*merge_cp_length_p, +- new_name_ref_i, new_signature_ref_i); +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. 
+- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // this is a double-indirect CP entry so it needs special handling +- case JVM_CONSTANT_Fieldref: // fall through +- case JVM_CONSTANT_InterfaceMethodref: // fall through +- case JVM_CONSTANT_Methodref: +- { +- int klass_ref_i = scratch_cp->uncached_klass_ref_index_at(scratch_i); +- int new_klass_ref_i = 0; +- bool match = (klass_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(klass_ref_i, *merge_cp_p, klass_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(klass_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != klass_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_klass_ref_i = found_i; +- map_index(scratch_cp, klass_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, klass_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. Without the optimization where we +- // use JVM_CONSTANT_UnresolvedClass, then up to two entries +- // could be appended. +- new_klass_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- int name_and_type_ref_i = +- scratch_cp->uncached_name_and_type_ref_index_at(scratch_i); +- int new_name_and_type_ref_i = 0; +- match = (name_and_type_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_and_type_ref_i, *merge_cp_p, +- name_and_type_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_and_type_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_and_type_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_and_type_ref_i = found_i; +- map_index(scratch_cp, name_and_type_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_and_type_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can append more than +- // one entry so the post call query of *merge_cp_length_p +- // is required in order to get the right index for the +- // JVM_CONSTANT_NameAndType entry. +- new_name_and_type_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_klass_ref_i and new_name_and_type_ref_i will both be +- // 0. In that case, all we are appending is the current entry. 
+- if (new_klass_ref_i == 0) { +- new_klass_ref_i = klass_ref_i; +- } +- if (new_name_and_type_ref_i == 0) { +- new_name_and_type_ref_i = name_and_type_ref_i; +- } +- +- const char *entry_name; +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Fieldref: +- entry_name = "Fieldref"; +- (*merge_cp_p)->field_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_InterfaceMethodref: +- entry_name = "IFMethodref"; +- (*merge_cp_p)->interface_method_at_put(*merge_cp_length_p, +- new_klass_ref_i, new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_Methodref: +- entry_name = "Methodref"; +- (*merge_cp_p)->method_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- default: +- guarantee(false, "bad switch"); +- break; +- } +- +- if (klass_ref_i != new_klass_ref_i) { +- RC_TRACE(0x00080000, ("%s entry@%d class_index changed: %d to %d", +- entry_name, *merge_cp_length_p, klass_ref_i, new_klass_ref_i)); +- } +- if (name_and_type_ref_i != new_name_and_type_ref_i) { +- RC_TRACE(0x00080000, +- ("%s entry@%d name_and_type_index changed: %d to %d", +- entry_name, *merge_cp_length_p, name_and_type_ref_i, +- new_name_and_type_ref_i)); +- } +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // At this stage, Class or UnresolvedClass could be here, but not +- // ClassIndex +- case JVM_CONSTANT_ClassIndex: // fall through +- +- // Invalid is used as the tag for the second constant pool entry +- // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should +- // not be seen by itself. 
+- case JVM_CONSTANT_Invalid: // fall through +- +- // At this stage, String or UnresolvedString could be here, but not +- // StringIndex +- case JVM_CONSTANT_StringIndex: // fall through +- +- // At this stage JVM_CONSTANT_UnresolvedClassInError should not be +- // here +- case JVM_CONSTANT_UnresolvedClassInError: // fall through +- +- default: +- { +- // leave a breadcrumb +- jbyte bad_value = scratch_cp->tag_at(scratch_i).value(); +- ShouldNotReachHere(); +- } break; +- } // end switch tag value +-} // end append_entry() +- +- +-void VM_RedefineClasses::swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class) { +- typeArrayOop save; +- +- save = scratch_class->get_method_annotations_of(i); +- scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); +- scratch_class->set_method_annotations_of(j, save); +- +- save = scratch_class->get_method_parameter_annotations_of(i); +- scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); +- scratch_class->set_method_parameter_annotations_of(j, save); +- +- save = scratch_class->get_method_default_annotations_of(i); +- scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); +- scratch_class->set_method_default_annotations_of(j, save); +-} +- +- +-jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( +- instanceKlassHandle the_class, +- instanceKlassHandle scratch_class) { ++jvmtiError VM_RedefineClasses::check_redefinition_allowed(instanceKlassHandle scratch_class) { ++ ++ ++ ++ // Compatibility mode => check for unsupported modification ++ ++ ++ assert(scratch_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(scratch_class->old_version()); ++ + int i; + + // Check superclasses, or rather their names, since superclasses themselves can be + // requested to replace. + // Check for NULL superclass first since this might be java.lang.Object + if (the_class->super() != scratch_class->super() && +- (the_class->super() == NULL || scratch_class->super() == NULL || +- Klass::cast(the_class->super())->name() != +- Klass::cast(scratch_class->super())->name())) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ (the_class->super() == NULL || scratch_class->super() == NULL || ++ Klass::cast(the_class->super())->name() != ++ Klass::cast(scratch_class->super())->name())) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + + // Check if the number, names and order of directly implemented interfaces are the same. 
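The hunks that follow replace the strict compatibility checks with calculate_redefinition_flags(), whose results load_new_class_versions() (above) ORs together with the flags of each superclass and local interface, tracking the overall maximum in _max_redefinition_flags. Below is a minimal sketch of that accumulation, assuming a plain bit-flag encoding: the flag names echo the patch's Klass constants, while FakeKlass and accumulate_flags() are invented for illustration.

#include <cstdio>
#include <vector>

// Flag names follow the patch's Klass constants; the numeric encoding is
// an assumption made for this sketch.
enum RedefinitionFlags {
  NoRedefinition  = 0,
  ModifyClass     = 1 << 0, // class metadata (e.g. vtables) must change
  ModifyInstances = 1 << 1, // instance field layout must change
  RemoveSuperType = 1 << 2  // a supertype was removed from the hierarchy
};

// Invented stand-in for the klass hierarchy; not a VM type.
struct FakeKlass {
  int flags = NoRedefinition;
  FakeKlass* super = nullptr;
  std::vector<FakeKlass*> interfaces;
};

// Mirrors the accumulation in load_new_class_versions(): a class inherits
// every effect computed for its supertypes, and the operation keeps the
// maximum over all classes to size the later update work.
int accumulate_flags(FakeKlass* k, int& max_flags) {
  if (k->super != nullptr) k->flags |= k->super->flags;
  for (FakeKlass* i : k->interfaces) k->flags |= i->flags;
  max_flags |= k->flags;
  return k->flags;
}

int main() {
  FakeKlass base, derived;
  derived.super = &base;
  base.flags = ModifyInstances; // e.g. a field was added to the base class
  int max_flags = NoRedefinition;
  accumulate_flags(&base, max_flags);    // supertypes are processed first,
  accumulate_flags(&derived, max_flags); // so derived picks up ModifyInstances
  std::printf("derived flags=%d, max=%d\n", derived.flags, max_flags);
  return 0;
}

Processing supertypes first matches the topological sorting performed by find_sorted_affected_classes() earlier in the patch.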
+@@ -532,8 +678,8 @@ + } + for (i = 0; i < n_intfs; i++) { + if (Klass::cast((klassOop) k_interfaces->obj_at(i))->name() != +- Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + } + +@@ -682,12 +828,8 @@ + idnum_owner->set_method_idnum(new_num); + } + k_new_method->set_method_idnum(old_num); +- swap_all_method_annotations(old_num, new_num, scratch_class); + } + } +- RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni, +- k_old_method->name_and_sig_as_C_string(), oi)); + // advance to next pair of methods + ++oi; + ++ni; +@@ -696,11 +838,11 @@ + // method added, see if it is OK + new_flags = (jushort) k_new_method->access_flags().get_flags(); + if ((new_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // new methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; + } + { + u2 num = the_class->next_method_idnum(); +@@ -715,24 +857,19 @@ + idnum_owner->set_method_idnum(new_num); + } + k_new_method->set_method_idnum(num); +- swap_all_method_annotations(new_num, num, scratch_class); + } +- RC_TRACE(0x00008000, ("Method added: new: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni)); + ++ni; // advance to next new method + break; + case deleted: + // method deleted, see if it is OK + old_flags = (jushort) k_old_method->access_flags().get_flags(); + if ((old_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // deleted methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; + } +- RC_TRACE(0x00008000, ("Method deleted: old: %s [%d]", +- k_old_method->name_and_sig_as_C_string(), oi)); + ++oi; // advance to next old method + break; + default: +@@ -743,2084 +880,2272 @@ + return JVMTI_ERROR_NONE; + } + +- +-// Find new constant pool index value for old constant pool index value +-// by seaching the index map. Returns zero (0) if there is no mapped +-// value for the old constant pool index. +-int VM_RedefineClasses::find_new_index(int old_index) { +- if (_index_map_count == 0) { +- // map is empty so nothing can be found +- return 0; ++int VM_RedefineClasses::calculate_redefinition_flags(instanceKlassHandle new_class) { ++ ++ int result = Klass::NoRedefinition; ++ ++ ++ ++ RC_TRACE(0x00000002, ("Comparing different class versions of class %s", ++ new_class->name()->as_C_string())); ++ ++ assert(new_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(new_class->old_version()); ++ ++ // Check whether class is in the error init state. 
++ if (the_class->is_in_error_state()) { ++ // TBD #5057930: special error code is needed in 1.6 ++ //result = Klass::union_redefinition_level(result, Klass::Invalid); + } + +- if (old_index < 1 || old_index >= _index_map_p->length()) { +- // The old_index is out of range so it is not mapped. This should +- // not happen in regular constant pool merging use, but it can +- // happen if a corrupt annotation is processed. +- return 0; ++ int i; ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check superclasses ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->is_newest_version(), ""); ++ if (the_class->super() != new_class->super()) { ++ // Super class changed ++ ++ klassOop cur_klass = the_class->super(); ++ while (cur_klass != NULL) { ++ if (!new_class->is_subclass_of(cur_klass->klass_part()->newest_version())) { ++ RC_TRACE(0x00000002, ("Removed super class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ result = result | Klass::RemoveSuperType | Klass::ModifyInstances | Klass::ModifyClass; ++ ++ if (!cur_klass->klass_part()->has_subtype_changed()) { ++ RC_TRACE(0x00000002, ("Subtype changed of class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ cur_klass->klass_part()->set_subtype_changed(true); ++ } ++ } ++ ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ ++ cur_klass = new_class->super(); ++ while (cur_klass != NULL) { ++ if (!the_class->is_subclass_of(cur_klass->klass_part()->old_version())) { ++ RC_TRACE(0x00000002, ("Added super class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ result = result | Klass::ModifyClass | Klass::ModifyInstances; ++ } ++ cur_klass = cur_klass->klass_part()->super(); ++ } + } + +- int value = _index_map_p->at(old_index); +- if (value == -1) { +- // the old_index is not mapped +- return 0; +- } +- +- return value; +-} // end find_new_index() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// class pair. Otherwise, returns false. +-bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_Class && t1 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_Class && t2 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->klass_name_at(index1)->as_C_string(); +- char *s2 = cp2->klass_name_at(index2)->as_C_string(); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_class_mismatch() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// string pair. Otherwise, returns false. 
+-bool VM_RedefineClasses::is_unresolved_string_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_String && t1 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_String && t2 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->string_at_noresolve(index1); +- char *s2 = cp2->string_at_noresolve(index2); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_string_mismatch() +- +- +-jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { +- // For consistency allocate memory using os::malloc wrapper. +- _scratch_classes = (instanceKlassHandle *) +- os::malloc(sizeof(instanceKlassHandle) * _class_count); +- if (_scratch_classes == NULL) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } +- +- ResourceMark rm(THREAD); +- +- JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); +- // state can only be NULL if the current thread is exiting which +- // should not happen since we're trying to do a RedefineClasses +- guarantee(state != NULL, "exiting thread calling load_new_class_versions"); +- for (int i = 0; i < _class_count; i++) { +- oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); +- // classes for primitives cannot be redefined +- if (!is_modifiable_class(mirror)) { +- return JVMTI_ERROR_UNMODIFIABLE_CLASS; +- } +- klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- Symbol* the_class_sym = the_class->name(); +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loading name=%s kind=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), _class_load_kind, +- os::available_memory() >> 10)); +- +- ClassFileStream st((u1*) _class_defs[i].class_bytes, +- _class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); +- +- // Parse the stream. +- Handle the_class_loader(THREAD, the_class->class_loader()); +- Handle protection_domain(THREAD, the_class->protection_domain()); +- // Set redefined class handle in JvmtiThreadState class. +- // This redefined class is sent to agent event handler for class file +- // load hook event. +- state->set_class_being_redefined(&the_class, _class_load_kind); +- +- klassOop k = SystemDictionary::parse_stream(the_class_sym, +- the_class_loader, +- protection_domain, +- &st, +- THREAD); +- // Clear class_being_redefined just to be sure. 
+- state->clear_class_being_redefined(); +- +- // TODO: if this is retransform, and nothing changed we can skip it +- +- instanceKlassHandle scratch_class (THREAD, k); +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("parse_stream exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- +- if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { +- return JVMTI_ERROR_UNSUPPORTED_VERSION; +- } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { +- return JVMTI_ERROR_INVALID_CLASS_FORMAT; +- } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { +- return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; +- } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { +- // The message will be "XXX (wrong name: YYY)" +- return JVMTI_ERROR_NAMES_DONT_MATCH; +- } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { // Just in case more exceptions can be thrown.. +- return JVMTI_ERROR_FAILS_VERIFICATION; ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check interfaces ++ ++ // Interfaces removed? ++ objArrayOop old_interfaces = the_class->transitive_interfaces(); ++ for (i = 0; i<old_interfaces->length(); i++) { ++ instanceKlassHandle old_interface((klassOop)old_interfaces->obj_at(i)); ++ if (!new_class->implements_interface_any_version(old_interface())) { ++ result = result | Klass::RemoveSuperType | Klass::ModifyClass; ++ RC_TRACE(0x00000002, ("Removed interface %s", ++ old_interface->name()->as_C_string())); ++ ++ if (!old_interface->has_subtype_changed()) { ++ RC_TRACE(0x00000002, ("Subtype changed of interface %s", ++ old_interface->name()->as_C_string())); ++ old_interface->set_subtype_changed(true); + } + } +- +- // Ensure class is linked before redefine +- if (!the_class->is_linked()) { +- the_class->link_class(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("link_class exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; ++ } ++ ++ // Interfaces added? ++ objArrayOop new_interfaces = new_class->transitive_interfaces(); ++ for (i = 0; i<new_interfaces->length(); i++) { ++ if (!the_class->implements_interface_any_version((klassOop)new_interfaces->obj_at(i))) { ++ result = result | Klass::ModifyClass; ++ RC_TRACE(0x00000002, ("Added interface %s", ++ ((klassOop)new_interfaces->obj_at(i))->klass_part()->name()->as_C_string())); ++ } ++ } ++ ++ ++ // Check whether class modifiers are the same. ++ jushort old_flags = (jushort) the_class->access_flags().get_flags(); ++ jushort new_flags = (jushort) new_class->access_flags().get_flags(); ++ if (old_flags != new_flags) { ++ // TODO (tw): Can this have any effects? ++ } ++ ++ // Check if the number, names, types and order of fields declared in these classes ++ // are the same. 
++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ // access ++ old_flags = old_fs.access_flags().as_short(); ++ new_flags = new_fs.access_flags().as_short(); ++ if ((old_flags ^ new_flags) & JVM_RECOGNIZED_FIELD_MODIFIERS) { ++ // (tw) Can this have any effects? ++ } ++ // offset ++ if (old_fs.offset() != new_fs.offset()) { ++ result = result | Klass::ModifyInstances; ++ } ++ // name and signature ++ Symbol* name_sym1 = the_class->constants()->symbol_at(old_fs.name_index()); ++ Symbol* sig_sym1 = the_class->constants()->symbol_at(old_fs.signature_index()); ++ Symbol* name_sym2 = new_class->constants()->symbol_at(new_fs.name_index()); ++ Symbol* sig_sym2 = new_class->constants()->symbol_at(new_fs.signature_index()); ++ if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { ++ result = result | Klass::ModifyInstances; ++ } ++ } ++ ++ if (!old_fs.done() || !new_fs.done()) { ++ result = result | Klass::ModifyInstances; ++ } ++ ++ // Do a parallel walk through the old and new methods. Detect ++ // cases where they match (exist in both), have been added in ++ // the new methods, or have been deleted (exist only in the ++ // old methods). The class file parser places methods in order ++ // by method name, but does not order overloaded methods by ++ // signature. In order to determine what fate befell the methods, ++ // this code places the overloaded new methods that have matching ++ // old methods in the same order as the old methods and places ++ // new overloaded methods at the end of overloaded methods of ++ // that name. The code for this order normalization is adapted ++ // from the algorithm used in instanceKlass::find_method(). ++ // Since we are swapping out of order entries as we find them, ++ // we only have to search forward through the overloaded methods. ++ // Methods which are added and have the same name as an existing ++ // method (but different signature) will be put at the end of ++ // the methods with that name, and the name mismatch code will ++ // handle them. 
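++  // (DCEVM) Each method is classified below as matched, added, or deleted. Added
++  // and deleted methods must be private and static or final; any other addition
++  // or deletion is treated as a full class modification (Klass::ModifyClass).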
++ objArrayHandle k_old_methods(the_class->methods()); ++ objArrayHandle k_new_methods(new_class->methods()); ++ int n_old_methods = k_old_methods->length(); ++ int n_new_methods = k_new_methods->length(); ++ ++ int ni = 0; ++ int oi = 0; ++ while (true) { ++ methodOop k_old_method; ++ methodOop k_new_method; ++ enum { matched, added, deleted, undetermined } method_was = undetermined; ++ ++ if (oi >= n_old_methods) { ++ if (ni >= n_new_methods) { ++ break; // we've looked at everything, done ++ } ++ // New method at the end ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ method_was = added; ++ } else if (ni >= n_new_methods) { ++ // Old method, at the end, is deleted ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ method_was = deleted; ++ } else { ++ // There are more methods in both the old and new lists ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ if (k_old_method->name() != k_new_method->name()) { ++ // Methods are sorted by method name, so a mismatch means added ++ // or deleted ++ if (k_old_method->name()->fast_compare(k_new_method->name()) > 0) { ++ method_was = added; + } else { +- return JVMTI_ERROR_INTERNAL; ++ method_was = deleted; ++ } ++ } else if (k_old_method->signature() == k_new_method->signature()) { ++ // Both the name and signature match ++ method_was = matched; ++ } else { ++ // The name matches, but the signature doesn't, which means we have to ++ // search forward through the new overloaded methods. ++ int nj; // outside the loop for post-loop check ++ for (nj = ni + 1; nj < n_new_methods; nj++) { ++ methodOop m = (methodOop)k_new_methods->obj_at(nj); ++ if (k_old_method->name() != m->name()) { ++ // reached another method name so no more overloaded methods ++ method_was = deleted; ++ break; ++ } ++ if (k_old_method->signature() == m->signature()) { ++ // found a match so swap the methods ++ k_new_methods->obj_at_put(ni, m); ++ k_new_methods->obj_at_put(nj, k_new_method); ++ k_new_method = m; ++ method_was = matched; ++ break; ++ } ++ } ++ ++ if (nj >= n_new_methods) { ++ // reached the end without a match; so method was deleted ++ method_was = deleted; + } + } + } + +- // Do the validity checks in compare_and_normalize_class_versions() +- // before verifying the byte codes. By doing these checks first, we +- // limit the number of functions that require redirection from +- // the_class to scratch_class. In particular, we don't have to +- // modify JNI GetSuperclass() and thus won't change its performance. +- jvmtiError res = compare_and_normalize_class_versions(the_class, +- scratch_class); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ switch (method_was) { ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ // (tw) Can this have any effects? Probably yes on vtables? ++ result = result | Klass::ModifyClass; + } +- +- // verify what the caller passed us + { +- // The bug 6214132 caused the verification to fail. +- // Information about the_class and scratch_class is temporarily +- // recorded into jvmtiThreadState. This data is used to redirect +- // the_class to scratch_class in the JVM_* functions called by the +- // verifier. Please, refer to jvmtiThreadState.hpp for the detailed +- // description. 
+- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify( +- scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes exception: '%s'", ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller the bytecodes are bad +- return JVMTI_ERROR_FAILS_VERIFICATION; ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = new_class->method_with_idnum(old_num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(old_num); ++ RC_TRACE(0x00000002, ("swapping idnum of new and old method %d / %d!", ++ new_num, ++ old_num)); ++ // swap_all_method_annotations(old_num, new_num, new_class); + } + } +- +- res = merge_cp_and_rewrite(the_class, scratch_class, THREAD); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni, ++ k_old_method->name_and_sig_as_C_string(), oi)); ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ result = result | Klass::ModifyClass; + } +- +- if (VerifyMergedCPBytecodes) { +- // verify what we have done during constant pool merging +- { +- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify(scratch_class, Verifier::ThrowException, true, THREAD); ++ { ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ result = result | Klass::ModifyClass; + } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes post merge-CP exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller that constant pool merging screwed up +- return JVMTI_ERROR_INTERNAL; ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = new_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ //swap_all_method_annotations(new_num, num, new_class); ++ } ++ RC_TRACE(0x00000001, ("Method added: new: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni)); ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private 
should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ result = result | Klass::ModifyClass; ++ } ++ RC_TRACE(0x00000001, ("Method deleted: old: %s [%d]", ++ k_old_method->name_and_sig_as_C_string(), oi)); ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ if (new_class()->size() != new_class->old_version()->size()) { ++ result |= Klass::ModifyClassSize; ++ } ++ ++ if (new_class->size_helper() != ((instanceKlass*)(new_class->old_version()->klass_part()))->size_helper()) { ++ result |= Klass::ModifyInstanceSize; ++ } ++ ++ methodHandle instanceTransformerMethod(new_class->find_method(vmSymbols::transformer_name(), vmSymbols::void_method_signature())); ++ if (!instanceTransformerMethod.is_null() && !instanceTransformerMethod->is_static()) { ++ result |= Klass::HasInstanceTransformer; ++ } ++ ++ // (tw) Check method bodies to be able to return NoChange? ++ return result; ++} ++ ++void VM_RedefineClasses::calculate_instance_update_information(klassOop new_version) { ++ ++ class UpdateFieldsEvolutionClosure : public FieldEvolutionClosure { ++ ++ private: ++ ++ GrowableArray<int> info; ++ int curPosition; ++ bool copy_backwards; ++ ++ public: ++ ++ bool does_copy_backwards() { ++ return copy_backwards; ++ } ++ ++ UpdateFieldsEvolutionClosure(klassOop klass) { ++ ++ int base_offset = instanceOopDesc::base_offset_in_bytes(); ++ ++ if (klass->klass_part()->newest_version() == SystemDictionary::Reference_klass()->klass_part()->newest_version()) { ++ base_offset += java_lang_ref_Reference::number_of_fake_oop_fields*size_of_type(T_OBJECT); ++ } ++ ++ info.append(base_offset); ++ info.append(0); ++ curPosition = base_offset; ++ copy_backwards = false; ++ } ++ ++ GrowableArray<int> &finish() { ++ info.append(0); ++ return info; ++ } ++ ++ virtual void do_new_field(fieldDescriptor* fd){ ++ int alignment = fd->offset() - curPosition; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } ++ ++ int size = size_of_type(fd->field_type()); ++ fill(size); ++ } ++ ++ private: ++ ++ void fill(int size) { ++ if (info.length() > 0 && info.at(info.length() - 1) < 0) { ++ (*info.adr_at(info.length() - 1)) -= size; ++ } else { ++ info.append(-size); ++ } ++ ++ curPosition += size; ++ } ++ ++ int size_of_type(BasicType type) { ++ int size = 0; ++ switch(type) { ++ case T_BOOLEAN: ++ size = sizeof(jboolean); ++ break; ++ ++ case T_CHAR: ++ size = (sizeof(jchar)); ++ break; ++ ++ case T_FLOAT: ++ size = (sizeof(jfloat)); ++ break; ++ ++ case T_DOUBLE: ++ size = (sizeof(jdouble)); ++ break; ++ ++ case T_BYTE: ++ size = (sizeof(jbyte)); ++ break; ++ ++ case T_SHORT: ++ size = (sizeof(jshort)); ++ break; ++ ++ case T_INT: ++ size = (sizeof(jint)); ++ break; ++ ++ case T_LONG: ++ size = (sizeof(jlong)); ++ break; ++ ++ case T_OBJECT: ++ case T_ARRAY: ++ if (UseCompressedOops) { ++ size = sizeof(narrowOop); ++ } else { ++ size = (sizeof(oop)); ++ } ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ assert(size > 0, ""); ++ return size; ++ ++ } ++ ++ public: ++ ++ virtual void do_old_field(fieldDescriptor* fd){} ++ ++ virtual void do_changed_field(fieldDescriptor* old_fd, fieldDescriptor *new_fd){ ++ ++ int alignment = new_fd->offset() - curPosition; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } ++ ++ assert(old_fd->field_type() == 
new_fd->field_type(), ""); ++ assert(curPosition == new_fd->offset(), "must be correct offset!"); ++ ++ int offset = old_fd->offset(); ++ int size = size_of_type(old_fd->field_type()); ++ ++ int prevEnd = -1; ++ if (info.length() > 0 && info.at(info.length() - 1) > 0) { ++ prevEnd = info.at(info.length() - 2) + info.at(info.length() - 1); ++ } ++ ++ if (prevEnd == offset) { ++ info.at_put(info.length() - 2, info.at(info.length() - 2) + size); ++ } else { ++ info.append(size); ++ info.append(offset); ++ } ++ ++ if (old_fd->offset() < new_fd->offset()) { ++ copy_backwards = true; ++ } ++ ++ transfer_special_access_flags(old_fd, new_fd); ++ ++ curPosition += size; ++ } ++ }; ++ ++ UpdateFieldsEvolutionClosure cl(new_version); ++ ((instanceKlass*)new_version->klass_part())->do_fields_evolution(&cl); ++ ++ GrowableArray<int> result = cl.finish(); ++ ((instanceKlass*)new_version->klass_part())->store_update_information(result); ++ ((instanceKlass*)new_version->klass_part())->set_copying_backwards(cl.does_copy_backwards()); ++ ++ if (RC_TRACE_ENABLED(0x00000002)) { ++ RC_TRACE(0x00000002, ("Instance update information for %s:", ++ new_version->klass_part()->name()->as_C_string())); ++ if (cl.does_copy_backwards()) { ++ RC_TRACE(0x00000002, ("\tDoes copy backwards!")); ++ } ++ for (int i=0; i<result.length(); i++) { ++ int curNum = result.at(i); ++ if (curNum < 0) { ++ RC_TRACE(0x00000002, ("\t%d CLEAN", curNum)); ++ } else if (curNum > 0) { ++ RC_TRACE(0x00000002, ("\t%d COPY from %d", curNum, result.at(i + 1))); ++ i++; ++ } else { ++ RC_TRACE(0x00000002, ("\tEND")); ++ } ++ } ++ } ++} ++ ++Symbol* VM_RedefineClasses::signature_to_class_name(Symbol* signature) { ++ assert(FieldType::is_obj(signature), ""); ++ return SymbolTable::new_symbol(signature->as_C_string() + 1, signature->utf8_length() - 2, Thread::current()); ++} ++ ++void VM_RedefineClasses::calculate_type_check_information(klassOop klass) { ++ if (klass->klass_part()->is_redefining()) { ++ klass = klass->klass_part()->old_version(); ++ } ++ ++ // We found an instance klass! ++ instanceKlass *cur_instance_klass = instanceKlass::cast(klass); ++ GrowableArray< Pair<int, klassOop> > type_check_information; ++ ++ class MyFieldClosure : public FieldClosure { ++ ++ public: ++ ++ GrowableArray< Pair<int, klassOop> > *_arr; ++ ++ MyFieldClosure(GrowableArray< Pair<int, klassOop> > *arr) { ++ _arr = arr; ++ } ++ ++ virtual void do_field(fieldDescriptor* fd) { ++ if (fd->field_type() == T_OBJECT) { ++ Symbol* signature = fd->signature(); ++ if (FieldType::is_obj(signature)) { ++ Symbol* name = signature_to_class_name(signature); ++ klassOop field_klass; ++ if (is_field_dangerous(name, fd, field_klass)) { ++ RC_TRACE(0x00000002, ("Found dangerous field %s in klass %s of type %s", ++ fd->name()->as_C_string(), ++ fd->field_holder()->klass_part()->name()->as_C_string(), ++ name->as_C_string())); ++ _arr->append(Pair<int, klassOop>(fd->offset(), field_klass->klass_part()->newest_version())); ++ } ++ } ++ ++ // Array fields can never be a problem! 
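++        // (DCEVM) Array element values are validated separately by the object
++        // array check in check_field_value_types.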
++ } ++ } ++ ++ bool is_field_dangerous(Symbol* klass_name, fieldDescriptor *fd, klassOop &field_klass) { ++ field_klass = SystemDictionary::find(klass_name, fd->field_holder()->klass_part()->class_loader(), ++ fd->field_holder()->klass_part()->protection_domain(), Thread::current()); ++ if(field_klass != NULL) { ++ if (field_klass->klass_part()->is_redefining()) { ++ field_klass = field_klass->klass_part()->old_version(); ++ } ++ if (field_klass->klass_part()->has_subtype_changed()) { ++ return true; ++ } ++ } ++ return false; ++ } ++ }; ++ ++ MyFieldClosure fieldClosure(&type_check_information); ++ cur_instance_klass->do_nonstatic_fields(&fieldClosure); ++ ++ if (type_check_information.length() > 0) { ++ type_check_information.append(Pair<int, klassOop>(-1, NULL)); ++ cur_instance_klass->store_type_check_information(type_check_information); ++ } ++} ++ ++bool VM_RedefineClasses::check_field_value_types() { ++ ++ Thread *THREAD = Thread::current(); ++ class CheckFieldTypesClosure : public ObjectClosure { ++ ++ private: ++ ++ bool _result; ++ ++ public: ++ ++ CheckFieldTypesClosure() { ++ _result = true; ++ } ++ ++ bool result() { return _result; } ++ ++ virtual void do_object(oop obj) { ++ ++ if (!_result) { ++ return; ++ } ++ ++ if (obj->is_objArray()) { ++ ++ objArrayOop array = objArrayOop(obj); ++ ++ klassOop element_klass = objArrayKlass::cast(array->klass())->element_klass(); ++ ++ if (element_klass->klass_part()->has_subtype_changed()) { ++ int length = array->length(); ++ for (int i=0; i<length; i++) { ++ oop element = array->obj_at(i); ++ if (element != NULL && element->blueprint()->newest_version()->klass_part()->is_redefining()) { ++ // Check subtype relationship to static type of array ++ if (!element->blueprint()->newest_version()->klass_part()->is_subtype_of(element_klass->klass_part()->newest_version())) { ++ RC_TRACE(0x00000001, ("Array value is INVALID - abort redefinition (static_type=%s, index=%d, dynamic_type=%s)", ++ element_klass->klass_part()->name()->as_C_string(), ++ i, ++ element->blueprint()->name()->as_C_string())); ++ _result = false; ++ break; ++ } ++ } ++ } ++ } ++ ++ } else { ++ Pair<int, klassOop> *cur = obj->klass()->klass_part()->type_check_information(); ++ if (cur != NULL) { ++ // Type check information exists for this oop ++ while ((*cur).left() != -1) { ++ check_field(obj, (*cur).left(), (*cur).right()); ++ cur++; ++ } + } + } + } + +- Rewriter::rewrite(scratch_class, THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- Rewriter::relocate_and_link(scratch_class, THREAD); +- } +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- return JVMTI_ERROR_INTERNAL; +- } +- } +- +- _scratch_classes[i] = scratch_class; +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loaded name=%s (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), os::available_memory() >> 10)); +- } +- +- return JVMTI_ERROR_NONE; +-} +- +- +-// Map old_index to new_index as needed. scratch_cp is only needed +-// for RC_TRACE() calls. 
+-void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp, +- int old_index, int new_index) { +- if (find_new_index(old_index) != 0) { +- // old_index is already mapped +- return; +- } +- +- if (old_index == new_index) { +- // no mapping is needed +- return; +- } +- +- _index_map_p->at_put(old_index, new_index); +- _index_map_count++; +- +- RC_TRACE(0x00040000, ("mapped tag %d at index %d to %d", +- scratch_cp->tag_at(old_index).value(), old_index, new_index)); +-} // end map_index() +- +- +-// Merge old_cp and scratch_cp and return the results of the merge via +-// merge_cp_p. The number of entries in *merge_cp_p is returned via +-// merge_cp_length_p. The entries in old_cp occupy the same locations +-// in *merge_cp_p. Also creates a map of indices from entries in +-// scratch_cp to the corresponding entry in *merge_cp_p. Index map +-// entries are only created for entries in scratch_cp that occupy a +-// different location in *merged_cp_p. +-bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS) { +- +- if (merge_cp_p == NULL) { +- assert(false, "caller must provide scatch constantPool"); +- return false; // robustness +- } +- if (merge_cp_length_p == NULL) { +- assert(false, "caller must provide scatch CP length"); +- return false; // robustness +- } +- // Worst case we need old_cp->length() + scratch_cp()->length(), +- // but the caller might be smart so make sure we have at least +- // the minimum. +- if ((*merge_cp_p)->length() < old_cp->length()) { +- assert(false, "merge area too small"); +- return false; // robustness +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("old_cp_len=%d, scratch_cp_len=%d", old_cp->length(), +- scratch_cp->length())); +- +- { +- // Pass 0: +- // The old_cp is copied to *merge_cp_p; this means that any code +- // using old_cp does not have to change. This work looks like a +- // perfect fit for constantPoolOop::copy_cp_to(), but we need to +- // handle one special case: +- // - revert JVM_CONSTANT_Class to JVM_CONSTANT_UnresolvedClass +- // This will make verification happy. +- +- int old_i; // index into old_cp +- +- // index zero (0) is not used in constantPools +- for (old_i = 1; old_i < old_cp->length(); old_i++) { +- // leave debugging crumb +- jbyte old_tag = old_cp->tag_at(old_i).value(); +- switch (old_tag) { +- case JVM_CONSTANT_Class: +- case JVM_CONSTANT_UnresolvedClass: +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- // May be resolving while calling this so do the same for +- // JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition) +- (*merge_cp_p)->unresolved_klass_at_put(old_i, +- old_cp->klass_name_at(old_i)); +- break; +- +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // just copy the entry to *merge_cp_p, but double and long take +- // two constant pool entries +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- old_i++; +- break; +- +- default: +- // just copy the entry to *merge_cp_p +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- break; +- } +- } // end for each old_cp entry +- +- // We don't need to sanity check that *merge_cp_length_p is within +- // *merge_cp_p bounds since we have the minimum on-entry check above. 
+- (*merge_cp_length_p) = old_i; +- } +- +- // merge_cp_len should be the same as old_cp->length() at this point +- // so this trace message is really a "warm-and-breathing" message. +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 0: merge_cp_len=%d", *merge_cp_length_p)); +- +- int scratch_i; // index into scratch_cp +- { +- // Pass 1a: +- // Compare scratch_cp entries to the old_cp entries that we have +- // already copied to *merge_cp_p. In this pass, we are eliminating +- // exact duplicates (matching entry at same index) so we only +- // compare entries in the common indice range. +- int increment = 1; +- int pass1a_length = MIN2(old_cp->length(), scratch_cp->length()); +- for (scratch_i = 1; scratch_i < pass1a_length; scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- bool match = scratch_cp->compare_entry_to(scratch_i, *merge_cp_p, +- scratch_i, CHECK_0); +- if (match) { +- // found a match at the same index so nothing more to do +- continue; +- } else if (is_unresolved_class_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved class entry at the same index +- // with the same string value. Since Pass 0 reverted any +- // class entries to unresolved class entries in *merge_cp_p, +- // we go with the unresolved class entry. +- continue; +- } else if (is_unresolved_string_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved string entry at the same index +- // with the same string value. We can live with whichever +- // happens to be at scratch_i in *merge_cp_p. +- continue; +- } +- +- int found_i = scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, +- CHECK_0); +- if (found_i != 0) { +- guarantee(found_i != scratch_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // The find_matching_entry() call above could fail to find a match +- // due to a resolved versus unresolved class or string entry situation +- // like we solved above with the is_unresolved_*_mismatch() calls. +- // However, we would have to call is_unresolved_*_mismatch() over +- // all of *merge_cp_p (potentially) and that doesn't seem to be +- // worth the time. +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1a: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- +- if (scratch_i < scratch_cp->length()) { +- // Pass 1b: +- // old_cp is smaller than scratch_cp so there are entries in +- // scratch_cp that we have not yet processed. We take care of +- // those now. 
+- int increment = 1; +- for (; scratch_i < scratch_cp->length(); scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- int found_i = +- scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, CHECK_0); +- if (found_i != 0) { +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- } +- +- return true; +-} // end merge_constant_pools() +- +- +-// Merge constant pools between the_class and scratch_class and +-// potentially rewrite bytecodes in scratch_class to use the merged +-// constant pool. +-jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class, +- TRAPS) { +- // worst case merged constant pool length is old and new combined +- int merge_cp_length = the_class->constants()->length() +- + scratch_class->constants()->length(); +- +- constantPoolHandle old_cp(THREAD, the_class->constants()); +- constantPoolHandle scratch_cp(THREAD, scratch_class->constants()); +- +- // Constant pools are not easily reused so we allocate a new one +- // each time. +- // merge_cp is created unsafe for concurrent GC processing. It +- // should be marked safe before discarding it. Even though +- // garbage, if it crosses a card boundary, it may be scanned +- // in order to find the start of the first complete object on the card. +- constantPoolHandle merge_cp(THREAD, +- oopFactory::new_constantPool(merge_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- int orig_length = old_cp->orig_length(); +- if (orig_length == 0) { +- // This old_cp is an actual original constant pool. We save +- // the original length in the merged constant pool so that +- // merge_constant_pools() can be more efficient. If a constant +- // pool has a non-zero orig_length() value, then that constant +- // pool was created by a merge operation in RedefineClasses. +- merge_cp->set_orig_length(old_cp->length()); +- } else { +- // This old_cp is a merged constant pool from a previous +- // RedefineClasses() calls so just copy the orig_length() +- // value. +- merge_cp->set_orig_length(old_cp->orig_length()); +- } +- +- ResourceMark rm(THREAD); +- _index_map_count = 0; +- _index_map_p = new intArray(scratch_cp->length(), -1); +- +- bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp, +- &merge_cp_length, THREAD); +- if (!result) { +- // The merge can fail due to memory allocation failure or due +- // to robustness checks. +- return JVMTI_ERROR_INTERNAL; +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("merge_cp_len=%d, index_map_len=%d", merge_cp_length, _index_map_count)); +- +- if (_index_map_count == 0) { +- // there is nothing to map between the new and merged constant pools +- +- if (old_cp->length() == scratch_cp->length()) { +- // The old and new constant pools are the same length and the +- // index map is empty. 
This means that the three constant pools +- // are equivalent (but not the same). Unfortunately, the new +- // constant pool has not gone through link resolution nor have +- // the new class bytecodes gone through constant pool cache +- // rewriting so we can't use the old constant pool with the new +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else if (old_cp->length() < scratch_cp->length()) { +- // The old constant pool has fewer entries than the new constant +- // pool and the index map is empty. This means the new constant +- // pool is a superset of the old constant pool. However, the old +- // class bytecodes have already gone through constant pool cache +- // rewriting so we can't use the new constant pool with the old +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else { +- // The old constant pool has more entries than the new constant +- // pool and the index map is empty. This means that both the old +- // and merged constant pools are supersets of the new constant +- // pool. +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool; the previous new constant pool will +- // get GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- // drop local ref to the merged constant pool +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); +- } +- } else { +- if (RC_TRACE_ENABLED(0x00040000)) { +- // don't want to loop unless we are tracing +- int count = 0; +- for (int i = 1; i < _index_map_p->length(); i++) { +- int value = _index_map_p->at(i); +- +- if (value != -1) { +- RC_TRACE_WITH_THREAD(0x00040000, THREAD, +- ("index_map[%d]: old=%d new=%d", count, i, value)); +- count++; ++ void check_field(oop obj, int offset, klassOop static_type) { ++ oop field_value = obj->obj_field(offset); ++ if (field_value != NULL) { ++ // Field is not null ++ if (field_value->klass()->klass_part()->newest_version()->klass_part()->is_subtype_of(static_type)) { ++ // We are OK ++ RC_TRACE(0x00008000, ("Field value is OK (klass=%s, static_type=%s, offset=%d, dynamic_type=%s)", ++ obj->klass()->klass_part()->name()->as_C_string(), ++ static_type->klass_part()->name()->as_C_string(), ++ offset, ++ field_value->klass()->klass_part()->name()->as_C_string())); ++ } else { ++ // Failure! ++ RC_TRACE(0x00000001, ("Field value is INVALID - abort redefinition (klass=%s, static_type=%s, offset=%d, dynamic_type=%s)", ++ obj->klass()->klass_part()->name()->as_C_string(), ++ static_type->klass_part()->name()->as_C_string(), ++ offset, ++ field_value->klass()->klass_part()->name()->as_C_string())); ++ _result = false; + } + } + } +- +- // We have entries mapped between the new and merged constant pools +- // so we have to rewrite some constant pool references. +- if (!rewrite_cp_refs(scratch_class, THREAD)) { +- return JVMTI_ERROR_INTERNAL; ++ }; ++ ++ CheckFieldTypesClosure myObjectClosure; ++ ++ // make sure that heap is parsable (fills TLABs with filler objects) ++ Universe::heap()->ensure_parsability(false); // no need to retire TLABs ++ ++ // do the iteration ++ // If this operation encounters a bad object when using CMS, ++ // consider using safe_object_iterate() which avoids perm gen ++ // objects that may contain bad references. 
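++  // (DCEVM) This visits every live object exactly once; each object is checked
++  // against the type check information precomputed for its class above.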
++  Universe::heap()->object_iterate(&myObjectClosure);
++
++  // when sharing is enabled we must iterate over the shared spaces
++  if (UseSharedSpaces) {
++    GenCollectedHeap* gch = GenCollectedHeap::heap();
++    CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();
++    gen->ro_space()->object_iterate(&myObjectClosure);
++    gen->rw_space()->object_iterate(&myObjectClosure);
++  }
++
++  return myObjectClosure.result();
++}
++
++void VM_RedefineClasses::clear_type_check_information(klassOop k) {
++  if (k->klass_part()->is_redefining()) {
++    k = k->klass_part()->old_version();
++  }
++
++  // We found an instance klass!
++  instanceKlass *cur_instance_klass = instanceKlass::cast(k);
++  cur_instance_klass->clear_type_check_information();
++}
++
++void VM_RedefineClasses::update_active_methods() {
++
++  RC_TRACE(0x00000002, ("Updating active methods"));
++  JavaThread *java_thread = Threads::first();
++  while (java_thread != NULL) {
++
++    int stack_depth = 0;
++    if (java_thread->has_last_Java_frame()) {
++
++      RC_TRACE(0x0000000400, ("checking stack of Java thread %s", java_thread->name()));
++
++      // vframes are resource allocated
++      Thread* current_thread = Thread::current();
++      ResourceMark rm(current_thread);
++      HandleMark hm(current_thread);
++
++      RegisterMap reg_map(java_thread);
++      frame f = java_thread->last_frame();
++      vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++      frame* last_entry_frame = NULL;
++
++      while (vf != NULL) {
++        if (vf->is_java_frame()) {
++          // java frame (interpreted, compiled, ...)
++          javaVFrame *jvf = javaVFrame::cast(vf);
++
++          if (!(jvf->method()->is_native())) {
++            int bci = jvf->bci();
++            RC_TRACE(0x00000400, ("found method: %s / bci=%d", jvf->method()->name()->as_C_string(), bci));
++            ResourceMark rm(Thread::current());
++            HandleMark hm;
++            instanceKlassHandle klass(jvf->method()->method_holder());
++
++            if (jvf->method()->new_version() != NULL && jvf->is_interpreted_frame()) {
++
++              RC_TRACE(0x00000002, ("Found method that should just be updated to the newest version %s",
++                jvf->method()->name_and_sig_as_C_string()));
++
++              if (RC_TRACE_ENABLED(0x01000000)) {
++                int code_size = jvf->method()->code_size();
++                char *code_base_old = (char*)jvf->method()->code_base();
++                char *code_base_new = (char*)jvf->method()->new_version()->code_base();
++                for (int i=0; i<code_size; i++) {
++                  tty->print_cr("old=%d new=%d", *code_base_old++, *code_base_new++);
++                }
++                jvf->method()->print_codes_on(tty);
++                jvf->method()->new_version()->print_codes_on(tty);
++              }
++
++              assert(jvf->is_interpreted_frame(), "Every frame must be interpreted!");
++              interpretedVFrame *iframe = (interpretedVFrame *)jvf;
++
++              if (RC_TRACE_ENABLED(0x01000000)) {
++                constantPoolCacheOop cp_old = jvf->method()->constants()->cache();
++                tty->print_cr("old cp");
++                for (int i=0; i<cp_old->length(); i++) {
++                  cp_old->entry_at(i)->print(tty, i);
++                }
++                constantPoolCacheOop cp_new = jvf->method()->new_version()->constants()->cache();
++                tty->print_cr("new cp");
++                for (int i=0; i<cp_new->length(); i++) {
++                  cp_new->entry_at(i)->print(tty, i);
++                }
++              }
++
++              iframe->set_method(jvf->method()->new_version(), bci);
++              RC_TRACE(0x00000002, ("Updated method to newer version"));
++              assert(jvf->method()->new_version() == NULL, "must be latest version");
++
++            }
++          }
++        }
++        vf = vf->sender();
++      }
+     }
+
+-    // Replace the new constant pool with a shrunken copy of the
+-    // merged constant pool so now the rewritten bytecodes have
+-    // valid references; the previous new constant pool will get
+-    // GCed.
+-    set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
+-      THREAD);
+-    merge_cp()->set_is_conc_safe(true);
++    // Advance to next thread
++    java_thread = java_thread->next();
+   }
+-  assert(old_cp()->is_conc_safe(), "Just checking");
+-  assert(scratch_cp()->is_conc_safe(), "Just checking");
+-
+-  return JVMTI_ERROR_NONE;
+-} // end merge_cp_and_rewrite()
+-
+-
+-// Rewrite constant pool references in klass scratch_class.
+-bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class,
+-       TRAPS) {
+-
+-  // rewrite constant pool references in the methods:
+-  if (!rewrite_cp_refs_in_methods(scratch_class, THREAD)) {
+-    // propagate failure back to caller
++}
++
++void VM_RedefineClasses::method_forwarding() {
++
++  int forwarding_count = 0;
++  JavaThread *java_thread = Threads::first();
++  while (java_thread != NULL) {
++
++    int stack_depth = 0;
++    if (java_thread->has_last_Java_frame()) {
++
++      RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name()));
++
++      // vframes are resource allocated
++      Thread* current_thread = Thread::current();
++      ResourceMark rm(current_thread);
++      HandleMark hm(current_thread);
++
++      RegisterMap reg_map(java_thread);
++      frame f = java_thread->last_frame();
++      vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++      frame* last_entry_frame = NULL;
++
++      while (vf != NULL) {
++        if (vf->is_java_frame()) {
++          // java frame (interpreted, compiled, ...)
++          javaVFrame *jvf = javaVFrame::cast(vf);
++
++          if (!(jvf->method()->is_native())) {
++            RC_TRACE(0x00008000, ("found method: %s",
++              jvf->method()->name()->as_C_string()));
++            ResourceMark rm(Thread::current());
++            HandleMark hm;
++            instanceKlassHandle klass(jvf->method()->method_holder());
++            methodOop m = jvf->method();
++            int bci = jvf->bci();
++            RC_TRACE(0x00008000, ("klass redef %d",
++              klass->is_redefining()));
++
++            if (klass->new_version() != NULL && m->new_version() == NULL) {
++              RC_TRACE(0x00008000, ("found potential forwarding method: %s",
++                m->name()->as_C_string()));
++
++              klassOop new_klass = klass->newest_version();
++              methodOop new_method = new_klass->klass_part()->lookup_method(m->name(), m->signature());
++              RC_TRACE(0x00000002, ("%d %d",
++                new_method,
++                new_method->constMethod()->has_code_section_table()));
++
++              if (new_method != NULL && new_method->constMethod()->has_code_section_table()) {
++                RC_TRACE(0x00008000, ("found code section table for method: %s",
++                  new_method->name()->as_C_string()));
++                m->set_forward_method(new_method);
++                if (new_method->max_locals() != m->max_locals()) {
++                  tty->print_cr("new_m max locals: %d old_m max locals: %d", new_method->max_locals(), m->max_locals());
++                }
++                assert(new_method->max_locals() == m->max_locals(), "number of locals must match");
++                assert(new_method->max_stack() == m->max_stack(), "number of stack values must match");
++                if (jvf->is_interpreted_frame()) {
++                  if (m->is_in_code_section(bci)) {
++                    // We must transfer now and cannot delay until next NOP.
++                    int new_bci = m->calculate_forward_bci(bci, new_method);
++                    interpretedVFrame* iframe = interpretedVFrame::cast(jvf);
++                    RC_TRACE(0x00000002, ("Transferring execution of %s to new method old_bci=%d new_bci=%d",
++                      new_method->name()->as_C_string(),
++                      bci,
++                      new_bci));
++                    iframe->set_method(new_method, new_bci);
++                  } else {
++                    RC_TRACE(0x00000002, ("Delaying method forwarding of %s because %d is not in a code section",
++                      new_method->name()->as_C_string(),
++                      bci));
++                  }
++                } else {
++                  RC_TRACE(0x00000002, ("Delaying method forwarding of %s because method is compiled",
++                    new_method->name()->as_C_string()));
++                }
++              }
++            }
++          }
++        }
++        vf = vf->sender();
++      }
++    }
++
++    // Advance to next thread
++    java_thread = java_thread->next();
++  }
++
++  RC_TRACE(0x00000001, ("Method forwarding applied to %d methods",
++    forwarding_count));
++}
++
++bool VM_RedefineClasses::check_method_stacks() {
++
++  JavaThread *java_thread = Threads::first();
++  while (java_thread != NULL) {
++
++    int stack_depth = 0;
++    if (java_thread->has_last_Java_frame()) {
++
++      RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name()));
++
++      // vframes are resource allocated
++      Thread* current_thread = Thread::current();
++      ResourceMark rm(current_thread);
++      HandleMark hm(current_thread);
++
++      RegisterMap reg_map(java_thread);
++      frame f = java_thread->last_frame();
++      vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++      frame* last_entry_frame = NULL;
++
++      while (vf != NULL) {
++        if (vf->is_java_frame()) {
++          // java frame (interpreted, compiled, ...)
++          javaVFrame *jvf = javaVFrame::cast(vf);
++
++          if (!(jvf->method()->is_native())) {
++            RC_TRACE(0x00000400, ("found method: %s", jvf->method()->name()->as_C_string()));
++            ResourceMark rm(Thread::current());
++            HandleMark hm;
++            instanceKlassHandle klass(jvf->method()->method_holder());
++
++            StackValueCollection *locals = jvf->locals();
++            const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
++            char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
++
++            for (int i=0; i<locals->size(); i++) {
++              StackValue *stack_value = locals->at(i);
++              if (stack_value->type() == T_OBJECT) {
++                Handle obj = stack_value->get_obj();
++                if (!obj.is_null() && obj->klass()->klass_part()->newest_version()->klass_part()->check_redefinition_flag(Klass::RemoveSuperType)) {
++
++                  // OK, so this is a possible failure => check local variable table, if it could be OK.
++                  bool result = false;
++                  methodOop method = jvf->method();
++                  if (method->has_localvariable_table()) {
++                    LocalVariableTableElement *elem = jvf->method()->localvariable_table_start();
++                    for (int j=0; j<method->localvariable_table_length(); j++) {
++
++                      if (elem->slot == i) {
++
++                        // Matching index found
++
++                        if (elem->start_bci <= jvf->bci() && elem->start_bci + elem->length > jvf->bci()) {
++
++                          // Also in range!!
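++                          // (DCEVM) Resolve the declared type of this local from
++                          // the local variable table entry and check the actual
++                          // value against it.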
++ Symbol* signature = jvf->method()->constants()->symbol_at(elem->descriptor_cp_index); ++ Symbol* klass_name = signature_to_class_name(signature); ++ ++ klassOop local_klass = SystemDictionary::find(klass_name, jvf->method()->method_holder()->klass_part()->class_loader(), jvf->method()->method_holder()->klass_part()->protection_domain(), Thread::current())->klass_part()->newest_version(); ++ klassOop cur = obj->klass()->klass_part()->newest_version(); ++ ++ // Field is not null ++ if (cur->klass_part()->newest_version()->klass_part()->is_subtype_of(local_klass)) { ++ // We are OK ++ RC_TRACE(0x00008000, ("Local variable value is OK (local_klass=%s, cur_klass=%s)", ++ local_klass->klass_part()->name()->as_C_string(), cur->klass_part()->name()->as_C_string())); ++ result = true; ++ } else { ++ // Failure! ++ RC_TRACE(0x00000001, ("Local variable value is INVALID - abort redefinition (local_klass=%s, cur_klass=%s)", ++ local_klass->klass_part()->name()->as_C_string(), ++ cur->klass_part()->name()->as_C_string())); ++ return false; ++ } ++ } ++ } ++ ++ elem++; ++ } ++ } else { ++ RC_TRACE(0x00000002, ("Method %s does not have a local variable table => abort", ++ method->name_and_sig_as_C_string())); ++ } ++ ++ if (!result) { ++ return false; ++ } ++ ++ RC_TRACE(0x00008000, ("Verifying class %s", ++ jvf->method()->method_holder()->klass_part()->name()->as_C_string())); ++ ++ Symbol* exception_name; ++ const size_t message_buffer_len = klass->name()->utf8_length() + 1024; ++ char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len); ++ ++ if (TraceRedefineClasses >= 4) { ++ ClassVerifier::_verify_verbose = true; ++ } ++ ++ Thread::current()->set_pretend_new_universe(true); ++ ClassVerifier split_verifier(klass, message_buffer, message_buffer_len, Thread::current()); ++ split_verifier.verify_method(jvf->method(), Thread::current()); ++ exception_name = split_verifier.result(); ++ Thread::current()->set_pretend_new_universe(false); ++ ++ if (TraceRedefineClasses >= 4) { ++ ClassVerifier::_verify_verbose = false; ++ } ++ ++ if (exception_name != NULL) { ++ ++ RC_TRACE(0x00000001, ("Verification of class %s failed", ++ jvf->method()->method_holder()->klass_part()->name()->as_C_string())); ++ RC_TRACE(0x00000001, ("Exception: %s", ++ exception_name->as_C_string())); ++ RC_TRACE(0x00000001, ("Message: %s", ++ message_buffer)); ++ Thread::current()->clear_pending_exception(); ++ return false; ++ } ++ ++ } ++ } ++ } ++ } ++ } ++ vf = vf->sender(); ++ } ++ } ++ ++ // Advance to next thread ++ java_thread = java_thread->next(); ++ } ++ ++ return true; ++} ++ ++bool VM_RedefineClasses::check_method(methodOop method) { ++ ++ ++ return true; ++} ++ ++// Warning: destroys redefinition level values of klasses. 
++bool VM_RedefineClasses::check_loaded_methods() { ++ ++ class CheckLoadedMethodsClosure : public ObjectClosure { ++ ++ private: ++ ++ bool _result; ++ GrowableArray<klassOop> *_dangerous_klasses; ++ ++ public: ++ CheckLoadedMethodsClosure(GrowableArray<klassOop> *dangerous_klasses) { ++ _result = true; ++ _dangerous_klasses = dangerous_klasses; ++ } ++ ++ bool result() { ++ return _result; ++ } ++ ++ bool is_class_dangerous(klassOop k) { ++ return k->klass_part()->newest_version()->klass_part()->check_redefinition_flag(Klass::RemoveSuperType); ++ } ++ ++ bool can_be_affected(instanceKlass *klass) { ++ ++ constantPoolOop cp = klass->constants(); ++ ++ Thread *THREAD = Thread::current(); ++ klassOop k; ++ Symbol* symbol; ++ ++ for (int i=1; i<cp->length(); i++) { ++ jbyte tag = cp->tag_at(i).value(); ++ switch(tag) { ++ case JVM_CONSTANT_Long: ++ case JVM_CONSTANT_Double: ++ i++; ++ break; ++ ++ case JVM_CONSTANT_Utf8: ++ case JVM_CONSTANT_Unicode: ++ case JVM_CONSTANT_Integer: ++ case JVM_CONSTANT_Float: ++ case JVM_CONSTANT_String: ++ case JVM_CONSTANT_Fieldref: ++ case JVM_CONSTANT_Methodref: ++ case JVM_CONSTANT_InterfaceMethodref: ++ case JVM_CONSTANT_ClassIndex: ++ case JVM_CONSTANT_UnresolvedString: ++ case JVM_CONSTANT_StringIndex: ++ case JVM_CONSTANT_UnresolvedClassInError: ++ case JVM_CONSTANT_Object: ++ // do nothing ++ break; ++ ++ case JVM_CONSTANT_Class: ++ k = cp->klass_at(i, CHECK_(true)); ++ if (is_class_dangerous(k)) { ++ RC_TRACE(0x00000002, ("Class %s is potentially affected, because at cp[%d] references class %s", ++ klass->name()->as_C_string(), ++ i, ++ k->klass_part()->name()->as_C_string())); ++ return true; ++ } ++ break; ++ ++ case JVM_CONSTANT_NameAndType: ++ symbol = cp->symbol_at(cp->signature_ref_index_at(i)); ++ if (symbol->byte_at(0) == '(') { ++ // This must be a method ++ SignatureStream signatureStream(symbol); ++ while (true) { ++ ++ if (signatureStream.is_array()) { ++ Symbol* cur_signature = signatureStream.as_symbol(Thread::current()); ++ if (is_type_signature_dangerous(cur_signature)) { ++ return true; ++ } ++ } else if (signatureStream.is_object()) { ++ if (is_symbol_dangerous(signatureStream.as_symbol(Thread::current()))) { ++ return true; ++ } ++ } ++ ++ if (signatureStream.at_return_type()) { ++ break; ++ } ++ ++ signatureStream.next(); ++ } ++ ++ } else if (is_type_signature_dangerous(symbol)) { ++ return true; ++ } ++ break; ++ ++ case JVM_CONSTANT_UnresolvedClass: ++ symbol = cp->unresolved_klass_at(i); ++ if (is_symbol_dangerous(symbol)) { ++ return true; ++ } ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ return false; ++ } ++ ++ bool is_type_signature_dangerous(Symbol* signature) { ++ // This must be a field type ++ if (FieldType::is_obj(signature)) { ++ Symbol* name = signature_to_class_name(signature); ++ if (is_symbol_dangerous(name)) { ++ return true; ++ } ++ } else if (FieldType::is_array(signature)) { ++ //jint dimension; ++ //Symbol* object_key; ++ FieldArrayInfo fd; ++ FieldType::get_array_info(signature, fd, Thread::current()); ++ if (is_symbol_dangerous(fd.object_key())) { ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ bool is_symbol_dangerous(Symbol* symbol) { ++ for (int i=0; i<_dangerous_klasses->length(); i++) { ++ if(_dangerous_klasses->at(i)->klass_part()->name() == symbol) { ++ RC_TRACE(0x00000002, ("Found constant pool index %d references class %s", ++ i, ++ symbol->as_C_string())); ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ virtual void do_object(oop obj) { ++ ++ if (!_result) 
return; ++ ++ klassOop klassObj = (klassOop)obj; ++ Thread *THREAD = Thread::current(); ++ ++ // We found an instance klass! ++ instanceKlass *klass = instanceKlass::cast(klassObj); ++ instanceKlassHandle handle(klassObj); ++ ++ RC_TRACE(0x00000400, ("Check if verification is necessary for class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ ++ if (!can_be_affected(klass)) { ++ RC_TRACE(0x00000400, ("Skipping verification of class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ return; ++ } ++ ++ if (handle->major_version() < Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION) { ++ RC_TRACE(0x00000001, ("Failing because cannot verify class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ _result = false; ++ return; ++ } ++ ++ RC_TRACE(0x00000001, ("Verifying class %s", handle->name()->as_C_string())); ++ ++ if (!Verifier::verify(handle, Verifier::NoException, true, false, Thread::current())) { ++ ++ RC_TRACE(0x00000001, ("Verification of class %s failed", handle->name()->as_C_string())); ++ //Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ //RC_TRACE(0x00000002, ("exception when verifying class: '%s'", ex_name->as_C_string()); ++ //PENDING_EXCEPTION->print(); ++ CLEAR_PENDING_EXCEPTION; ++ _result = false; ++ } ++ ++ /*int method_count = klass->methods()->length(); ++ for (int i=0; i<method_count; i++) { ++ methodOop cur_method = (methodOop)klass->methods()->obj_at(i); ++ if (!check_method(cur_method)) { ++ RC_TRACE(0x00000001, ("Failed to verify consistency of method %s of klass %s", cur_method->name()->as_C_string(), klass->name()->as_C_string()); ++ } ++ }*/ ++ } ++ }; ++ ++ // TODO: Check bytecodes in case of interface => class or class => interface etc.. 
++ ++ GrowableArray<klassOop> dangerous_klasses; ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle handle = _new_classes->at(i); ++ if (handle->check_redefinition_flag(Klass::RemoveSuperType)) { ++ dangerous_klasses.append(handle()); ++ } ++ } ++ ++ CheckLoadedMethodsClosure checkLoadedMethodsClosure(&dangerous_klasses); ++ Thread::current()->set_pretend_new_universe(true); ++ SystemDictionary::classes_do(&checkLoadedMethodsClosure); ++ Thread::current()->set_pretend_new_universe(false); ++ ++ ++ return checkLoadedMethodsClosure.result(); ++} ++ ++bool VM_RedefineClasses::check_type_consistency() { ++ ++ Universe::set_verify_in_progress(true); ++ ++ SystemDictionary::classes_do(calculate_type_check_information); ++ bool result = check_field_value_types(); ++ SystemDictionary::classes_do(clear_type_check_information); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong field or array element value!")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- // rewrite constant pool references in the class_annotations: +- if (!rewrite_cp_refs_in_class_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller ++ result = check_method_stacks(); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong value on the stack")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- // rewrite constant pool references in the fields_annotations: +- if (!rewrite_cp_refs_in_fields_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller ++ result = check_loaded_methods(); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong loaded method")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- // rewrite constant pool references in the methods_annotations: +- if (!rewrite_cp_refs_in_methods_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller ++ RC_TRACE(0x00000001, ("Verification passed => hierarchy change is valid!")); ++ Universe::set_verify_in_progress(false); ++ return true; ++} ++ ++void VM_RedefineClasses::rollback() { ++ RC_TRACE(0x00000001, ("Rolling back redefinition!")); ++ SystemDictionary::rollback_redefinition(); ++ ++ RC_TRACE(0x00000001, ("After rolling back system dictionary!")); ++ for (int i=0; i<_new_classes->length(); i++) { ++ SystemDictionary::remove_from_hierarchy(_new_classes->at(i)); ++ } ++ ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle new_class = _new_classes->at(i); ++ new_class->set_redefining(false); ++ new_class->old_version()->klass_part()->set_new_version(NULL); ++ new_class->set_old_version(NULL); ++ } ++ ++} ++ ++template <class T> void VM_RedefineClasses::do_oop_work(T* p) { ++ T heap_oop = oopDesc::load_heap_oop(p); ++ if (!oopDesc::is_null(heap_oop)) { ++ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); ++ if (obj->is_instanceKlass()) { ++ klassOop klass = (klassOop)obj; ++ // DCEVM: note: can overwrite owner of old_klass constants pool with new_klass, so we need to fix it back later ++ if (klass->new_version() != NULL && klass->new_version()->klass_part()->is_redefining()) { ++ obj = klass->klass_part()->new_version(); ++ oopDesc::encode_store_heap_oop_not_null(p, obj); ++ } ++ } else if (obj->blueprint()->newest_version() == SystemDictionary::Class_klass()->klass_part()->newest_version()) { ++ // update references to java.lang.Class to point to newest version. 
Only update references to non-primitive ++ // java.lang.Class instances. ++ klassOop klass_oop = java_lang_Class::as_klassOop(obj); ++ if (klass_oop != NULL) { ++ if (klass_oop->new_version() != NULL && klass_oop->new_version()->klass_part()->is_redefining()) { ++ obj = klass_oop->new_version()->java_mirror(); ++ } else if (klass_oop->klass_part()->is_redefining()) { ++ obj = klass_oop->java_mirror(); ++ } ++ oopDesc::encode_store_heap_oop_not_null(p, obj); ++ ++ ++ // FIXME: DCEVM: better implementation? ++ // Starting from JDK 7 java_mirror can be kept in the regular heap. Therefore, it is possible ++ // that new java_mirror is in the young generation whereas p is in tenured generation. In that ++ // case we need to run write barrier to make sure card table is properly updated. This will ++ // allow JVM to detect reference in tenured generation properly during young generation GC. ++ if (Universe::heap()->is_in_reserved(p)) { ++ if (GenCollectedHeap::heap()->is_in_young(obj)) { ++ GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); ++ assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); ++ CardTableRS* _rs = (CardTableRS*)rs; ++ _rs->inline_write_ref_field_gc(p, obj); ++ } ++ } ++ } ++ } ++ } ++} ++ ++void VM_RedefineClasses::swap_marks(oop first, oop second) { ++ markOop first_mark = first->mark(); ++ markOop second_mark = second->mark(); ++ first->set_mark(second_mark); ++ second->set_mark(first_mark); ++} ++ ++void VM_RedefineClasses::doit() { ++ Thread *thread = Thread::current(); ++ ++ RC_TRACE(0x00000001, ("Entering doit!")); ++ ++ ++ if ((_max_redefinition_flags & Klass::RemoveSuperType) != 0) { ++ ++ RC_TIMER_START(_timer_check_type); ++ ++ if (!check_type_consistency()) { ++ // (tw) TODO: Rollback the class redefinition ++ rollback(); ++ RC_TRACE(0x00000001, ("Detected type inconsistency!")); ++ _result = JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ RC_TIMER_STOP(_timer_check_type); ++ return; ++ } ++ ++ RC_TIMER_STOP(_timer_check_type); ++ ++ } else { ++ RC_TRACE(0x00000001, ("No type narrowing => skipping check for type inconsistency")); ++ } ++ ++ if (UseMethodForwardPoints) { ++ RC_TRACE(0x00000001, ("Check stack for forwarding methods to new version")); ++ method_forwarding(); ++ } ++ ++ if (UseSharedSpaces) { ++ // Sharing is enabled so we remap the shared readonly space to ++ // shared readwrite, private just in case we need to redefine ++ // a shared class. We do the remap during the doit() phase of ++ // the safepoint to be safer. ++ if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { ++ RC_TRACE(0x00000001, ("failed to remap shared readonly space to readwrite, private")); ++ _result = JVMTI_ERROR_INTERNAL; ++ return; ++ } ++ } ++ ++ RC_TIMER_START(_timer_prepare_redefinition); ++ for (int i = 0; i < _new_classes->length(); i++) { ++ redefine_single_class(_new_classes->at(i), thread); ++ } ++ ++ // Deoptimize all compiled code that depends on this class ++ flush_dependent_code(instanceKlassHandle(Thread::current(), (klassOop)NULL), Thread::current()); ++ ++ // Adjust constantpool caches and vtables for all classes ++ // that reference methods of the evolved class. 
++  SystemDictionary::classes_do(adjust_cpool_cache, Thread::current());
++
++  RC_TIMER_STOP(_timer_prepare_redefinition);
++  RC_TIMER_START(_timer_redefinition);
++
++  class ChangePointersOopClosure : public OopClosure {
++    virtual void do_oop(oop* o) {
++      do_oop_work(o);
++    }
++
++    virtual void do_oop(narrowOop* o) {
++      do_oop_work(o);
++    }
++  };
++
++  class ChangePointersObjectClosure : public ObjectClosure {
++
++  private:
++
++    OopClosure *_closure;
++    bool _needs_instance_update;
++    GrowableArray<oop> *_updated_oops;
++
++  public:
++    ChangePointersObjectClosure(OopClosure *closure) : _closure(closure), _needs_instance_update(false), _updated_oops(NULL) {}
++
++    bool needs_instance_update() {
++      return _needs_instance_update;
++    }
++
++    GrowableArray<oop> *updated_oops() { return _updated_oops; }
++
++    virtual void do_object(oop obj) {
++      if (!obj->is_instanceKlass()) {
++        obj->oop_iterate(_closure);
++
++        if (obj->blueprint()->is_redefining()) {
++
++          if (obj->blueprint()->check_redefinition_flag(Klass::HasInstanceTransformer)) {
++            if (_updated_oops == NULL) {
++              _updated_oops = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true);
++            }
++            _updated_oops->append(obj);
++          }
++
++          if (obj->blueprint()->update_information() != NULL || obj->is_perm()) {
++
++            assert(obj->blueprint()->old_version() != NULL, "must have old version");
++            obj->set_klass_no_check(obj->blueprint()->old_version());
++
++            if (obj->size() != obj->size_given_klass(obj->blueprint()->new_version()->klass_part()) || obj->is_perm()) {
++              // We need an instance update => set back to old klass
++              _needs_instance_update = true;
++
++            } else {
++              MarkSweep::update_fields(obj, obj);
++              assert(obj->blueprint()->is_redefining(), "update fields resets the klass");
++            }
++          }
++        }
++
++      } else {
++        instanceKlass *klass = instanceKlass::cast((klassOop)obj);
++        if (klass->is_redefining()) {
++          // DCEVM: We need to restore the constant pool owner, which was updated by do_oop_work
++          instanceKlass* old_klass = instanceKlass::cast(klass->old_version());
++          old_klass->constants()->set_pool_holder(klass->old_version());
++
++          // Initialize the new class! Special static initialization that does not execute the
++          // static constructor but copies static field values from the old class if name
++          // and signature of a static field match.
++          klass->initialize_redefined_class();
++        }
++        // idubrov: FIXME: we probably don't need that since oops will be visited in a regular way...
++ // idubrov: need to check if there is a test to verify that fields referencing class being updated ++ // idubrov: will get new version of that class ++ //klass->iterate_static_fields(_closure); ++ } ++ } ++ }; ++ ++ ChangePointersOopClosure oopClosure; ++ ChangePointersObjectClosure objectClosure(&oopClosure); ++ ++ { ++ SharedHeap::heap()->gc_prologue(true); ++ Universe::root_oops_do(&oopClosure); ++ Universe::heap()->object_iterate(&objectClosure); ++ SharedHeap::heap()->gc_epilogue(false); ++ } ++ ++ // Swap marks to have same hashcodes ++ for (int i=0; i<_new_classes->length(); i++) { ++ swap_marks(_new_classes->at(i)(), _new_classes->at(i)->old_version()); ++ swap_marks(_new_classes->at(i)->java_mirror(), _new_classes->at(i)->old_version()->java_mirror()); ++ } ++ ++ _updated_oops = objectClosure.updated_oops(); ++ ++ if (objectClosure.needs_instance_update()){ ++ ++ // Do a full garbage collection to update the instance sizes accordingly ++ RC_TRACE(0x00000001, ("Before performing full GC!")); ++ Universe::set_redefining_gc_run(true); ++ JvmtiGCMarker jgcm; ++ notify_gc_begin(true); ++ Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); ++ notify_gc_end(); ++ Universe::set_redefining_gc_run(false); ++ RC_TRACE(0x00000001, ("GC done!")); ++ } ++ ++ ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ if (_updated_oops != NULL) { ++ RC_TRACE(0x00000001, ("%d object(s) updated!", _updated_oops->length())); ++ } else { ++ RC_TRACE(0x00000001, ("No objects updated!")); ++ } ++ } ++ ++ // Unmark klassOops as "redefining" ++ for (int i=0; i<_new_classes->length(); i++) { ++ klassOop cur = _new_classes->at(i)(); ++ _new_classes->at(i)->set_redefining(false); ++ _new_classes->at(i)->clear_update_information(); ++ _new_classes->at(i)->update_supers_to_newest_version(); ++ ++ if (((instanceKlass *)cur->klass_part()->old_version()->klass_part())->array_klasses() != NULL) { ++ update_array_classes_to_newest_version(((instanceKlass *)cur->klass_part()->old_version()->klass_part())->array_klasses()); ++ ++ // Transfer the array classes, otherwise we might get cast exceptions when casting array types. ++ ((instanceKlass*)cur->klass_part())->set_array_klasses(((instanceKlass*)cur->klass_part()->old_version()->klass_part())->array_klasses()); ++ ++ oop new_mirror = _new_classes->at(i)->java_mirror(); ++ oop old_mirror = _new_classes->at(i)->old_version()->java_mirror(); ++ java_lang_Class::set_array_klass(new_mirror, java_lang_Class::array_klass(old_mirror)); ++ ++ // Transfer init state ++ instanceKlass::ClassState state = instanceKlass::cast(cur->old_version())->init_state(); ++ if (state > instanceKlass::linked) { ++ instanceKlass::cast(cur)->call_class_initializer(thread); ++ } ++ } ++ } ++ ++ for (int i=T_BOOLEAN; i<=T_LONG; i++) { ++ update_array_classes_to_newest_version(Universe::typeArrayKlassObj((BasicType)i)); ++ } ++ ++ // Disable any dependent concurrent compilations ++ SystemDictionary::notice_modification(); ++ ++ // Set flag indicating that some invariants are no longer true. ++ // See jvmtiExport.hpp for detailed explanation. 
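// ---------------------------------------------------------------------------
// Why swap_marks above matters, as a minimal stand-alone sketch: an object's
// identity hash code lives in its mark word, so exchanging the mark words of
// the old and new class (and of their java.lang.Class mirrors) keeps
// System.identityHashCode() stable across the redefinition. ToyObject and its
// header field are invented stand-ins for oopDesc and markOop.
#include <cassert>
#include <cstdint>

struct ToyObject { uintptr_t mark; };  // header word holding the identity hash

static void toy_swap_marks(ToyObject& a, ToyObject& b) {
  uintptr_t tmp = a.mark;
  a.mark = b.mark;
  b.mark = tmp;
}

static void toy_swap_marks_demo() {
  ToyObject old_mirror = { 0x1234 };   // pretend 0x1234 encodes the hash
  ToyObject new_mirror = { 0x0 };      // fresh object, no hash yet
  toy_swap_marks(old_mirror, new_mirror);
  assert(new_mirror.mark == 0x1234);   // new object now answers with the old hash
}
// ---------------------------------------------------------------------------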
++ JvmtiExport::set_has_redefined_a_class(); ++ ++ // Clean up caches in the compiler interface and compiler threads ++ CompileBroker::cleanup_after_redefinition(); ++ ++#ifdef ASSERT ++ ++ // Universe::verify(); ++ // JNIHandles::verify(); ++ ++ SystemDictionary::classes_do(check_class, thread); ++#endif ++ ++ update_active_methods(); ++ RC_TIMER_STOP(_timer_redefinition); ++ ++} ++ ++void VM_RedefineClasses::update_array_classes_to_newest_version(klassOop smallest_dimension) { ++ ++ arrayKlass *curArrayKlass = arrayKlass::cast(smallest_dimension); ++ assert(curArrayKlass->lower_dimension() == NULL, "argument must be smallest dimension"); ++ ++ ++ while (curArrayKlass != NULL) { ++ klassOop higher_dimension = curArrayKlass->higher_dimension(); ++ klassOop lower_dimension = curArrayKlass->lower_dimension(); ++ curArrayKlass->update_supers_to_newest_version(); ++ ++ curArrayKlass = NULL; ++ if (higher_dimension != NULL) { ++ curArrayKlass = arrayKlass::cast(higher_dimension); ++ } ++ } ++ ++} ++ ++void VM_RedefineClasses::doit_epilogue() { ++ ++ RC_TIMER_START(_timer_vm_op_epilogue); ++ ++ unlock_threads(); ++ ++ ResourceMark mark; ++ ++ VM_GC_Operation::doit_epilogue(); ++ RC_TRACE(0x00000001, ("GC Operation epilogue finished! ")); ++ ++ GrowableArray<methodHandle> instanceTransformerMethods; ++ ++ // Call static transformers ++ for (int i=0; i<_new_classes->length(); i++) { ++ ++ instanceKlassHandle klass = _new_classes->at(i); ++ ++ // Find instance transformer method ++ ++ if (klass->check_redefinition_flag(Klass::HasInstanceTransformer)) { ++ ++ RC_TRACE(0x00008000, ("Call instance transformer of %s instance", klass->name()->as_C_string())); ++ klassOop cur_klass = klass(); ++ while (cur_klass != NULL) { ++ methodOop method = ((instanceKlass*)cur_klass->klass_part())->find_method(vmSymbols::transformer_name(), vmSymbols::void_method_signature()); ++ if (method != NULL) { ++ methodHandle instanceTransformerMethod(method); ++ instanceTransformerMethods.append(instanceTransformerMethod); ++ break; ++ } else { ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ } ++ assert(cur_klass != NULL, "must have instance transformer method"); ++ } else { ++ instanceTransformerMethods.append(methodHandle(Thread::current(), NULL)); ++ } ++ } ++ ++ ++ // Call instance transformers ++ if (_updated_oops != NULL) { ++ ++ for (int i=0; i<_updated_oops->length(); i++) { ++ assert(_updated_oops->at(i) != NULL, "must not be null!"); ++ Handle cur(_updated_oops->at(i)); ++ instanceKlassHandle klass(cur->klass()); ++ ++ if (klass->check_redefinition_flag(Klass::HasInstanceTransformer)) { ++ ++ methodHandle method = instanceTransformerMethods.at(klass->redefinition_index()); ++ ++ RC_TRACE(0x00008000, ("executing transformer method")); ++ ++ Thread *__the_thread__ = Thread::current(); ++ JavaValue result(T_VOID); ++ JavaCallArguments args(cur); ++ JavaCalls::call(&result, ++ method, ++ &args, ++ THREAD); ++ ++ // TODO: What to do with an exception here? 
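// ---------------------------------------------------------------------------
// Stand-alone sketch of the transformer lookup loop above: the instance
// transformer method is searched along the superclass chain, so a subclass
// without its own transformer inherits the closest one defined above it.
// ToyClass/ToyMethod are invented; the real code resolves the name through
// vmSymbols::transformer_name() and instanceKlass::find_method.
#include <string>
#include <vector>

struct ToyMethod { std::string name; std::string signature; };
struct ToyClass {
  ToyClass* super;
  std::vector<ToyMethod> methods;
};

// Walks from 'klass' upwards and returns the first method matching
// name/signature, or NULL if no class in the chain defines it.
static const ToyMethod* find_in_super_chain(const ToyClass* klass,
                                            const std::string& name,
                                            const std::string& sig) {
  for (const ToyClass* c = klass; c != NULL; c = c->super) {
    for (std::size_t i = 0; i < c->methods.size(); i++) {
      const ToyMethod& m = c->methods[i];
      if (m.name == name && m.signature == sig) return &m;
    }
  }
  return NULL;  // the caller asserts this cannot happen when the flag was set
}
// ---------------------------------------------------------------------------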
++ if (HAS_PENDING_EXCEPTION) { ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ RC_TRACE(0x00000002, ("exception when executing transformer: '%s'", ++ ex_name->as_C_string())); ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } ++ } ++ ++ delete _updated_oops; ++ _updated_oops = NULL; ++ } ++ ++ // Free the array of scratch classes ++ delete _new_classes; ++ _new_classes = NULL; ++ RC_TRACE(0x00000001, ("Redefinition finished!")); ++ ++ RC_TIMER_STOP(_timer_vm_op_epilogue); ++} ++ ++bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { ++ // classes for primitives cannot be redefined ++ if (java_lang_Class::is_primitive(klass_mirror)) { + return false; + } +- +- // rewrite constant pool references in the methods_parameter_annotations: +- if (!rewrite_cp_refs_in_methods_parameter_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller ++ klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror); ++ // classes for arrays cannot be redefined ++ if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) { + return false; + } +- +- // rewrite constant pool references in the methods_default_annotations: +- if (!rewrite_cp_refs_in_methods_default_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; ++ return true; ++} ++ ++#ifdef ASSERT ++ ++void VM_RedefineClasses::verify_classes(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ ++ instanceKlassHandle k_handle(THREAD, k_oop); ++ Verifier::verify(k_handle, Verifier::ThrowException, true, true, THREAD); ++ k_oop = k_oop->klass_part()->old_version(); + } +- +- return true; +-} // end rewrite_cp_refs() +- +- +-// Rewrite constant pool references in the methods. +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods(THREAD, scratch_class->methods()); +- +- if (methods.is_null() || methods->length() == 0) { +- // no methods so nothing to do +- return true; +- } +- +- // rewrite constant pool references in the methods: +- for (int i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- methodHandle new_method; +- rewrite_cp_refs_in_method(method, &new_method, CHECK_false); +- if (!new_method.is_null()) { +- // the method has been replaced so save the new method version +- methods->obj_at_put(i, new_method()); ++} ++ ++#endif ++ ++// Rewrite faster byte-codes back to their slower equivalent. Undoes rewriting happening in templateTable_xxx.cpp ++// The reason is that once we zero cpool caches, we need to re-resolve all entries again. Faster bytecodes do not ++// do that, they assume that cache entry is resolved already. 
++static void unpatch_bytecode(methodOop method) { ++ RawBytecodeStream bcs(method); ++ Bytecodes::Code code; ++ Bytecodes::Code java_code; ++ while (!bcs.is_last_bytecode()) { ++ code = bcs.raw_next(); ++ address bcp = bcs.bcp(); ++ ++ if (code == Bytecodes::_breakpoint) { ++ int bci = method->bci_from(bcp); ++ code = method->orig_bytecode_at(bci); ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ // Let breakpoint table handling unpatch bytecode ++ method->set_orig_bytecode_at(bci, java_code); ++ } ++ } else { ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ *bcp = java_code; ++ } ++ } ++ ++ // Additionally, we need to unpatch bytecode at bcp+1 for fast_xaccess (which would be fast field access) ++ if (code == Bytecodes::_fast_iaccess_0 || code == Bytecodes::_fast_aaccess_0 || code == Bytecodes::_fast_faccess_0) { ++ Bytecodes::Code code2 = Bytecodes::code_or_bp_at(bcp + 1); ++ assert(code2 == Bytecodes::_fast_igetfield || ++ code2 == Bytecodes::_fast_agetfield || ++ code2 == Bytecodes::_fast_fgetfield, ""); ++ *(bcp + 1) = Bytecodes::java_code(code2); + } + } +- +- return true; + } + +- +-// Rewrite constant pool references in the specific method. This code +-// was adapted from Rewriter::rewrite_method(). +-void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method, +- methodHandle *new_method_p, TRAPS) { +- +- *new_method_p = methodHandle(); // default is no new method +- +- // We cache a pointer to the bytecodes here in code_base. If GC +- // moves the methodOop, then the bytecodes will also move which +- // will likely cause a crash. We create a No_Safepoint_Verifier +- // object to detect whether we pass a possible safepoint in this +- // code block. +- No_Safepoint_Verifier nsv; +- +- // Bytecodes and their length +- address code_base = method->code_base(); +- int code_length = method->code_size(); +- +- int bc_length; +- for (int bci = 0; bci < code_length; bci += bc_length) { +- address bcp = code_base + bci; +- Bytecodes::Code c = (Bytecodes::Code)(*bcp); +- +- bc_length = Bytecodes::length_for(c); +- if (bc_length == 0) { +- // More complicated bytecodes report a length of zero so +- // we have to try again a slightly different way. +- bc_length = Bytecodes::length_at(method(), bcp); +- } +- +- assert(bc_length != 0, "impossible bytecode length"); +- +- switch (c) { +- case Bytecodes::_ldc: +- { +- int cp_index = *(bcp + 1); +- int new_index = find_new_index(cp_index); +- +- if (StressLdcRewrite && new_index == 0) { +- // If we are stressing ldc -> ldc_w rewriting, then we +- // always need a new_index value. 
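// ---------------------------------------------------------------------------
// A stand-alone model of what unpatch_bytecode above undoes: the template
// interpreter rewrites e.g. getfield into resolved "fast" variants that index
// straight into the constant pool cache; once the cache is zeroed, those
// variants must be reverted so the interpreter re-resolves the entries. The
// fast-opcode values below are invented for the sketch (0xB4 is the real
// getfield opcode), and 3-byte field instructions are assumed for simplicity.
#include <cstddef>
#include <cstdint>

enum ToyOpcode {
  OP_GETFIELD       = 0xB4,  // generic form, triggers resolution
  OP_FAST_IGETFIELD = 0xCC,  // pre-resolved int field load (illustrative value)
  OP_FAST_AGETFIELD = 0xCD   // pre-resolved object field load (illustrative value)
};

// Maps a rewritten opcode back to its resolving equivalent, in the spirit of
// Bytecodes::java_code(); identity for everything else.
static uint8_t toy_java_code(uint8_t op) {
  switch (op) {
    case OP_FAST_IGETFIELD:
    case OP_FAST_AGETFIELD: return OP_GETFIELD;
    default:                return op;
  }
}

// Walks a code array and undoes the rewriting in place.
static void toy_unpatch(uint8_t* code, std::size_t len) {
  for (std::size_t bci = 0; bci + 2 < len; bci += 3) {
    code[bci] = toy_java_code(code[bci]);
  }
}
// ---------------------------------------------------------------------------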
+- new_index = cp_index; +- } +- if (new_index != 0) { +- // the original index is mapped so we have more work to do +- if (!StressLdcRewrite && new_index <= max_jubyte) { +- // The new value can still use ldc instead of ldc_w +- // unless we are trying to stress ldc -> ldc_w rewriting +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- *(bcp + 1) = new_index; +- } else { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s->ldc_w@" INTPTR_FORMAT " old=%d, new=%d", +- Bytecodes::name(c), bcp, cp_index, new_index)); +- // the new value needs ldc_w instead of ldc +- u_char inst_buffer[4]; // max instruction size is 4 bytes +- bcp = (address)inst_buffer; +- // construct new instruction sequence +- *bcp = Bytecodes::_ldc_w; +- bcp++; +- // Rewriter::rewrite_method() does not rewrite ldc -> ldc_w. +- // See comment below for difference between put_Java_u2() +- // and put_native_u2(). +- Bytes::put_Java_u2(bcp, new_index); +- +- Relocator rc(method, NULL /* no RelocatorListener needed */); +- methodHandle m; +- { +- Pause_No_Safepoint_Verifier pnsv(&nsv); +- +- // ldc is 2 bytes and ldc_w is 3 bytes +- m = rc.insert_space_at(bci, 3, inst_buffer, THREAD); +- if (m.is_null() || HAS_PENDING_EXCEPTION) { +- guarantee(false, "insert_space_at() failed"); +- } +- } +- +- // return the new method so that the caller can update +- // the containing class +- *new_method_p = method = m; +- // switch our bytecode processing loop from the old method +- // to the new method +- code_base = method->code_base(); +- code_length = method->code_size(); +- bcp = code_base + bci; +- c = (Bytecodes::Code)(*bcp); +- bc_length = Bytecodes::length_for(c); +- assert(bc_length != 0, "sanity check"); +- } // end we need ldc_w instead of ldc +- } // end if there is a mapped index +- } break; +- +- // these bytecodes have a two-byte constant pool index +- case Bytecodes::_anewarray : // fall through +- case Bytecodes::_checkcast : // fall through +- case Bytecodes::_getfield : // fall through +- case Bytecodes::_getstatic : // fall through +- case Bytecodes::_instanceof : // fall through +- case Bytecodes::_invokeinterface: // fall through +- case Bytecodes::_invokespecial : // fall through +- case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokevirtual : // fall through +- case Bytecodes::_ldc_w : // fall through +- case Bytecodes::_ldc2_w : // fall through +- case Bytecodes::_multianewarray : // fall through +- case Bytecodes::_new : // fall through +- case Bytecodes::_putfield : // fall through +- case Bytecodes::_putstatic : +- { +- address p = bcp + 1; +- int cp_index = Bytes::get_Java_u2(p); +- int new_index = find_new_index(cp_index); +- if (new_index != 0) { +- // the original index is mapped so update w/ new value +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- // Rewriter::rewrite_method() uses put_native_u2() in this +- // situation because it is reusing the constant pool index +- // location for a native index into the constantPoolCache. +- // Since we are updating the constant pool index prior to +- // verification and constantPoolCache initialization, we +- // need to keep the new index in Java byte order. +- Bytes::put_Java_u2(p, new_index); +- } +- } break; +- } +- } // end for each bytecode +-} // end rewrite_cp_refs_in_method() +- +- +-// Rewrite constant pool references in the class_annotations field. 
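// ---------------------------------------------------------------------------
// The removed ldc-rewriting code above leans on one detail worth spelling
// out: constant pool indices in bytecode are stored big-endian ("Java byte
// order"), which is what Bytes::get_Java_u2/put_Java_u2 implement. A
// stand-alone version of that primitive, plus the ldc/ldc_w size check it is
// paired with:
#include <cstdint>

static uint16_t get_java_u2(const uint8_t* p) {
  return (uint16_t)((p[0] << 8) | p[1]);  // high byte first
}

static void put_java_u2(uint8_t* p, uint16_t v) {
  p[0] = (uint8_t)(v >> 8);
  p[1] = (uint8_t)(v & 0xff);
}

// ldc carries a one-byte index, so any index above 255 (max_jubyte) forces
// the three-byte ldc_w form -- which is why the removed code had to grow the
// method with a Relocator instead of patching the index in place.
static bool fits_in_ldc(uint16_t cp_index) {
  return cp_index <= 255;
}
// ---------------------------------------------------------------------------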
+-bool VM_RedefineClasses::rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- typeArrayHandle class_annotations(THREAD, +- scratch_class->class_annotations()); +- if (class_annotations.is_null() || class_annotations->length() == 0) { +- // no class_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_annotations length=%d", class_annotations->length())); +- +- int byte_i = 0; // byte index into class_annotations +- return rewrite_cp_refs_in_annotations_typeArray(class_annotations, byte_i, +- THREAD); +-} +- +- +-// Rewrite constant pool references in an annotations typeArray. This +-// "structure" is adapted from the RuntimeVisibleAnnotations_attribute +-// that is described in section 4.8.15 of the 2nd-edition of the VM spec: +-// +-// annotations_typeArray { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for num_annotations field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for num_annotations field")); +- return false; +- } +- +- u2 num_annotations = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_annotations=%d", num_annotations)); +- +- int calc_num_annotations = 0; +- for (; calc_num_annotations < num_annotations; calc_num_annotations++) { +- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad annotation_struct at %d", calc_num_annotations)); +- // propagate failure back to caller +- return false; +- } +- } +- assert(num_annotations == calc_num_annotations, "sanity check"); +- +- return true; +-} // end rewrite_cp_refs_in_annotations_typeArray() +- +- +-// Rewrite constant pool references in the annotation struct portion of +-// an annotations_typeArray. 
This "structure" is from section 4.8.15 of +-// the 2nd-edition of the VM spec: +-// +-// struct annotation { +-// u2 type_index; +-// u2 num_element_value_pairs; +-// { +-// u2 element_name_index; +-// element_value value; +-// } element_value_pairs[num_element_value_pairs]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- if ((byte_i_ref + 2 + 2) > annotations_typeArray->length()) { +- // not enough room for smallest annotation_struct +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for annotation_struct")); +- return false; +- } +- +- u2 type_index = rewrite_cp_ref_in_annotation_data(annotations_typeArray, +- byte_i_ref, "mapped old type_index=%d", THREAD); +- +- u2 num_element_value_pairs = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr( +- byte_i_ref)); +- byte_i_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_index=%d num_element_value_pairs=%d", type_index, +- num_element_value_pairs)); +- +- int calc_num_element_value_pairs = 0; +- for (; calc_num_element_value_pairs < num_element_value_pairs; +- calc_num_element_value_pairs++) { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for another element_name_index, let alone +- // the rest of another component +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for element_name_index")); +- return false; +- } +- +- u2 element_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old element_name_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("element_name_index=%d", element_name_index)); +- +- if (!rewrite_cp_refs_in_element_value(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad element_value at %d", calc_num_element_value_pairs)); +- // propagate failure back to caller +- return false; +- } +- } // end for each component +- assert(num_element_value_pairs == calc_num_element_value_pairs, +- "sanity check"); +- +- return true; +-} // end rewrite_cp_refs_in_annotation_struct() +- +- +-// Rewrite a constant pool reference at the current position in +-// annotations_typeArray if needed. Returns the original constant +-// pool reference if a rewrite was not needed or the new constant +-// pool reference if a rewrite was needed. +-u2 VM_RedefineClasses::rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS) { +- +- address cp_index_addr = (address) +- annotations_typeArray->byte_at_addr(byte_i_ref); +- u2 old_cp_index = Bytes::get_Java_u2(cp_index_addr); +- u2 new_cp_index = find_new_index(old_cp_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, (trace_mesg, old_cp_index)); +- Bytes::put_Java_u2(cp_index_addr, new_cp_index); +- old_cp_index = new_cp_index; +- } +- byte_i_ref += 2; +- return old_cp_index; +-} +- +- +-// Rewrite constant pool references in the element_value portion of an +-// annotations_typeArray. 
This "structure" is from section 4.8.15.1 of +-// the 2nd-edition of the VM spec: +-// +-// struct element_value { +-// u1 tag; +-// union { +-// u2 const_value_index; +-// { +-// u2 type_name_index; +-// u2 const_name_index; +-// } enum_const_value; +-// u2 class_info_index; +-// annotation annotation_value; +-// struct { +-// u2 num_values; +-// element_value values[num_values]; +-// } array_value; +-// } value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_element_value( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- +- if ((byte_i_ref + 1) > annotations_typeArray->length()) { +- // not enough room for a tag let alone the rest of an element_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a tag")); +- return false; +- } +- +- u1 tag = annotations_typeArray->byte_at(byte_i_ref); +- byte_i_ref++; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("tag='%c'", tag)); +- +- switch (tag) { +- // These BaseType tag values are from Table 4.2 in VM spec: +- case 'B': // byte +- case 'C': // char +- case 'D': // double +- case 'F': // float +- case 'I': // int +- case 'J': // long +- case 'S': // short +- case 'Z': // boolean +- +- // The remaining tag values are from Table 4.8 in the 2nd-edition of +- // the VM spec: +- case 's': +- { +- // For the above tag values (including the BaseType values), +- // value.const_value_index is right union field. +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a const_value_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a const_value_index")); +- return false; +- } +- +- u2 const_value_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_value_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("const_value_index=%d", const_value_index)); +- } break; +- +- case 'e': +- { +- // for the above tag value, value.enum_const_value is right union field +- +- if ((byte_i_ref + 4) > annotations_typeArray->length()) { +- // not enough room for a enum_const_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a enum_const_value")); +- return false; +- } +- +- u2 type_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old type_name_index=%d", THREAD); +- +- u2 const_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_name_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_name_index=%d const_name_index=%d", type_name_index, +- const_name_index)); +- } break; +- +- case 'c': +- { +- // for the above tag value, value.class_info_index is right union field +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a class_info_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a class_info_index")); +- return false; +- } +- +- u2 class_info_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old class_info_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_info_index=%d", class_info_index)); +- } break; +- +- case '@': +- // For the above tag value, value.attr_value is the right union +- // field. This is a nested annotation. 
+- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- break; +- +- case '[': +- { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a num_values field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_values field")); +- return false; +- } +- +- // For the above tag value, value.array_value is the right union +- // field. This is an array of nested element_value. +- u2 num_values = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("num_values=%d", num_values)); +- +- int calc_num_values = 0; +- for (; calc_num_values < num_values; calc_num_values++) { +- if (!rewrite_cp_refs_in_element_value( +- annotations_typeArray, byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad nested element_value at %d", calc_num_values)); +- // propagate failure back to caller +- return false; +- } +- } +- assert(num_values == calc_num_values, "sanity check"); +- } break; +- +- default: +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("bad tag=0x%x", tag)); +- return false; +- } // end decode tag field +- +- return true; +-} // end rewrite_cp_refs_in_element_value() +- +- +-// Rewrite constant pool references in a fields_annotations field. +-bool VM_RedefineClasses::rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle fields_annotations(THREAD, +- scratch_class->fields_annotations()); +- +- if (fields_annotations.is_null() || fields_annotations->length() == 0) { +- // no fields_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("fields_annotations length=%d", fields_annotations->length())); +- +- for (int i = 0; i < fields_annotations->length(); i++) { +- typeArrayHandle field_annotations(THREAD, +- (typeArrayOop)fields_annotations->obj_at(i)); +- if (field_annotations.is_null() || field_annotations->length() == 0) { +- // this field does not have any annotations so skip it +- continue; +- } +- +- int byte_i = 0; // byte index into field_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(field_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad field_annotations at %d", i)); +- // propagate failure back to caller +- return false; +- } +- } +- +- return true; +-} // end rewrite_cp_refs_in_fields_annotations() +- +- +-// Rewrite constant pool references in a methods_annotations field. 
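// ---------------------------------------------------------------------------
// The element_value layout handled above is recursive ('@' nests a whole
// annotation, '[' nests an array of element_values), which is why the removed
// code is a family of mutually recursive walkers. A stripped-down stand-alone
// walker over the same byte layout; it only skips entries (no constant pool
// rewriting) and omits bounds checking for brevity:
#include <cstddef>
#include <cstdint>

static std::size_t skip_element_value(const uint8_t* buf, std::size_t i);

static std::size_t skip_annotation(const uint8_t* buf, std::size_t i) {
  i += 2;                                              // type_index
  uint16_t pairs = (uint16_t)((buf[i] << 8) | buf[i + 1]);
  i += 2;                                              // num_element_value_pairs
  for (uint16_t p = 0; p < pairs; p++) {
    i += 2;                                            // element_name_index
    i = skip_element_value(buf, i);
  }
  return i;
}

static std::size_t skip_element_value(const uint8_t* buf, std::size_t i) {
  uint8_t tag = buf[i++];
  switch (tag) {
    case 'B': case 'C': case 'D': case 'F': case 'I':
    case 'J': case 'S': case 'Z': case 's':
      return i + 2;                                    // const_value_index
    case 'e':
      return i + 4;                                    // type_name + const_name
    case 'c':
      return i + 2;                                    // class_info_index
    case '@':
      return skip_annotation(buf, i);                  // nested annotation
    case '[': {
      uint16_t n = (uint16_t)((buf[i] << 8) | buf[i + 1]);
      i += 2;                                          // num_values
      for (uint16_t v = 0; v < n; v++) i = skip_element_value(buf, i);
      return i;
    }
    default:
      return i;                                        // malformed; the real code fails here
  }
}
// ---------------------------------------------------------------------------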
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_annotations(THREAD, +- scratch_class->methods_annotations()); +- +- if (methods_annotations.is_null() || methods_annotations->length() == 0) { +- // no methods_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_annotations length=%d", methods_annotations->length())); +- +- for (int i = 0; i < methods_annotations->length(); i++) { +- typeArrayHandle method_annotations(THREAD, +- (typeArrayOop)methods_annotations->obj_at(i)); +- if (method_annotations.is_null() || method_annotations->length() == 0) { +- // this method does not have any annotations so skip it +- continue; +- } +- +- int byte_i = 0; // byte index into method_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(method_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_annotations at %d", i)); +- // propagate failure back to caller +- return false; +- } +- } +- +- return true; +-} // end rewrite_cp_refs_in_methods_annotations() +- +- +-// Rewrite constant pool references in a methods_parameter_annotations +-// field. This "structure" is adapted from the +-// RuntimeVisibleParameterAnnotations_attribute described in section +-// 4.8.17 of the 2nd-edition of the VM spec: +-// +-// methods_parameter_annotations_typeArray { +-// u1 num_parameters; +-// { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } parameter_annotations[num_parameters]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_parameter_annotations(THREAD, +- scratch_class->methods_parameter_annotations()); +- +- if (methods_parameter_annotations.is_null() +- || methods_parameter_annotations->length() == 0) { +- // no methods_parameter_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_parameter_annotations length=%d", +- methods_parameter_annotations->length())); +- +- for (int i = 0; i < methods_parameter_annotations->length(); i++) { +- typeArrayHandle method_parameter_annotations(THREAD, +- (typeArrayOop)methods_parameter_annotations->obj_at(i)); +- if (method_parameter_annotations.is_null() +- || method_parameter_annotations->length() == 0) { +- // this method does not have any parameter annotations so skip it +- continue; +- } +- +- if (method_parameter_annotations->length() < 1) { +- // not enough room for a num_parameters field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_parameters field at %d", i)); +- return false; +- } +- +- int byte_i = 0; // byte index into method_parameter_annotations +- +- u1 num_parameters = method_parameter_annotations->byte_at(byte_i); +- byte_i++; +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_parameters=%d", num_parameters)); +- +- int calc_num_parameters = 0; +- for (; calc_num_parameters < num_parameters; calc_num_parameters++) { +- if (!rewrite_cp_refs_in_annotations_typeArray( +- method_parameter_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_parameter_annotations at %d", calc_num_parameters)); +- // propagate failure back to caller +- return false; +- } +- } +- assert(num_parameters == calc_num_parameters, "sanity check"); +- } +- +- return true; +-} // end 
rewrite_cp_refs_in_methods_parameter_annotations() +- +- +-// Rewrite constant pool references in a methods_default_annotations +-// field. This "structure" is adapted from the AnnotationDefault_attribute +-// that is described in section 4.8.19 of the 2nd-edition of the VM spec: +-// +-// methods_default_annotations_typeArray { +-// element_value default_value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_default_annotations(THREAD, +- scratch_class->methods_default_annotations()); +- +- if (methods_default_annotations.is_null() +- || methods_default_annotations->length() == 0) { +- // no methods_default_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_default_annotations length=%d", +- methods_default_annotations->length())); +- +- for (int i = 0; i < methods_default_annotations->length(); i++) { +- typeArrayHandle method_default_annotations(THREAD, +- (typeArrayOop)methods_default_annotations->obj_at(i)); +- if (method_default_annotations.is_null() +- || method_default_annotations->length() == 0) { +- // this method does not have any default annotations so skip it +- continue; +- } +- +- int byte_i = 0; // byte index into method_default_annotations +- +- if (!rewrite_cp_refs_in_element_value( +- method_default_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad default element_value at %d", i)); +- // propagate failure back to caller +- return false; +- } +- } +- +- return true; +-} // end rewrite_cp_refs_in_methods_default_annotations() +- +- +-// Rewrite constant pool references in the method's stackmap table. +-// These "structures" are adapted from the StackMapTable_attribute that +-// is described in section 4.8.4 of the 6.0 version of the VM spec +-// (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// stack_map { +-// u2 number_of_entries; +-// stack_map_frame entries[number_of_entries]; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table( +- methodHandle method, TRAPS) { +- +- if (!method->has_stackmap_table()) { +- return; +- } +- +- typeArrayOop stackmap_data = method->stackmap_data(); +- address stackmap_p = (address)stackmap_data->byte_at_addr(0); +- address stackmap_end = stackmap_p + stackmap_data->length(); +- +- assert(stackmap_p + 2 <= stackmap_end, "no room for number_of_entries"); +- u2 number_of_entries = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("number_of_entries=%u", number_of_entries)); +- +- // walk through each stack_map_frame +- u2 calc_number_of_entries = 0; +- for (; calc_number_of_entries < number_of_entries; calc_number_of_entries++) { +- // The stack_map_frame structure is a u1 frame_type followed by +- // 0 or more bytes of data: +- // +- // union stack_map_frame { +- // same_frame; +- // same_locals_1_stack_item_frame; +- // same_locals_1_stack_item_frame_extended; +- // chop_frame; +- // same_frame_extended; +- // append_frame; +- // full_frame; +- // } +- +- assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type"); +- // The Linux compiler does not like frame_type to be u1 or u2. 
It +- // issues the following warning for the first if-statement below: +- // +- // "warning: comparison is always true due to limited range of data type" +- // +- u4 frame_type = *stackmap_p; +- stackmap_p++; +- +- // same_frame { +- // u1 frame_type = SAME; /* 0-63 */ +- // } +- if (frame_type >= 0 && frame_type <= 63) { +- // nothing more to do for same_frame +- } +- +- // same_locals_1_stack_item_frame { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM; /* 64-127 */ +- // verification_type_info stack[1]; +- // } +- else if (frame_type >= 64 && frame_type <= 127) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // reserved for future use +- else if (frame_type >= 128 && frame_type <= 246) { +- // nothing more to do for reserved frame_types +- } +- +- // same_locals_1_stack_item_frame_extended { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */ +- // u2 offset_delta; +- // verification_type_info stack[1]; +- // } +- else if (frame_type == 247) { +- stackmap_p += 2; +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // chop_frame { +- // u1 frame_type = CHOP; /* 248-250 */ +- // u2 offset_delta; +- // } +- else if (frame_type >= 248 && frame_type <= 250) { +- stackmap_p += 2; +- } +- +- // same_frame_extended { +- // u1 frame_type = SAME_FRAME_EXTENDED; /* 251*/ +- // u2 offset_delta; +- // } +- else if (frame_type == 251) { +- stackmap_p += 2; +- } +- +- // append_frame { +- // u1 frame_type = APPEND; /* 252-254 */ +- // u2 offset_delta; +- // verification_type_info locals[frame_type - 251]; +- // } +- else if (frame_type >= 252 && frame_type <= 254) { +- assert(stackmap_p + 2 <= stackmap_end, +- "no room for offset_delta"); +- stackmap_p += 2; +- u1 len = frame_type - 251; +- for (u1 i = 0; i < len; i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- +- // full_frame { +- // u1 frame_type = FULL_FRAME; /* 255 */ +- // u2 offset_delta; +- // u2 number_of_locals; +- // verification_type_info locals[number_of_locals]; +- // u2 number_of_stack_items; +- // verification_type_info stack[number_of_stack_items]; +- // } +- else if (frame_type == 255) { +- assert(stackmap_p + 2 + 2 <= stackmap_end, +- "no room for smallest full_frame"); +- stackmap_p += 2; +- +- u2 number_of_locals = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 locals_i = 0; locals_i < number_of_locals; locals_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // Use the largest size for the number_of_stack_items, but only get +- // the right number of bytes. +- u2 number_of_stack_items = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 stack_i = 0; stack_i < number_of_stack_items; stack_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- } // end while there is a stack_map_frame +- assert(number_of_entries == calc_number_of_entries, "sanity check"); +-} // end rewrite_cp_refs_in_stack_map_table() +- +- +-// Rewrite constant pool references in the verification type info +-// portion of the method's stackmap table. 
These "structures" are +-// adapted from the StackMapTable_attribute that is described in +-// section 4.8.4 of the 6.0 version of the VM spec (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// The verification_type_info structure is a u1 tag followed by 0 or +-// more bytes of data: +-// +-// union verification_type_info { +-// Top_variable_info; +-// Integer_variable_info; +-// Float_variable_info; +-// Long_variable_info; +-// Double_variable_info; +-// Null_variable_info; +-// UninitializedThis_variable_info; +-// Object_variable_info; +-// Uninitialized_variable_info; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_verification_type_info( +- address& stackmap_p_ref, address stackmap_end, u2 frame_i, +- u1 frame_type, TRAPS) { +- +- assert(stackmap_p_ref + 1 <= stackmap_end, "no room for tag"); +- u1 tag = *stackmap_p_ref; +- stackmap_p_ref++; +- +- switch (tag) { +- // Top_variable_info { +- // u1 tag = ITEM_Top; /* 0 */ +- // } +- // verificationType.hpp has zero as ITEM_Bogus instead of ITEM_Top +- case 0: // fall through +- +- // Integer_variable_info { +- // u1 tag = ITEM_Integer; /* 1 */ +- // } +- case ITEM_Integer: // fall through +- +- // Float_variable_info { +- // u1 tag = ITEM_Float; /* 2 */ +- // } +- case ITEM_Float: // fall through +- +- // Double_variable_info { +- // u1 tag = ITEM_Double; /* 3 */ +- // } +- case ITEM_Double: // fall through +- +- // Long_variable_info { +- // u1 tag = ITEM_Long; /* 4 */ +- // } +- case ITEM_Long: // fall through +- +- // Null_variable_info { +- // u1 tag = ITEM_Null; /* 5 */ +- // } +- case ITEM_Null: // fall through +- +- // UninitializedThis_variable_info { +- // u1 tag = ITEM_UninitializedThis; /* 6 */ +- // } +- case ITEM_UninitializedThis: +- // nothing more to do for the above tag types +- break; +- +- // Object_variable_info { +- // u1 tag = ITEM_Object; /* 7 */ +- // u2 cpool_index; +- // } +- case ITEM_Object: +- { +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for cpool_index"); +- u2 cpool_index = Bytes::get_Java_u2(stackmap_p_ref); +- u2 new_cp_index = find_new_index(cpool_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("mapped old cpool_index=%d", cpool_index)); +- Bytes::put_Java_u2(stackmap_p_ref, new_cp_index); +- cpool_index = new_cp_index; +- } +- stackmap_p_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, cpool_index=%d", frame_i, +- frame_type, cpool_index)); +- } break; +- +- // Uninitialized_variable_info { +- // u1 tag = ITEM_Uninitialized; /* 8 */ +- // u2 offset; +- // } +- case ITEM_Uninitialized: +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for offset"); +- stackmap_p_ref += 2; +- break; +- +- default: +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, bad tag=0x%x", frame_i, frame_type, tag)); +- ShouldNotReachHere(); +- break; +- } // end switch (tag) +-} // end rewrite_cp_refs_in_verification_type_info() +- +- +-// Change the constant pool associated with klass scratch_class to +-// scratch_cp. If shrink is true, then scratch_cp_length elements +-// are copied from scratch_cp to a smaller constant pool and the +-// smaller constant pool is associated with scratch_class. 
+-void VM_RedefineClasses::set_new_constant_pool( +- instanceKlassHandle scratch_class, constantPoolHandle scratch_cp, +- int scratch_cp_length, bool shrink, TRAPS) { +- assert(!shrink || scratch_cp->length() >= scratch_cp_length, "sanity check"); +- +- if (shrink) { +- // scratch_cp is a merged constant pool and has enough space for a +- // worst case merge situation. We want to associate the minimum +- // sized constant pool with the klass to save space. +- constantPoolHandle smaller_cp(THREAD, +- oopFactory::new_constantPool(scratch_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- // preserve orig_length() value in the smaller copy +- int orig_length = scratch_cp->orig_length(); +- assert(orig_length != 0, "sanity check"); +- smaller_cp->set_orig_length(orig_length); +- scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD); +- scratch_cp = smaller_cp; +- smaller_cp()->set_is_conc_safe(true); +- } +- +- // attach new constant pool to klass +- scratch_cp->set_pool_holder(scratch_class()); +- +- // attach klass to new constant pool +- scratch_class->set_constants(scratch_cp()); +- +- int i; // for portability +- +- // update each field in klass to use new constant pool indices as needed +- for (JavaFieldStream fs(scratch_class); !fs.done(); fs.next()) { +- jshort cur_index = fs.name_index(); +- jshort new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-name_index change: %d to %d", cur_index, new_index)); +- fs.set_name_index(new_index); +- } +- cur_index = fs.signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-signature_index change: %d to %d", cur_index, new_index)); +- fs.set_signature_index(new_index); +- } +- cur_index = fs.initval_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-initval_index change: %d to %d", cur_index, new_index)); +- fs.set_initval_index(new_index); +- } +- cur_index = fs.generic_signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-generic_signature change: %d to %d", cur_index, new_index)); +- fs.set_generic_signature_index(new_index); +- } +- } // end for each field +- +- // Update constant pool indices in the inner classes info to use +- // new constant indices as needed. The inner classes info is a +- // quadruple: +- // (inner_class_info, outer_class_info, inner_name, inner_access_flags) +- typeArrayOop inner_class_list = scratch_class->inner_classes(); +- int icl_length = (inner_class_list == NULL) ? 0 : inner_class_list->length(); +- if (icl_length > 0) { +- typeArrayHandle inner_class_list_h(THREAD, inner_class_list); +- for (int i = 0; i < icl_length; +- i += instanceKlass::inner_class_next_offset) { +- int cur_index = inner_class_list_h->ushort_at(i +- + instanceKlass::inner_class_inner_class_info_offset); +- if (cur_index == 0) { +- continue; // JVM spec. 
allows null inner class refs so skip it +- } +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_class_info change: %d to %d", cur_index, new_index)); +- inner_class_list_h->ushort_at_put(i +- + instanceKlass::inner_class_inner_class_info_offset, new_index); +- } +- cur_index = inner_class_list_h->ushort_at(i +- + instanceKlass::inner_class_outer_class_info_offset); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("outer_class_info change: %d to %d", cur_index, new_index)); +- inner_class_list_h->ushort_at_put(i +- + instanceKlass::inner_class_outer_class_info_offset, new_index); +- } +- cur_index = inner_class_list_h->ushort_at(i +- + instanceKlass::inner_class_inner_name_offset); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_name change: %d to %d", cur_index, new_index)); +- inner_class_list_h->ushort_at_put(i +- + instanceKlass::inner_class_inner_name_offset, new_index); +- } +- } // end for each inner class +- } // end if we have inner classes +- +- // Attach each method in klass to the new constant pool and update +- // to use new constant pool indices as needed: +- objArrayHandle methods(THREAD, scratch_class->methods()); +- for (i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- method->set_constants(scratch_cp()); +- +- int new_index = find_new_index(method->name_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-name_index change: %d to %d", method->name_index(), +- new_index)); +- method->set_name_index(new_index); +- } +- new_index = find_new_index(method->signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-signature_index change: %d to %d", +- method->signature_index(), new_index)); +- method->set_signature_index(new_index); +- } +- new_index = find_new_index(method->generic_signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-generic_signature_index change: %d to %d", +- method->generic_signature_index(), new_index)); +- method->set_generic_signature_index(new_index); +- } +- +- // Update constant pool indices in the method's checked exception +- // table to use new constant indices as needed. +- int cext_length = method->checked_exceptions_length(); +- if (cext_length > 0) { +- CheckedExceptionElement * cext_table = +- method->checked_exceptions_start(); +- for (int j = 0; j < cext_length; j++) { +- int cur_index = cext_table[j].class_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("cext-class_cp_index change: %d to %d", cur_index, new_index)); +- cext_table[j].class_cp_index = (u2)new_index; +- } +- } // end for each checked exception table entry +- } // end if there are checked exception table entries +- +- // Update each catch type index in the method's exception table +- // to use new constant pool indices as needed. 
The exception table +- // holds quadruple entries of the form: +- // (beg_bci, end_bci, handler_bci, klass_index) +- const int beg_bci_offset = 0; +- const int end_bci_offset = 1; +- const int handler_bci_offset = 2; +- const int klass_index_offset = 3; +- const int entry_size = 4; +- +- typeArrayHandle ex_table (THREAD, method->exception_table()); +- int ext_length = ex_table->length(); +- assert(ext_length % entry_size == 0, "exception table format has changed"); +- +- for (int j = 0; j < ext_length; j += entry_size) { +- int cur_index = ex_table->int_at(j + klass_index_offset); +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("ext-klass_index change: %d to %d", cur_index, new_index)); +- ex_table->int_at_put(j + klass_index_offset, new_index); +- } +- } // end for each exception table entry +- +- // Update constant pool indices in the method's local variable +- // table to use new constant indices as needed. The local variable +- // table hold sextuple entries of the form: +- // (start_pc, length, name_index, descriptor_index, signature_index, slot) +- int lvt_length = method->localvariable_table_length(); +- if (lvt_length > 0) { +- LocalVariableTableElement * lv_table = +- method->localvariable_table_start(); +- for (int j = 0; j < lvt_length; j++) { +- int cur_index = lv_table[j].name_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-name_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].name_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].descriptor_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-descriptor_cp_index change: %d to %d", cur_index, +- new_index)); +- lv_table[j].descriptor_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].signature_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-signature_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].signature_cp_index = (u2)new_index; +- } +- } // end for each local variable table entry +- } // end if there are local variable table entries +- +- rewrite_cp_refs_in_stack_map_table(method, THREAD); +- } // end for each method +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +-} // end set_new_constant_pool() +- +- +-// Unevolving classes may point to methods of the_class directly ++// Unevolving classes may point to old methods directly + // from their constant pool caches, itables, and/or vtables. We + // use the SystemDictionary::classes_do() facility and this helper +-// to fix up these pointers. ++// to fix up these pointers. Additional field offsets and vtable indices ++// in the constant pool cache entries are fixed. + // + // Note: We currently don't support updating the vtable in + // arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. 
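// ---------------------------------------------------------------------------
// The removed set_new_constant_pool code applies the same move everywhere:
// look the old index up in a mapping built during constant pool merging
// (find_new_index) and rewrite only when a mapping exists, with 0 meaning
// "unchanged". A stand-alone shape of that pattern over one table of u2
// indices, using an ordinary hash map in place of the merge-time structures:
#include <cstdint>
#include <unordered_map>
#include <vector>

typedef std::unordered_map<uint16_t, uint16_t> IndexMap;  // old -> new; no entry == unchanged

static uint16_t toy_find_new_index(const IndexMap& map, uint16_t old_index) {
  IndexMap::const_iterator it = map.find(old_index);
  return it == map.end() ? 0 : it->second;  // 0 mirrors find_new_index's "no change"
}

static void remap_indices(std::vector<uint16_t>& table, const IndexMap& map) {
  for (std::size_t i = 0; i < table.size(); i++) {
    uint16_t new_index = toy_find_new_index(map, table[i]);
    if (new_index != 0) table[i] = new_index;  // entry 0 is never a valid cp index
  }
}
// ---------------------------------------------------------------------------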
+-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(klassOop k_oop,
+-       oop initiating_loader, TRAPS) {
++void VM_RedefineClasses::adjust_cpool_cache(klassOop k_oop_latest, oop initiating_loader, TRAPS) {
++  klassOop k_oop = k_oop_latest;
++  while (k_oop != NULL) {
++    //tty->print_cr("name=%s", k_oop->klass_part()->name()->as_C_string());
++/*
++    methodOop *matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length());
++    methodOop *matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length());
++
++    for (int i=0; i<_matching_methods_length; i++) {
++      matching_old_methods[i] = (methodOop)_old_methods->obj_at(_matching_old_methods[i]);
++      matching_new_methods[i] = (methodOop)_new_methods->obj_at(_matching_new_methods[i]);
++    }*/
++
++    Klass *k = k_oop->klass_part();
++    if (k->oop_is_instance()) {
++      HandleMark hm(THREAD);
++      instanceKlass *ik = (instanceKlass *) k;
++
++      constantPoolHandle other_cp;
++      constantPoolCacheOop cp_cache;
++
++      other_cp = constantPoolHandle(ik->constants());
++
++      for (int i=0; i<other_cp->length(); i++) {
++        if (other_cp->tag_at(i).is_klass()) {
++          klassOop klass = other_cp->klass_at(i, THREAD);
++          if (klass->klass_part()->new_version() != NULL) {
++
++            // (tw) TODO: check why/if this is necessary
++            other_cp->klass_at_put(i, klass->klass_part()->new_version());
++          }
++          klass = other_cp->klass_at(i, THREAD);
++          assert(klass->klass_part()->new_version() == NULL, "Must be new klass!");
++        }
++      }
++
++      cp_cache = other_cp->cache();
++
++      if (cp_cache != NULL) {
++        cp_cache->adjust_entries(NULL,
++                                 NULL,
++                                 0);
++      }
++
++      // If bytecode rewriting is enabled, we also need to unpatch bytecode to force resolution of zeroed entries
++      if (RewriteBytecodes) {
++        ik->methods_do(unpatch_bytecode);
++      }
++    }
++    k_oop = k_oop->klass_part()->old_version();
++  }
++}
++
++void VM_RedefineClasses::update_jmethod_ids() {
++  for (int j = 0; j < _matching_methods_length; ++j) {
++    methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]);
++    RC_TRACE(0x00008000, ("matching method %s", old_method->name_and_sig_as_C_string()));
++
++    jmethodID jmid = old_method->find_jmethod_id_or_null();
++    if (old_method->new_version() != NULL && jmid == NULL) {
++      // (tw) Have to create jmethodID in this case
++      jmid = old_method->jmethod_id();
++    }
++
++    if (jmid != NULL) {
++      // There is a jmethodID, change it to point to the new method
++      methodHandle new_method_h((methodOop)_new_methods->obj_at(_matching_new_methods[j]));
++      if (old_method->new_version() == NULL) {
++        methodHandle old_method_h((methodOop)_old_methods->obj_at(_matching_old_methods[j]));
++        jmethodID new_jmethod_id = JNIHandles::make_jmethod_id(old_method_h);
++        bool result = instanceKlass::cast(old_method_h->method_holder())->update_jmethod_id(old_method_h(), new_jmethod_id);
++        //RC_TRACE(0x00008000, ("Changed jmethodID for old method assigned to %d / result=%d", new_jmethod_id, result);
++        //RC_TRACE(0x00008000, ("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id());
++      } else {
++        jmethodID mid = new_method_h->jmethod_id();
++        bool result = instanceKlass::cast(new_method_h->method_holder())->update_jmethod_id(new_method_h(), jmid);
++        //RC_TRACE(0x00008000, ("Changed jmethodID for new method assigned to %d / result=%d", jmid, result);
++
++      }
++      JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h);
++      //RC_TRACE(0x00008000, ("changing method associated with jmethod id %d to %s", (int)jmid,
new_method_h->name()->as_C_string()); ++ assert(JNIHandles::resolve_jmethod_id(jmid) == (methodOop)_new_methods->obj_at(_matching_new_methods[j]), "should be replaced"); ++ jmethodID mid = ((methodOop)_new_methods->obj_at(_matching_new_methods[j]))->jmethod_id(); ++ assert(JNIHandles::resolve_non_null((jobject)mid) == new_method_h(), "must match!"); ++ ++ //RC_TRACE(0x00008000, ("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); ++ } ++ } ++} ++ ++ ++// Deoptimize all compiled code that depends on this class. ++// ++// If the can_redefine_classes capability is obtained in the onload ++// phase then the compiler has recorded all dependencies from startup. ++// In that case we need only deoptimize and throw away all compiled code ++// that depends on the class. ++// ++// If can_redefine_classes is obtained sometime after the onload ++// phase then the dependency information may be incomplete. In that case ++// the first call to RedefineClasses causes all compiled code to be ++// thrown away. As can_redefine_classes has been obtained then ++// all future compilations will record dependencies so second and ++// subsequent calls to RedefineClasses need only throw away code ++// that depends on the class. ++// ++void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) { ++ assert_locked_or_safepoint(Compile_lock); ++ ++ // All dependencies have been recorded from startup or this is a second or ++ // subsequent use of RedefineClasses ++ ++ // For now deopt all ++ // (tw) TODO: Improve the dependency system such that we can safely deopt only a subset of the methods ++ if (0 && JvmtiExport::all_dependencies_are_recorded()) { ++ Universe::flush_evol_dependents_on(k_h); ++ } else { ++ CodeCache::mark_all_nmethods_for_deoptimization(); ++ ++ ResourceMark rm(THREAD); ++ DeoptimizationMarker dm; ++ ++ // Deoptimize all activations depending on marked nmethods ++ Deoptimization::deoptimize_dependents(); ++ ++ // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies) ++ CodeCache::make_marked_nmethods_not_entrant(); ++ ++ // From now on we know that the dependency information is complete ++ JvmtiExport::set_all_dependencies_are_recorded(true); ++ } ++} ++ ++void VM_RedefineClasses::compute_added_deleted_matching_methods() { ++ methodOop old_method; ++ methodOop new_method; ++ ++ _matching_old_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _matching_new_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _added_methods = NEW_RESOURCE_ARRAY(int, _new_methods->length()); ++ _deleted_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ ++ _matching_methods_length = 0; ++ _deleted_methods_length = 0; ++ _added_methods_length = 0; ++ ++ int nj = 0; ++ int oj = 0; ++ while (true) { ++ if (oj >= _old_methods->length()) { ++ if (nj >= _new_methods->length()) { ++ break; // we've looked at everything, done ++ } ++ // New method at the end ++ new_method = (methodOop) _new_methods->obj_at(nj); ++ _added_methods[_added_methods_length++] = nj; ++ ++nj; ++ } else if (nj >= _new_methods->length()) { ++ // Old method, at the end, is deleted ++ old_method = (methodOop) _old_methods->obj_at(oj); ++ _deleted_methods[_deleted_methods_length++] = oj; ++ ++oj; ++ } else { ++ old_method = (methodOop) _old_methods->obj_at(oj); ++ new_method = (methodOop) _new_methods->obj_at(nj); ++ if (old_method->name() == new_method->name()) { ++ if (old_method->signature() == new_method->signature()) { ++ 
_matching_old_methods[_matching_methods_length ] = oj;//old_method; ++ _matching_new_methods[_matching_methods_length++] = nj;//new_method; ++ ++nj; ++ ++oj; ++ } else { ++ // added overloaded have already been moved to the end, ++ // so this is a deleted overloaded method ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; ++ ++oj; ++ } ++ } else { // names don't match ++ if (old_method->name()->fast_compare(new_method->name()) > 0) { ++ // new method ++ _added_methods[_added_methods_length++] = nj;//new_method; ++ ++nj; ++ } else { ++ // deleted method ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; ++ ++oj; ++ } ++ } ++ } ++ } ++ assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity"); ++ assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity"); ++ RC_TRACE(0x00008000, ("Matching methods = %d / deleted methods = %d / added methods = %d", ++ _matching_methods_length, _deleted_methods_length, _added_methods_length)); ++} ++ ++ ++ ++// Install the redefinition of a class: ++// - house keeping (flushing breakpoints and caches, deoptimizing ++// dependent compiled code) ++// - adjusting constant pool caches and vtables in other classes ++void VM_RedefineClasses::redefine_single_class(instanceKlassHandle the_new_class, TRAPS) { ++ ++ ResourceMark rm(THREAD); ++ ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); ++ ++ instanceKlassHandle the_old_class = instanceKlassHandle(THREAD, the_new_class->old_version()); ++ ++#ifndef JVMTI_KERNEL ++ // Remove all breakpoints in methods of this class ++ JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints(); ++ jvmti_breakpoints.clearall_in_class_at_safepoint(the_old_class()); ++#endif // !JVMTI_KERNEL ++ ++ if (the_old_class() == Universe::reflect_invoke_cache()->klass()) { ++ // We are redefining java.lang.reflect.Method. Method.invoke() is ++ // cached and users of the cache care about each active version of ++ // the method so we have to track this previous version. ++ // Do this before methods get switched ++ Universe::reflect_invoke_cache()->add_previous_version( ++ the_old_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); ++ } ++ ++ _old_methods = the_old_class->methods(); ++ _new_methods = the_new_class->methods(); ++ _the_class_oop = the_old_class(); ++ compute_added_deleted_matching_methods(); ++ ++ // track which methods are EMCP for add_previous_version() call below ++ ++ // (tw) TODO: Check if we need the concept of EMCP? ++ BitMap emcp_methods(_old_methods->length()); ++ int emcp_method_count = 0; ++ emcp_methods.clear(); // clears 0..(length() - 1) ++ ++ // We need to mark methods as old!! 
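// ---------------------------------------------------------------------------
// compute_added_deleted_matching_methods above is a classic two-pointer merge
// over two method arrays sorted by name (the class file parser sorts them and
// moves added overloads to the end). A stand-alone version of the same
// classification, assuming both inputs are sorted by name:
#include <string>
#include <utility>
#include <vector>

struct ToyMeth { std::string name; std::string sig; };
struct MatchResult {
  std::vector<std::pair<std::size_t, std::size_t> > matching;  // (old index, new index)
  std::vector<std::size_t> deleted_old;
  std::vector<std::size_t> added_new;
};

static MatchResult classify(const std::vector<ToyMeth>& old_m,
                            const std::vector<ToyMeth>& new_m) {
  MatchResult r;
  std::size_t oj = 0, nj = 0;
  while (oj < old_m.size() || nj < new_m.size()) {
    if (oj >= old_m.size()) {
      r.added_new.push_back(nj++);                       // trailing new methods
    } else if (nj >= new_m.size()) {
      r.deleted_old.push_back(oj++);                     // trailing old methods
    } else if (old_m[oj].name == new_m[nj].name) {
      if (old_m[oj].sig == new_m[nj].sig) {
        r.matching.push_back(std::make_pair(oj, nj));
        oj++; nj++;
      } else {
        r.deleted_old.push_back(oj++);                   // deleted overload
      }
    } else if (old_m[oj].name < new_m[nj].name) {
      r.deleted_old.push_back(oj++);                     // old name has no peer
    } else {
      r.added_new.push_back(nj++);                       // new name has no peer
    }
  }
  return r;
}
// ---------------------------------------------------------------------------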
++ check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); ++ update_jmethod_ids(); ++ ++ // keep track of previous versions of this class ++ the_new_class->add_previous_version(the_old_class, &emcp_methods, ++ emcp_method_count); ++ ++ // TODO: ++ transfer_old_native_function_registrations(the_old_class); ++ ++ ++#ifdef ASSERT ++ ++// klassOop systemLookup1 = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++// assert(systemLookup1 == the_new_class(), "New class must be in system dictionary!"); ++ ++ //JNIHandles::verify(); ++ ++// klassOop systemLookup = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++ ++// assert(systemLookup == the_new_class(), "New class must be in system dictionary!"); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); ++ ++ for (int i=0; i<the_new_class->methods()->length(); i++) { ++ assert(((methodOop)the_new_class->methods()->obj_at(i))->method_holder() == the_new_class(), "method holder must match!"); ++ } ++ ++ _old_methods->verify(); ++ _new_methods->verify(); ++ ++ the_new_class->vtable()->verify(tty); ++ the_old_class->vtable()->verify(tty); ++ ++#endif ++ ++ // increment the classRedefinedCount field in the_class and in any ++ // direct and indirect subclasses of the_class ++ increment_class_counter((instanceKlass *)the_old_class()->klass_part(), THREAD); ++ ++} ++ ++ ++void VM_RedefineClasses::check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p) { ++ RC_TRACE(0x00008000, ("Checking matching methods for EMCP")); ++ *emcp_method_count_p = 0; ++ int obsolete_count = 0; ++ int old_index = 0; ++ for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ methodOop new_method = (methodOop)_new_methods->obj_at(_matching_new_methods[j]); ++ methodOop old_array_method; ++ ++ // Maintain an old_index into the _old_methods array by skipping ++ // deleted methods ++ while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) ++ != old_method) { ++ ++old_index; ++ } ++ ++ if (MethodComparator::methods_EMCP(old_method, new_method)) { ++ // The EMCP definition from JSR-163 requires the bytecodes to be ++ // the same with the exception of constant pool indices which may ++ // differ. However, the constants referred to by those indices ++ // must be the same. ++ // ++ // We use methods_EMCP() for comparison since constant pool ++ // merging can remove duplicate constant pool entries that were ++ // present in the old method and removed from the rewritten new ++ // method. A faster binary comparison function would consider the ++ // old and new methods to be different when they are actually ++ // EMCP. ++ ++ // track which methods are EMCP for add_previous_version() call ++ emcp_methods->set_bit(old_index); ++ (*emcp_method_count_p)++; ++ ++ // An EMCP method is _not_ obsolete. An obsolete method has a ++ // different jmethodID than the current method. An EMCP method ++ // has the same jmethodID as the current method. Having the ++ // same jmethodID for all EMCP versions of a method allows for ++ // a consistent view of the EMCP methods regardless of which ++ // EMCP method you happen to have in hand. 
For example, a ++ // breakpoint set in one EMCP method will work for all EMCP ++ // versions of the method including the current one. ++ ++ old_method->set_new_version(new_method); ++ new_method->set_old_version(old_method); ++ ++ RC_TRACE(0x00008000, ("Found EMCP method %s", old_method->name_and_sig_as_C_string())); ++ ++ // Transfer breakpoints ++ instanceKlass *ik = instanceKlass::cast(old_method->method_holder()); ++ for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = bp->next()) { ++ RC_TRACE(0x00000002, ("Checking breakpoint")); ++ RC_TRACE(0x00000002, ("%d / %d", ++ bp->match(old_method), bp->match(new_method))); ++ if (bp->match(old_method)) { ++ assert(bp->match(new_method), "if old method is method, then new method must match too"); ++ RC_TRACE(0x00000002, ("Found a breakpoint in an old EMCP method")); ++ new_method->set_breakpoint(bp->bci()); ++ } ++ } ++ ++ ++ ++ } else { ++ // mark obsolete methods as such ++ old_method->set_is_obsolete(); ++ obsolete_count++; ++ ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ RC_TRACE(0x00008000, ("mark %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string())); ++ } ++ old_method->set_is_old(); ++ } ++ for (int i = 0; i < _deleted_methods_length; ++i) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_deleted_methods[i]); ++ ++ //assert(old_method->vtable_index() < 0, ++ // "cannot delete methods with vtable entries");; ++ ++ // Mark all deleted methods as old and obsolete ++ old_method->set_is_old(); ++ old_method->set_is_obsolete(); ++ ++obsolete_count; ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ RC_TRACE(0x00008000, ("mark deleted %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string())); ++ } ++ //assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), "sanity check"); ++ RC_TRACE(0x00008000, ("EMCP_cnt=%d, obsolete_cnt=%d !", *emcp_method_count_p, obsolete_count)); ++} ++ ++// Increment the classRedefinedCount field in the specific instanceKlass ++// and in all direct and indirect subclasses. ++void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) { ++ oop class_mirror = ik->java_mirror(); ++ klassOop class_oop = java_lang_Class::as_klassOop(class_mirror); ++ int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1; ++ java_lang_Class::set_classRedefinedCount(class_mirror, new_count); ++ RC_TRACE(0x00008000, ("updated count for class=%s to %d", ik->external_name(), new_count)); ++} ++ ++#ifndef PRODUCT ++void VM_RedefineClasses::check_class(klassOop k_oop, TRAPS) { + Klass *k = k_oop->klass_part(); + if (k->oop_is_instance()) { + HandleMark hm(THREAD); + instanceKlass *ik = (instanceKlass *) k; +- +- // HotSpot specific optimization! HotSpot does not currently +- // support delegation from the bootstrap class loader to a +- // user-defined class loader. This means that if the bootstrap +- // class loader is the initiating class loader, then it will also +- // be the defining class loader. This also means that classes +- // loaded by the bootstrap class loader cannot refer to classes +- // loaded by a user-defined class loader. Note: a user-defined +- // class loader can delegate to the bootstrap class loader. 
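Back in check_methods_and_mark_as_obsolete() above, the EMCP case does two things: it chains the old and new method versions with set_new_version()/set_old_version(), and it re-applies every breakpoint bci found on the old version to the new one, which is what keeps debuggers working across a hot swap. A toy model of that bookkeeping, using illustrative names rather than HotSpot types:

    // Minimal sketch of EMCP version chaining and breakpoint transfer.
    // MethodVersion and link_versions are illustrative, not HotSpot API.
    #include <cassert>
    #include <cstddef>
    #include <set>

    struct MethodVersion {
      MethodVersion* old_version;  // previous EMCP version, if any
      MethodVersion* new_version;  // next EMCP version, if any
      std::set<int>  breakpoints;  // bcis where breakpoints are set

      MethodVersion() : old_version(NULL), new_version(NULL) {}
    };

    // Chain 'newer' after 'older' and carry breakpoints forward, mirroring
    // set_new_version()/set_old_version() and set_breakpoint(bp->bci()) above.
    static void link_versions(MethodVersion* older, MethodVersion* newer) {
      older->new_version = newer;
      newer->old_version = older;
      for (std::set<int>::const_iterator it = older->breakpoints.begin();
           it != older->breakpoints.end(); ++it) {
        newer->breakpoints.insert(*it);
      }
    }

    int main() {
      MethodVersion v1, v2;
      v1.breakpoints.insert(42);             // breakpoint at bci 42, old version
      link_versions(&v1, &v2);
      assert(v2.breakpoints.count(42) == 1); // survives the redefinition
      return 0;
    }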
+- // +- // If the current class being redefined has a user-defined class +- // loader as its defining class loader, then we can skip all +- // classes loaded by the bootstrap class loader. +- bool is_user_defined = +- instanceKlass::cast(_the_class_oop)->class_loader() != NULL; +- if (is_user_defined && ik->class_loader() == NULL) { +- return; +- } +- +- // This is a very busy routine. We don't want too much tracing +- // printed out. +- bool trace_name_printed = false; +- +- // Very noisy: only enable this call if you are trying to determine +- // that a specific class gets found by this routine. +- // RC_TRACE macro has an embedded ResourceMark +- // RC_TRACE_WITH_THREAD(0x00100000, THREAD, +- // ("adjust check: name=%s", ik->external_name())); +- // trace_name_printed = true; +- +- // Fix the vtable embedded in the_class and subclasses of the_class, +- // if one exists. We discard scratch_class and we don't keep an +- // instanceKlass around to hold obsolete methods so we don't have +- // any other instanceKlass embedded vtables to update. The vtable +- // holds the methodOops for virtual (but not final) methods. +- if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { +- // ik->vtable() creates a wrapper object; rm cleans it up ++ assert(ik->is_newest_version(), "must be latest version in system dictionary"); ++ ++ if (ik->vtable_length() > 0) { + ResourceMark rm(THREAD); +- ik->vtable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // If the current class has an itable and we are either redefining an +- // interface or if the current class is a subclass of the_class, then +- // we potentially have to fix the itable. If we are redefining an +- // interface, then we have to call adjust_method_entries() for +- // every instanceKlass that has an itable since there isn't a +- // subclass relationship between an interface and an instanceKlass. +- if (ik->itable_length() > 0 && (Klass::cast(_the_class_oop)->is_interface() +- || ik->is_subclass_of(_the_class_oop))) { +- // ik->itable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->itable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // The constant pools in other classes (other_cp) can refer to +- // methods in the_class. We have to update method information in +- // other_cp's cache. If other_cp has a previous version, then we +- // have to repeat the process for each previous version. The +- // constant pool cache holds the methodOops for non-virtual +- // methods and for virtual, final methods. +- // +- // Special case: if the current class is the_class, then new_cp +- // has already been attached to the_class and old_cp has already +- // been added as a previous version. The new_cp doesn't have any +- // cached references to old methods so it doesn't need to be +- // updated. We can simply start with the previous version(s) in +- // that case. 
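The code being removed in this region kept the rest of the system consistent by patching dispatch data in place: every vtable and itable that could hold one of the _matching_old_methods had its entry swapped for the corresponding _matching_new_methods element (the constant pool cache fix-up follows just below), whereas DCEVM rebuilds the affected classes and merely asserts that no stale entries survive. A minimal sketch of that in-place substitution, with Method as an illustrative stand-in for methodOop:

    #include <cstddef>
    #include <vector>

    struct Method {};  // stand-in for methodOop

    // Replace every occurrence of old_methods[k] in 'table' with
    // new_methods[k], as the removed adjust_method_entries() calls did.
    static int adjust_entries(std::vector<Method*>& table,
                              const std::vector<Method*>& old_methods,
                              const std::vector<Method*>& new_methods) {
      int updated = 0;
      for (size_t i = 0; i < table.size(); ++i) {
        for (size_t k = 0; k < old_methods.size(); ++k) {
          if (table[i] == old_methods[k]) {
            table[i] = new_methods[k];  // repoint the dispatch slot
            ++updated;
            break;
          }
        }
      }
      return updated;
    }

The trace_name_printed flag threaded through the removed calls exists only to keep the RC_TRACE output terse; it plays no functional role.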
+- constantPoolHandle other_cp; +- constantPoolCacheOop cp_cache; +- +- if (k_oop != _the_class_oop) { +- // this klass' constant pool cache may need adjustment +- other_cp = constantPoolHandle(ik->constants()); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); ++ if (!ik->vtable()->check_no_old_entries()) { ++ RC_TRACE(0x00000001, ("size of class: %d\n", ++ k_oop->size())); ++ RC_TRACE(0x00000001, ("klassVtable::check_no_old_entries failure -- OLD method found -- class: %s", ++ ik->signature_name())); ++ assert(false, "OLD method found"); + } +- } +- { +- ResourceMark rm(THREAD); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. +- { +- // the previous versions' constant pool caches may need adjustment +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- other_cp = pv_info->prev_constant_pool_handle(); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up +- } +-} +- +-void VM_RedefineClasses::update_jmethod_ids() { +- for (int j = 0; j < _matching_methods_length; ++j) { +- methodOop old_method = _matching_old_methods[j]; +- jmethodID jmid = old_method->find_jmethod_id_or_null(); +- if (jmid != NULL) { +- // There is a jmethodID, change it to point to the new method +- methodHandle new_method_h(_matching_new_methods[j]); +- JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); +- assert(JNIHandles::resolve_jmethod_id(jmid) == _matching_new_methods[j], +- "should be replaced"); ++ ++ ik->vtable()->verify(tty, true); + } + } + } + +-void VM_RedefineClasses::check_methods_and_mark_as_obsolete( +- BitMap *emcp_methods, int * emcp_method_count_p) { +- *emcp_method_count_p = 0; +- int obsolete_count = 0; +- int old_index = 0; +- for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { +- methodOop old_method = _matching_old_methods[j]; +- methodOop new_method = _matching_new_methods[j]; +- methodOop old_array_method; +- +- // Maintain an old_index into the _old_methods array by skipping +- // deleted methods +- while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) +- != old_method) { +- ++old_index; ++#endif ++ ++VM_RedefineClasses::FindAffectedKlassesClosure::FindAffectedKlassesClosure( GrowableArray<instanceKlassHandle> *original_klasses, GrowableArray<instanceKlassHandle> *result ) ++{ ++ assert(original_klasses != NULL && result != NULL, ""); ++ this->_original_klasses = original_klasses; ++ this->_result = result; ++ SystemDictionary::classes_do(this); ++} ++ ++void VM_RedefineClasses::FindAffectedKlassesClosure::do_object( oop obj ) ++{ ++ klassOop klass = (klassOop)obj; ++ assert(!_result->contains(klass), "must not occur more than once!"); ++ assert(klass->klass_part()->new_version() == NULL, "Only last version is valid entry in system dictionary"); ++ ++ for(int i=0; i<_original_klasses->length(); i++) { ++ instanceKlassHandle cur = _original_klasses->at(i); ++ if (cur() != klass && 
klass->klass_part()->is_subtype_of(cur()) && !_original_klasses->contains(klass)) { ++ RC_TRACE(0x00008000, ("Found affected class: %s", klass->klass_part()->name()->as_C_string())); ++ _result->append(klass); ++ break; + } +- +- if (MethodComparator::methods_EMCP(old_method, new_method)) { +- // The EMCP definition from JSR-163 requires the bytecodes to be +- // the same with the exception of constant pool indices which may +- // differ. However, the constants referred to by those indices +- // must be the same. +- // +- // We use methods_EMCP() for comparison since constant pool +- // merging can remove duplicate constant pool entries that were +- // present in the old method and removed from the rewritten new +- // method. A faster binary comparison function would consider the +- // old and new methods to be different when they are actually +- // EMCP. +- // +- // The old and new methods are EMCP and you would think that we +- // could get rid of one of them here and now and save some space. +- // However, the concept of EMCP only considers the bytecodes and +- // the constant pool entries in the comparison. Other things, +- // e.g., the line number table (LNT) or the local variable table +- // (LVT) don't count in the comparison. So the new (and EMCP) +- // method can have a new LNT that we need so we can't just +- // overwrite the new method with the old method. +- // +- // When this routine is called, we have already attached the new +- // methods to the_class so the old methods are effectively +- // overwritten. However, if an old method is still executing, +- // then the old method cannot be collected until sometime after +- // the old method call has returned. So the overwriting of old +- // methods by new methods will save us space except for those +- // (hopefully few) old methods that are still executing. +- // +- // A method refers to a constMethodOop and this presents another +- // possible avenue to space savings. The constMethodOop in the +- // new method contains possibly new attributes (LNT, LVT, etc). +- // At first glance, it seems possible to save space by replacing +- // the constMethodOop in the old method with the constMethodOop +- // from the new method. The old and new methods would share the +- // same constMethodOop and we would save the space occupied by +- // the old constMethodOop. However, the constMethodOop contains +- // a back reference to the containing method. Sharing the +- // constMethodOop between two methods could lead to confusion in +- // the code that uses the back reference. This would lead to +- // brittle code that could be broken in non-obvious ways now or +- // in the future. +- // +- // Another possibility is to copy the constMethodOop from the new +- // method to the old method and then overwrite the new method with +- // the old method. Since the constMethodOop contains the bytecodes +- // for the method embedded in the oop, this option would change +- // the bytecodes out from under any threads executing the old +- // method and make the thread's bcp invalid. Since EMCP requires +- // that the bytecodes be the same modulo constant pool indices, it +- // is straight forward to compute the correct new bcp in the new +- // constMethodOop from the old bcp in the old constMethodOop. The +- // time consuming part would be searching all the frames in all +- // of the threads to find all of the calls to the old method. 
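FindAffectedKlassesClosure above amounts to one sweep over the system dictionary: any loaded class that is a subtype of a redefined class, but is not itself in the redefined set, is collected, since its vtable and field layout may be affected as well. The same computation in standalone form; Class, is_subtype_of, and find_affected are illustrative stand-ins (the VM's is_subtype_of also covers interfaces):

    #include <cstddef>
    #include <string>
    #include <vector>

    struct Class {
      std::string name;
      Class* super;  // single superclass chain; interfaces omitted for brevity
    };

    static bool is_subtype_of(const Class* k, const Class* of) {
      for (const Class* c = k; c != NULL; c = c->super)
        if (c == of) return true;
      return false;
    }

    static std::vector<Class*> find_affected(const std::vector<Class*>& all_loaded,
                                             const std::vector<Class*>& redefined) {
      std::vector<Class*> affected;
      for (size_t i = 0; i < all_loaded.size(); ++i) {
        Class* k = all_loaded[i];
        bool in_redefined_set = false;
        for (size_t j = 0; j < redefined.size(); ++j)
          if (redefined[j] == k) { in_redefined_set = true; break; }
        if (in_redefined_set) continue;
        for (size_t j = 0; j < redefined.size(); ++j) {
          if (is_subtype_of(k, redefined[j])) {
            affected.push_back(k);  // subtype of a redefined class
            break;
          }
        }
      }
      return affected;
    }

    int main() {
      Class object = {"Object", NULL};
      Class a = {"A", &object};
      Class b = {"B", &a};
      std::vector<Class*> all;
      all.push_back(&object); all.push_back(&a); all.push_back(&b);
      std::vector<Class*> redefined(1, &a);
      return (int)find_affected(all, redefined).size();  // 1: only B is affected
    }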
+- // +- // It looks like we will have to live with the limited savings +- // that we get from effectively overwriting the old methods +- // when the new methods are attached to the_class. +- +- // track which methods are EMCP for add_previous_version() call +- emcp_methods->set_bit(old_index); +- (*emcp_method_count_p)++; +- +- // An EMCP method is _not_ obsolete. An obsolete method has a +- // different jmethodID than the current method. An EMCP method +- // has the same jmethodID as the current method. Having the +- // same jmethodID for all EMCP versions of a method allows for +- // a consistent view of the EMCP methods regardless of which +- // EMCP method you happen to have in hand. For example, a +- // breakpoint set in one EMCP method will work for all EMCP +- // versions of the method including the current one. +- } else { +- // mark obsolete methods as such +- old_method->set_is_obsolete(); +- obsolete_count++; +- +- // obsolete methods need a unique idnum +- u2 num = instanceKlass::cast(_the_class_oop)->next_method_idnum(); +- if (num != constMethodOopDesc::UNSET_IDNUM) { +-// u2 old_num = old_method->method_idnum(); +- old_method->set_method_idnum(num); +-// TO DO: attach obsolete annotations to obsolete method's new idnum ++ } ++} ++ ++jvmtiError VM_RedefineClasses::do_topological_class_sorting( const jvmtiClassDefinition *class_defs, int class_count, GrowableArray<instanceKlassHandle> *affected, GrowableArray<instanceKlassHandle> *arr, TRAPS) ++{ ++ GrowableArray< Pair<klassOop, klassOop> > *links = new GrowableArray< Pair<klassOop, klassOop> >(); ++ ++ for (int i=0; i<class_count; i++) { ++ ++ oop mirror = JNIHandles::resolve_non_null(class_defs[i].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ instanceKlassHandle the_class(THREAD, the_class_oop); ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ++ ClassFileStream st((u1*) class_defs[i].class_bytes, ++ class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); ++ ClassFileParser cfp(&st); ++ ++ GrowableArray<Symbol*> symbolArr; ++ RC_TRACE(0x00000002, ("Before find super symbols of class %s", ++ the_class->name()->as_C_string())); ++ cfp.findSuperSymbols(the_class->name(), the_class_loader, protection_domain, the_class, symbolArr, THREAD); ++ ++ for (int j=0; j<symbolArr.length(); j++) { ++ Symbol* sym = symbolArr.at(j); ++ ++ RC_TRACE(0x00008000, ("Before adding link to super class %s", sym->as_C_string())); ++ ++ for (int k=0; k<arr->length(); k++) { ++ klassOop curOop = arr->at(k)(); ++ // (tw) TODO: Check if we get aliasing problems with different class loaders? ++ if (curOop->klass_part()->name() == sym /*&& curOop->klass_part()->class_loader() == the_class_loader()*/) { ++ RC_TRACE(0x00000002, ("Found class to link")); ++ links->append(Pair<klassOop, klassOop>(curOop, the_class())); ++ break; ++ } + } +- // With tracing we try not to "yack" too much. The position of +- // this trace assumes there are fewer obsolete methods than +- // EMCP methods. 
+- RC_TRACE(0x00000100, ("mark %s(%s) as obsolete",
+- old_method->name()->as_C_string(),
+- old_method->signature()->as_C_string()));
+ }
+- old_method->set_is_old();
+ }
+- for (int i = 0; i < _deleted_methods_length; ++i) {
+- methodOop old_method = _deleted_methods[i];
+-
+- assert(old_method->vtable_index() < 0,
+- "cannot delete methods with vtable entries");;
+-
+- // Mark all deleted methods as old and obsolete
+- old_method->set_is_old();
+- old_method->set_is_obsolete();
+- ++obsolete_count;
+- // With tracing we try not to "yack" too much. The position of
+- // this trace assumes there are fewer obsolete methods than
+- // EMCP methods.
+- RC_TRACE(0x00000100, ("mark deleted %s(%s) as obsolete",
+- old_method->name()->as_C_string(),
+- old_method->signature()->as_C_string()));
++
++
++ RC_TRACE(0x00000001, ("Identified links between classes! "));
++
++ for (int i=0; i<affected->length(); i++) {
++
++ instanceKlassHandle klass = affected->at(i);
++
++ klassOop superKlass = klass->super();
++ if (affected->contains(superKlass)) {
++ links->append(Pair<klassOop, klassOop>(superKlass, klass()));
++ }
++
++ objArrayOop superInterfaces = klass->local_interfaces();
++ for (int j=0; j<superInterfaces->length(); j++) {
++ klassOop interfaceKlass = (klassOop)superInterfaces->obj_at(j);
++ if (arr->contains(interfaceKlass)) {
++ links->append(Pair<klassOop, klassOop>(interfaceKlass, klass()));
++ }
++ }
+ }
+- assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(),
+- "sanity check");
+- RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p,
+- obsolete_count));
++
++ if (RC_TRACE_ENABLED(0x00000002)) {
++ RC_TRACE(0x00000002, ("Identified links: "));
++ for (int i=0; i<links->length(); i++) {
++ RC_TRACE(0x00000002, ("%s to %s",
++ links->at(i).left()->klass_part()->name()->as_C_string(),
++ links->at(i).right()->klass_part()->name()->as_C_string()));
++ }
++ }
++
++ for (int i=0; i<arr->length(); i++) {
++
++ int j;
++ for (j=i; j<arr->length(); j++) {
++
++ int k;
++ for (k=0; k<links->length(); k++) {
++
++ klassOop k1 = links->adr_at(k)->right();
++ klassOop k2 = arr->at(j)();
++ if (k1 == k2) {
++ break;
++ }
++ }
++
++ if (k == links->length()) {
++ break;
++ }
++ }
++
++ if (j == arr->length()) {
++ // circle detected
++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION;
++ }
++
++ for (int k=0; k<links->length(); k++) {
++ if (links->adr_at(k)->left() == arr->at(j)()) {
++ links->at_put(k, links->at(links->length() - 1));
++ links->remove_at(links->length() - 1);
++ k--;
++ }
++ }
++
++ instanceKlassHandle tmp = arr->at(j);
++ arr->at_put(j, arr->at(i));
++ arr->at_put(i, tmp);
++ }
++
++ return JVMTI_ERROR_NONE;
+ }
+
++void VM_RedefineClasses::oops_do(OopClosure *closure) {
++
++ if (_updated_oops != NULL) {
++ for (int i=0; i<_updated_oops->length(); i++) {
++ closure->do_oop(_updated_oops->adr_at(i));
++ }
++ }
++}
++
++void VM_RedefineClasses::transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to) {
++ to->set_is_field_modification_watched(from->is_field_modification_watched());
++ to->set_is_field_access_watched(from->is_field_access_watched());
++ if (from->is_field_modification_watched() || from->is_field_access_watched()) {
++ RC_TRACE(0x00000002, ("Transferred watch for field %s",
++ from->name()->as_C_string()));
++ }
++ update_klass_field_access_flag(to);
++}
++
++void VM_RedefineClasses::update_klass_field_access_flag(fieldDescriptor *fd) {
++ instanceKlass* ik = instanceKlass::cast(fd->field_holder());
++ FieldInfo* fi = FieldInfo::from_field_array(ik->fields(), fd->index());
++ fi->set_access_flags(fd->access_flags().as_short());
++}
++
++
+ // This internal class transfers the native function registration from old methods
+ // to new methods. It is designed to handle both the simple case of unchanged
+ // native methods and the complex cases of native method prefixes being added and/or
+@@ -2842,7 +3167,7 @@
+ // Same, caused by prefix removal only 3_2_1_m -> 3_2_m
+ //
+ class TransferNativeFunctionRegistration {
+- private:
++private:
+ instanceKlassHandle the_class;
+ int prefix_count;
+ char** prefixes;
+@@ -2855,42 +3180,42 @@
+ // (2) with the prefix.
+ // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...)
+ methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len,
+- Symbol* signature) {
+- TempNewSymbol name_symbol = SymbolTable::probe(name_str, (int)name_len);
+- if (name_symbol != NULL) {
+- methodOop method = Klass::cast(the_class())->lookup_method(name_symbol, signature);
+- if (method != NULL) {
+- // Even if prefixed, intermediate methods must exist.
+- if (method->is_native()) {
+- // Wahoo, we found a (possibly prefixed) version of the method, return it.
+- return method;
+- }
+- if (depth < prefix_count) {
+- // Try applying further prefixes (other than this one).
+- method = search_prefix_name_space(depth+1, name_str, name_len, signature);
+- if (method != NULL) {
+- return method; // found
++ Symbol* signature) {
++ Symbol* name_symbol = SymbolTable::probe(name_str, (int)name_len);
++ if (name_symbol != NULL) {
++ methodOop method = Klass::cast(the_class()->klass_part()->new_version())->lookup_method(name_symbol, signature);
++ if (method != NULL) {
++ // Even if prefixed, intermediate methods must exist.
++ if (method->is_native()) {
++ // Wahoo, we found a (possibly prefixed) version of the method, return it.
++ return method;
+ }
+-
+- // Try adding this prefix to the method name and see if it matches
+- // another method name.
+- char* prefix = prefixes[depth];
+- size_t prefix_len = strlen(prefix);
+- size_t trial_len = name_len + prefix_len;
+- char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1);
+- strcpy(trial_name_str, prefix);
+- strcat(trial_name_str, name_str);
+- method = search_prefix_name_space(depth+1, trial_name_str, trial_len,
+- signature);
+- if (method != NULL) {
+- // If found along this branch, it was prefixed, mark as such
+- method->set_is_prefixed_native();
+- return method; // found
++ if (depth < prefix_count) {
++ // Try applying further prefixes (other than this one).
++ method = search_prefix_name_space(depth+1, name_str, name_len, signature);
++ if (method != NULL) {
++ return method; // found
++ }
++
++ // Try adding this prefix to the method name and see if it matches
++ // another method name.
++ char* prefix = prefixes[depth];
++ size_t prefix_len = strlen(prefix);
++ size_t trial_len = name_len + prefix_len;
++ char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1);
++ strcpy(trial_name_str, prefix);
++ strcat(trial_name_str, name_str);
++ method = search_prefix_name_space(depth+1, trial_name_str, trial_len,
++ signature);
++ if (method != NULL) {
++ // If found along this branch, it was prefixed, mark as such
++ method->set_is_prefixed_native();
++ return method; // found
++ }
+ }
+ }
+ }
+- }
+- return NULL; // This whole branch bore nothing
++ return NULL; // This whole branch bore nothing
+ }
+
+ // Return the method name with old prefixes stripped away.
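search_prefix_name_space() above exists because JVMTI agents may wrap native methods by registering name prefixes, so the original implementation lives on under a longer name and has to be found under any combination of the registered prefixes. A simplified standalone sketch of the recursion; it omits the real code's requirement that intermediate wrapper methods exist and that only a native method terminates the search, and lookup/find_prefixed are illustrative names:

    #include <set>
    #include <string>
    #include <vector>

    // Toy "class": the set of method names it declares.
    typedef std::set<std::string> MethodTable;

    static bool lookup(const MethodTable& klass, const std::string& name) {
      return klass.count(name) != 0;
    }

    // Try the name as-is, then with each of prefixes[depth..] either skipped
    // or applied, mirroring search_prefix_name_space(depth, ...).
    static bool find_prefixed(const MethodTable& klass,
                              const std::vector<std::string>& prefixes,
                              size_t depth, const std::string& name,
                              std::string* found) {
      if (lookup(klass, name)) { *found = name; return true; }
      if (depth >= prefixes.size()) return false;
      // Branch 1: search without this prefix.
      if (find_prefixed(klass, prefixes, depth + 1, name, found)) return true;
      // Branch 2: apply this prefix and keep searching with the later ones.
      return find_prefixed(klass, prefixes, depth + 1, prefixes[depth] + name, found);
    }

    int main() {
      MethodTable k;
      k.insert("$trace_$wrap_nativeOp");  // doubly prefixed native method
      std::vector<std::string> prefixes;
      prefixes.push_back("$wrap_");
      prefixes.push_back("$trace_");
      std::string found;
      return find_prefixed(k, prefixes, 0, "nativeOp", &found) ? 0 : 1;
    }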
+@@ -2915,10 +3240,10 @@
+ ResourceMark rm;
+ char* name_str = method_name_without_prefixes(method);
+ return search_prefix_name_space(0, name_str, strlen(name_str),
+- method->signature());
++ method->signature());
+ }
+
+- public:
++public:
+
+ // Construct a native method transfer processor for this class.
+ TransferNativeFunctionRegistration(instanceKlassHandle _the_class) {
+@@ -2929,9 +3254,9 @@
+ }
+
+ // Attempt to transfer any of the old or deleted methods that are native
+- void transfer_registrations(methodOop* old_methods, int methods_length) {
++ void transfer_registrations(instanceKlassHandle old_klass, int* old_methods, int methods_length) {
+ for (int j = 0; j < methods_length; j++) {
+- methodOop old_method = old_methods[j];
++ methodOop old_method = (methodOop)old_klass->methods()->obj_at(old_methods[j]);
+
+ if (old_method->is_native() && old_method->has_native_function()) {
+ methodOop new_method = strip_and_search_for_new_native(old_method);
+@@ -2940,7 +3265,9 @@
+ // Redefine does not send events (except CFLH), certainly not this
+ // behind the scenes re-registration.
+ new_method->set_native_function(old_method->native_function(),
+- !methodOopDesc::native_bind_event_is_interesting);
++ !methodOopDesc::native_bind_event_is_interesting);
++
++ RC_TRACE(0x00008000, ("Transferring native function for method %s", old_method->name()->as_C_string()));
+ }
+ }
+ }
+@@ -2948,494 +3275,8 @@
+ };
+
+ // Don't lose the association between a native method and its JNI function.
+-void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle the_class) {
+- TransferNativeFunctionRegistration transfer(the_class);
+- transfer.transfer_registrations(_deleted_methods, _deleted_methods_length);
+- transfer.transfer_registrations(_matching_old_methods, _matching_methods_length);
++void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle old_klass) {
++ TransferNativeFunctionRegistration transfer(old_klass);
++ transfer.transfer_registrations(old_klass, _deleted_methods, _deleted_methods_length);
++ transfer.transfer_registrations(old_klass, _matching_old_methods, _matching_methods_length);
+ }
+-
+-// Deoptimize all compiled code that depends on this class.
+-//
+-// If the can_redefine_classes capability is obtained in the onload
+-// phase then the compiler has recorded all dependencies from startup.
+-// In that case we need only deoptimize and throw away all compiled code
+-// that depends on the class.
+-//
+-// If can_redefine_classes is obtained sometime after the onload
+-// phase then the dependency information may be incomplete. In that case
+-// the first call to RedefineClasses causes all compiled code to be
+-// thrown away. As can_redefine_classes has been obtained then
+-// all future compilations will record dependencies so second and
+-// subsequent calls to RedefineClasses need only throw away code
+-// that depends on the class.
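Stepping back to do_topological_class_sorting() further above: it orders the redefined classes so that every supertype is placed before its subtypes. The links array holds (supertype, subtype) pairs; the sort repeatedly selects a class with no remaining incoming link, deletes the chosen class's outgoing links, and reports JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION if at some step every remaining class still has a predecessor. A compact standalone version of that selection loop, with Edge and topo_sort as illustrative names:

    #include <cstdio>
    #include <vector>

    struct Edge { int from; int to; };  // 'from' must precede 'to'

    // Sorts nodes 0..n-1 into 'order'; returns false when a cycle remains.
    static bool topo_sort(int n, std::vector<Edge> edges, std::vector<int>& order) {
      order.clear();
      for (int i = 0; i < n; ++i) order.push_back(i);
      for (int i = 0; i < n; ++i) {
        // Find a node in order[i..n-1] with no incoming edge; remaining edges
        // always originate from not-yet-placed nodes.
        int j = i;
        for (; j < n; ++j) {
          bool has_incoming = false;
          for (size_t k = 0; k < edges.size(); ++k)
            if (edges[k].to == order[j]) { has_incoming = true; break; }
          if (!has_incoming) break;
        }
        if (j == n) return false;  // every candidate has a predecessor: cycle
        // Drop the chosen node's outgoing edges, then place it at position i.
        for (size_t k = 0; k < edges.size(); )
          if (edges[k].from == order[j]) { edges[k] = edges.back(); edges.pop_back(); }
          else ++k;
        int tmp = order[j]; order[j] = order[i]; order[i] = tmp;
      }
      return true;
    }

    int main() {
      Edge e1 = {0, 1}, e2 = {1, 2};      // e.g. Object -> A -> B
      std::vector<Edge> edges;
      edges.push_back(e1); edges.push_back(e2);
      std::vector<int> order;
      std::printf(topo_sort(3, edges, order) ? "sorted\n" : "cycle\n");
      return 0;
    }

The quadratic selection is harmless here: a redefinition batch contains at most a handful of classes and links.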
+-// +-void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) { +- assert_locked_or_safepoint(Compile_lock); +- +- // All dependencies have been recorded from startup or this is a second or +- // subsequent use of RedefineClasses +- if (JvmtiExport::all_dependencies_are_recorded()) { +- Universe::flush_evol_dependents_on(k_h); +- } else { +- CodeCache::mark_all_nmethods_for_deoptimization(); +- +- ResourceMark rm(THREAD); +- DeoptimizationMarker dm; +- +- // Deoptimize all activations depending on marked nmethods +- Deoptimization::deoptimize_dependents(); +- +- // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies) +- CodeCache::make_marked_nmethods_not_entrant(); +- +- // From now on we know that the dependency information is complete +- JvmtiExport::set_all_dependencies_are_recorded(true); +- } +-} +- +-void VM_RedefineClasses::compute_added_deleted_matching_methods() { +- methodOop old_method; +- methodOop new_method; +- +- _matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _added_methods = NEW_RESOURCE_ARRAY(methodOop, _new_methods->length()); +- _deleted_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- +- _matching_methods_length = 0; +- _deleted_methods_length = 0; +- _added_methods_length = 0; +- +- int nj = 0; +- int oj = 0; +- while (true) { +- if (oj >= _old_methods->length()) { +- if (nj >= _new_methods->length()) { +- break; // we've looked at everything, done +- } +- // New method at the end +- new_method = (methodOop) _new_methods->obj_at(nj); +- _added_methods[_added_methods_length++] = new_method; +- ++nj; +- } else if (nj >= _new_methods->length()) { +- // Old method, at the end, is deleted +- old_method = (methodOop) _old_methods->obj_at(oj); +- _deleted_methods[_deleted_methods_length++] = old_method; +- ++oj; +- } else { +- old_method = (methodOop) _old_methods->obj_at(oj); +- new_method = (methodOop) _new_methods->obj_at(nj); +- if (old_method->name() == new_method->name()) { +- if (old_method->signature() == new_method->signature()) { +- _matching_old_methods[_matching_methods_length ] = old_method; +- _matching_new_methods[_matching_methods_length++] = new_method; +- ++nj; +- ++oj; +- } else { +- // added overloaded have already been moved to the end, +- // so this is a deleted overloaded method +- _deleted_methods[_deleted_methods_length++] = old_method; +- ++oj; +- } +- } else { // names don't match +- if (old_method->name()->fast_compare(new_method->name()) > 0) { +- // new method +- _added_methods[_added_methods_length++] = new_method; +- ++nj; +- } else { +- // deleted method +- _deleted_methods[_deleted_methods_length++] = old_method; +- ++oj; +- } +- } +- } +- } +- assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity"); +- assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity"); +-} +- +- +- +-// Install the redefinition of a class: +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding a weak reference to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. 
These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-void VM_RedefineClasses::redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS) { +- +- RC_TIMER_START(_timer_rsc_phase1); +- +- oop the_class_mirror = JNIHandles::resolve_non_null(the_jclass); +- klassOop the_class_oop = java_lang_Class::as_klassOop(the_class_mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- +-#ifndef JVMTI_KERNEL +- // Remove all breakpoints in methods of this class +- JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints(); +- jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop); +-#endif // !JVMTI_KERNEL +- +- if (the_class_oop == Universe::reflect_invoke_cache()->klass()) { +- // We are redefining java.lang.reflect.Method. Method.invoke() is +- // cached and users of the cache care about each active version of +- // the method so we have to track this previous version. +- // Do this before methods get switched +- Universe::reflect_invoke_cache()->add_previous_version( +- the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); +- } +- +- // Deoptimize all compiled code that depends on this class +- flush_dependent_code(the_class, THREAD); +- +- _old_methods = the_class->methods(); +- _new_methods = scratch_class->methods(); +- _the_class_oop = the_class_oop; +- compute_added_deleted_matching_methods(); +- update_jmethod_ids(); +- +- // Attach new constant pool to the original klass. The original +- // klass still refers to the old constant pool (for now). +- scratch_class->constants()->set_pool_holder(the_class()); +- +-#if 0 +- // In theory, with constant pool merging in place we should be able +- // to save space by using the new, merged constant pool in place of +- // the old constant pool(s). By "pool(s)" I mean the constant pool in +- // the klass version we are replacing now and any constant pool(s) in +- // previous versions of klass. Nice theory, doesn't work in practice. +- // When this code is enabled, even simple programs throw NullPointer +- // exceptions. I'm guessing that this is caused by some constant pool +- // cache difference between the new, merged constant pool and the +- // constant pool that was just being used by the klass. I'm keeping +- // this code around to archive the idea, but the code has to remain +- // disabled for now. +- +- // Attach each old method to the new constant pool. This can be +- // done here since we are past the bytecode verification and +- // constant pool optimization phases. 
+- for (int i = _old_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)_old_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- +- { +- // walk all previous versions of the klass +- instanceKlass *ik = (instanceKlass *)the_class()->klass_part(); +- PreviousVersionWalker pvw(ik); +- instanceKlassHandle ikh; +- do { +- ikh = pvw.next_previous_version(); +- if (!ikh.is_null()) { +- ik = ikh(); +- +- // attach previous version of klass to the new constant pool +- ik->set_constants(scratch_class->constants()); +- +- // Attach each method in the previous version of klass to the +- // new constant pool +- objArrayOop prev_methods = ik->methods(); +- for (int i = prev_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)prev_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- } +- } while (!ikh.is_null()); +- } +-#endif +- +- // Replace methods and constantpool +- the_class->set_methods(_new_methods); +- scratch_class->set_methods(_old_methods); // To prevent potential GCing of the old methods, +- // and to be able to undo operation easily. +- +- constantPoolOop old_constants = the_class->constants(); +- the_class->set_constants(scratch_class->constants()); +- scratch_class->set_constants(old_constants); // See the previous comment. +-#if 0 +- // We are swapping the guts of "the new class" with the guts of "the +- // class". Since the old constant pool has just been attached to "the +- // new class", it seems logical to set the pool holder in the old +- // constant pool also. However, doing this will change the observable +- // class hierarchy for any old methods that are still executing. A +- // method can query the identity of its "holder" and this query uses +- // the method's constant pool link to find the holder. The change in +- // holding class from "the class" to "the new class" can confuse +- // things. +- // +- // Setting the old constant pool's holder will also cause +- // verification done during vtable initialization below to fail. +- // During vtable initialization, the vtable's class is verified to be +- // a subtype of the method's holder. The vtable's class is "the +- // class" and the method's holder is gotten from the constant pool +- // link in the method itself. For "the class"'s directly implemented +- // methods, the method holder is "the class" itself (as gotten from +- // the new constant pool). The check works fine in this case. The +- // check also works fine for methods inherited from super classes. +- // +- // Miranda methods are a little more complicated. A miranda method is +- // provided by an interface when the class implementing the interface +- // does not provide its own method. These interfaces are implemented +- // internally as an instanceKlass. These special instanceKlasses +- // share the constant pool of the class that "implements" the +- // interface. By sharing the constant pool, the method holder of a +- // miranda method is the class that "implements" the interface. In a +- // non-redefine situation, the subtype check works fine. However, if +- // the old constant pool's pool holder is modified, then the check +- // fails because there is no class hierarchy relationship between the +- // vtable's class and "the new class". 
+- +- old_constants->set_pool_holder(scratch_class()); +-#endif +- +- // track which methods are EMCP for add_previous_version() call below +- BitMap emcp_methods(_old_methods->length()); +- int emcp_method_count = 0; +- emcp_methods.clear(); // clears 0..(length() - 1) +- check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); +- transfer_old_native_function_registrations(the_class); +- +- // The class file bytes from before any retransformable agents mucked +- // with them was cached on the scratch class, move to the_class. +- // Note: we still want to do this if nothing needed caching since it +- // should get cleared in the_class too. +- if (the_class->get_cached_class_file_bytes() == 0) { +- // the_class doesn't have a cache yet so copy it +- the_class->set_cached_class_file( +- scratch_class->get_cached_class_file_bytes(), +- scratch_class->get_cached_class_file_len()); +- } +-#ifndef PRODUCT +- else { +- assert(the_class->get_cached_class_file_bytes() == +- scratch_class->get_cached_class_file_bytes(), "cache ptrs must match"); +- assert(the_class->get_cached_class_file_len() == +- scratch_class->get_cached_class_file_len(), "cache lens must match"); +- } +-#endif +- +- // Replace inner_classes +- typeArrayOop old_inner_classes = the_class->inner_classes(); +- the_class->set_inner_classes(scratch_class->inner_classes()); +- scratch_class->set_inner_classes(old_inner_classes); +- +- // Initialize the vtable and interface table after +- // methods have been rewritten +- { +- ResourceMark rm(THREAD); +- // no exception should happen here since we explicitly +- // do not check loader constraints. +- // compare_and_normalize_class_versions has already checked: +- // - classloaders unchanged, signatures unchanged +- // - all instanceKlasses for redefined classes reused & contents updated +- the_class->vtable()->initialize_vtable(false, THREAD); +- the_class->itable()->initialize_itable(false, THREAD); +- assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception"); +- } +- +- // Leave arrays of jmethodIDs and itable index cache unchanged +- +- // Copy the "source file name" attribute from new class version +- the_class->set_source_file_name(scratch_class->source_file_name()); +- +- // Copy the "source debug extension" attribute from new class version +- the_class->set_source_debug_extension( +- scratch_class->source_debug_extension()); +- +- // Use of javac -g could be different in the old and the new +- if (scratch_class->access_flags().has_localvariable_table() != +- the_class->access_flags().has_localvariable_table()) { +- +- AccessFlags flags = the_class->access_flags(); +- if (scratch_class->access_flags().has_localvariable_table()) { +- flags.set_has_localvariable_table(); +- } else { +- flags.clear_has_localvariable_table(); +- } +- the_class->set_access_flags(flags); +- } +- +- // Replace class annotation fields values +- typeArrayOop old_class_annotations = the_class->class_annotations(); +- the_class->set_class_annotations(scratch_class->class_annotations()); +- scratch_class->set_class_annotations(old_class_annotations); +- +- // Replace fields annotation fields values +- objArrayOop old_fields_annotations = the_class->fields_annotations(); +- the_class->set_fields_annotations(scratch_class->fields_annotations()); +- scratch_class->set_fields_annotations(old_fields_annotations); +- +- // Replace methods annotation fields values +- objArrayOop old_methods_annotations = 
the_class->methods_annotations(); +- the_class->set_methods_annotations(scratch_class->methods_annotations()); +- scratch_class->set_methods_annotations(old_methods_annotations); +- +- // Replace methods parameter annotation fields values +- objArrayOop old_methods_parameter_annotations = +- the_class->methods_parameter_annotations(); +- the_class->set_methods_parameter_annotations( +- scratch_class->methods_parameter_annotations()); +- scratch_class->set_methods_parameter_annotations(old_methods_parameter_annotations); +- +- // Replace methods default annotation fields values +- objArrayOop old_methods_default_annotations = +- the_class->methods_default_annotations(); +- the_class->set_methods_default_annotations( +- scratch_class->methods_default_annotations()); +- scratch_class->set_methods_default_annotations(old_methods_default_annotations); +- +- // Replace minor version number of class file +- u2 old_minor_version = the_class->minor_version(); +- the_class->set_minor_version(scratch_class->minor_version()); +- scratch_class->set_minor_version(old_minor_version); +- +- // Replace major version number of class file +- u2 old_major_version = the_class->major_version(); +- the_class->set_major_version(scratch_class->major_version()); +- scratch_class->set_major_version(old_major_version); +- +- // Replace CP indexes for class and name+type of enclosing method +- u2 old_class_idx = the_class->enclosing_method_class_index(); +- u2 old_method_idx = the_class->enclosing_method_method_index(); +- the_class->set_enclosing_method_indices( +- scratch_class->enclosing_method_class_index(), +- scratch_class->enclosing_method_method_index()); +- scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx); +- +- // keep track of previous versions of this class +- the_class->add_previous_version(scratch_class, &emcp_methods, +- emcp_method_count); +- +- RC_TIMER_STOP(_timer_rsc_phase1); +- RC_TIMER_START(_timer_rsc_phase2); +- +- // Adjust constantpool caches and vtables for all classes +- // that reference methods of the evolved class. +- SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); +- +- if (the_class->oop_map_cache() != NULL) { +- // Flush references to any obsolete methods from the oop map cache +- // so that obsolete methods are not pinned. +- the_class->oop_map_cache()->flush_obsolete_entries(); +- } +- +- // increment the classRedefinedCount field in the_class and in any +- // direct and indirect subclasses of the_class +- increment_class_counter((instanceKlass *)the_class()->klass_part(), THREAD); +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("redefined name=%s, count=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), +- java_lang_Class::classRedefinedCount(the_class_mirror), +- os::available_memory() >> 10)); +- +- RC_TIMER_STOP(_timer_rsc_phase2); +-} // end redefine_single_class() +- +- +-// Increment the classRedefinedCount field in the specific instanceKlass +-// and in all direct and indirect subclasses. 
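The removed increment_class_counter() that follows walks HotSpot's first-child/next-sibling subclass links so that every direct and indirect subclass also has its classRedefinedCount bumped; the DCEVM replacement further above updates only the class itself. A standalone sketch of the removed walk, with illustrative field names:

    #include <cstddef>

    struct Klass {
      int redefined_count;   // mirrors java.lang.Class.classRedefinedCount
      Klass* subklass;       // first subclass
      Klass* next_sibling;   // next subclass of our superclass
    };

    // Bump the counter on 'k' and, recursively, on its whole subclass tree.
    static void increment_class_counter(Klass* k) {
      k->redefined_count++;
      for (Klass* sub = k->subklass; sub != NULL; sub = sub->next_sibling)
        increment_class_counter(sub);
    }

    int main() {
      Klass root = {0, NULL, NULL};
      Klass child = {0, NULL, NULL};
      root.subklass = &child;
      increment_class_counter(&root);
      return root.redefined_count + child.redefined_count;  // 2
    }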
+-void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) { +- oop class_mirror = ik->java_mirror(); +- klassOop class_oop = java_lang_Class::as_klassOop(class_mirror); +- int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1; +- java_lang_Class::set_classRedefinedCount(class_mirror, new_count); +- +- if (class_oop != _the_class_oop) { +- // _the_class_oop count is printed at end of redefine_single_class() +- RC_TRACE_WITH_THREAD(0x00000008, THREAD, +- ("updated count in subclass=%s to %d", ik->external_name(), new_count)); +- } +- +- for (Klass *subk = ik->subklass(); subk != NULL; +- subk = subk->next_sibling()) { +- if (subk->oop_is_instance()) { +- // Only update instanceKlasses +- instanceKlass *subik = (instanceKlass*)subk; +- // recursively do subclasses of the current subclass +- increment_class_counter(subik, THREAD); +- } +- } +-} +- +-#ifndef PRODUCT +-void VM_RedefineClasses::check_class(klassOop k_oop, +- oop initiating_loader, TRAPS) { +- Klass *k = k_oop->klass_part(); +- if (k->oop_is_instance()) { +- HandleMark hm(THREAD); +- instanceKlass *ik = (instanceKlass *) k; +- +- if (ik->vtable_length() > 0) { +- ResourceMark rm(THREAD); +- if (!ik->vtable()->check_no_old_entries()) { +- tty->print_cr("klassVtable::check_no_old_entries failure -- OLD method found -- class: %s", ik->signature_name()); +- ik->vtable()->dump_vtable(); +- dump_methods(); +- assert(false, "OLD method found"); +- } +- } +- } +-} +- +-void VM_RedefineClasses::dump_methods() { +- int j; +- tty->print_cr("_old_methods --"); +- for (j = 0; j < _old_methods->length(); ++j) { +- methodOop m = (methodOop) _old_methods->obj_at(j); +- tty->print("%4d (%5d) ", j, m->vtable_index()); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- tty->print_cr("_new_methods --"); +- for (j = 0; j < _new_methods->length(); ++j) { +- methodOop m = (methodOop) _new_methods->obj_at(j); +- tty->print("%4d (%5d) ", j, m->vtable_index()); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- tty->print_cr("_matching_(old/new)_methods --"); +- for (j = 0; j < _matching_methods_length; ++j) { +- methodOop m = _matching_old_methods[j]; +- tty->print("%4d (%5d) ", j, m->vtable_index()); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- m = _matching_new_methods[j]; +- tty->print(" (%5d) ", m->vtable_index()); +- m->access_flags().print_on(tty); +- tty->cr(); +- } +- tty->print_cr("_deleted_methods --"); +- for (j = 0; j < _deleted_methods_length; ++j) { +- methodOop m = _deleted_methods[j]; +- tty->print("%4d (%5d) ", j, m->vtable_index()); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- tty->print_cr("_added_methods --"); +- for (j = 0; j < _added_methods_length; ++j) { +- methodOop m = _added_methods[j]; +- tty->print("%4d (%5d) ", j, m->vtable_index()); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +-} +-#endif +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.hpp b/src/share/vm/prims/jvmtiRedefineClasses.hpp +--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp +@@ -1,26 +1,29 @@ + /* +- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ ++* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. ++* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++* ++* This code is free software; you can redistribute it and/or modify it ++* under the terms of the GNU General Public License version 2 only, as ++* published by the Free Software Foundation. ++* ++* This code is distributed in the hope that it will be useful, but WITHOUT ++* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++* version 2 for more details (a copy is included in the LICENSE file that ++* accompanied this code). ++* ++* You should have received a copy of the GNU General Public License version ++* 2 along with this work; if not, write to the Free Software Foundation, ++* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++* ++* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++* or visit www.oracle.com if you need additional information or have any ++* questions. ++* ++*/ ++ ++// New version that allows arbitrary changes to already loaded classes. ++// Modifications done by: Thomas Wuerthinger <thomas.wuerthinger@gmail.com> + + #ifndef SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP + #define SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP +@@ -32,331 +35,28 @@ + #include "oops/objArrayOop.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" + #include "runtime/vm_operations.hpp" ++#include "gc_implementation/shared/vmGCOperations.hpp" + +-// Introduction: +-// +-// The RedefineClasses() API is used to change the definition of one or +-// more classes. While the API supports redefining more than one class +-// in a single call, in general, the API is discussed in the context of +-// changing the definition of a single current class to a single new +-// class. For clarity, the current class is will always be called +-// "the_class" and the new class will always be called "scratch_class". +-// +-// The name "the_class" is used because there is only one structure +-// that represents a specific class; redefinition does not replace the +-// structure, but instead replaces parts of the structure. The name +-// "scratch_class" is used because the structure that represents the +-// new definition of a specific class is simply used to carry around +-// the parts of the new definition until they are used to replace the +-// appropriate parts in the_class. Once redefinition of a class is +-// complete, scratch_class is thrown away. 
+-// +-// +-// Implementation Overview: +-// +-// The RedefineClasses() API is mostly a wrapper around the VM op that +-// does the real work. The work is split in varying degrees between +-// doit_prologue(), doit() and doit_epilogue(). +-// +-// 1) doit_prologue() is called by the JavaThread on the way to a +-// safepoint. It does parameter verification and loads scratch_class +-// which involves: +-// - parsing the incoming class definition using the_class' class +-// loader and security context +-// - linking scratch_class +-// - merging constant pools and rewriting bytecodes as needed +-// for the merged constant pool +-// - verifying the bytecodes in scratch_class +-// - setting up the constant pool cache and rewriting bytecodes +-// as needed to use the cache +-// - finally, scratch_class is compared to the_class to verify +-// that it is a valid replacement class +-// - if everything is good, then scratch_class is saved in an +-// instance field in the VM operation for the doit() call +-// +-// Note: A JavaThread must do the above work. +-// +-// 2) doit() is called by the VMThread during a safepoint. It installs +-// the new class definition(s) which involves: +-// - retrieving the scratch_class from the instance field in the +-// VM operation +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding weak reference(s) to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-// - telling the SystemDictionary to notice our changes +-// +-// Note: the above work must be done by the VMThread to be safe. +-// +-// 3) doit_epilogue() is called by the JavaThread after the VM op +-// is finished and the safepoint is done. It simply cleans up +-// memory allocated in doit_prologue() and used in doit(). +-// +-// +-// Constant Pool Details: +-// +-// When the_class is redefined, we cannot just replace the constant +-// pool in the_class with the constant pool from scratch_class because +-// that could confuse obsolete methods that may still be running. +-// Instead, the constant pool from the_class, old_cp, is merged with +-// the constant pool from scratch_class, scratch_cp. The resulting +-// constant pool, merge_cp, replaces old_cp in the_class. +-// +-// The key part of any merging algorithm is the entry comparison +-// function so we have to know the types of entries in a constant pool +-// in order to merge two of them together. Constant pools can contain +-// up to 12 different kinds of entries; the JVM_CONSTANT_Unicode entry +-// is not presently used so we only have to worry about the other 11 +-// entry types. For the purposes of constant pool merging, it is +-// helpful to know that the 11 entry types fall into 3 different +-// subtypes: "direct", "indirect" and "double-indirect". +-// +-// Direct CP entries contain data and do not contain references to +-// other CP entries. The following are direct CP entries: +-// JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} +-// +-// Indirect CP entries contain 1 or 2 references to a direct CP entry +-// and no other data. 
The following are indirect CP entries: +-// JVM_CONSTANT_{Class,NameAndType,String} +-// +-// Double-indirect CP entries contain two references to indirect CP +-// entries and no other data. The following are double-indirect CP +-// entries: +-// JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} +-// +-// When comparing entries between two constant pools, the entry types +-// are compared first and if they match, then further comparisons are +-// made depending on the entry subtype. Comparing direct CP entries is +-// simply a matter of comparing the data associated with each entry. +-// Comparing both indirect and double-indirect CP entries requires +-// recursion. +-// +-// Fortunately, the recursive combinations are limited because indirect +-// CP entries can only refer to direct CP entries and double-indirect +-// CP entries can only refer to indirect CP entries. The following is +-// an example illustration of the deepest set of indirections needed to +-// access the data associated with a JVM_CONSTANT_Fieldref entry: +-// +-// JVM_CONSTANT_Fieldref { +-// class_index => JVM_CONSTANT_Class { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-1> +-// } +-// } +-// name_and_type_index => JVM_CONSTANT_NameAndType { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-2> +-// } +-// descriptor_index => JVM_CONSTANT_Utf8 { +-// <data-3> +-// } +-// } +-// } +-// +-// The above illustration is not a data structure definition for any +-// computer language. The curly braces ('{' and '}') are meant to +-// delimit the context of the "fields" in the CP entry types shown. +-// Each indirection from the JVM_CONSTANT_Fieldref entry is shown via +-// "=>", e.g., the class_index is used to indirectly reference a +-// JVM_CONSTANT_Class entry where the name_index is used to indirectly +-// reference a JVM_CONSTANT_Utf8 entry which contains the interesting +-// <data-1>. In order to understand a JVM_CONSTANT_Fieldref entry, we +-// have to do a total of 5 indirections just to get to the CP entries +-// that contain the interesting pieces of data and then we have to +-// fetch the three pieces of data. This means we have to do a total of +-// (5 + 3) * 2 == 16 dereferences to compare two JVM_CONSTANT_Fieldref +-// entries. +-// +-// Here is the indirection, data and dereference count for each entry +-// type: +-// +-// JVM_CONSTANT_Class 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Double 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Fieldref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Float 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Integer 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_InterfaceMethodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Long 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Methodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_NameAndType 1 indir, 2 data, 4 derefs +-// JVM_CONSTANT_String 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Utf8 0 indir, 1 data, 1 deref +-// +-// So different subtypes of CP entries require different amounts of +-// work for a proper comparison. +-// +-// Now that we've talked about the different entry types and how to +-// compare them we need to get back to merging. This is not a merge in +-// the "sort -u" sense or even in the "sort" sense. When we merge two +-// constant pools, we copy all the entries from old_cp to merge_cp, +-// preserving entry order. Next we append all the unique entries from +-// scratch_cp to merge_cp and we track the index changes from the +-// location in scratch_cp to the possibly new location in merge_cp. 
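The merge just described boils down to three invariants: old_cp entries keep their indices in merge_cp, entries unique to scratch_cp are appended, and a mapping from scratch_cp indices to merge_cp indices is recorded so the rewritten bytecode of the new class can be fixed up. With plain strings standing in for typed CP entries (the real comparison is recursive precisely because entries can be indirect or double-indirect), a minimal sketch:

    #include <cstddef>
    #include <string>
    #include <vector>

    // Merge scratch_cp into a copy of old_cp; scratch_to_merge[i] receives the
    // merge_cp index of scratch_cp[i]. Names are illustrative, not HotSpot API.
    static std::vector<std::string> merge_pools(
        const std::vector<std::string>& old_cp,
        const std::vector<std::string>& scratch_cp,
        std::vector<size_t>* scratch_to_merge) {
      std::vector<std::string> merge_cp = old_cp;  // old entries keep their indices
      scratch_to_merge->clear();
      for (size_t i = 0; i < scratch_cp.size(); ++i) {
        size_t j = 0;
        while (j < merge_cp.size() && merge_cp[j] != scratch_cp[i]) ++j;
        if (j == merge_cp.size()) merge_cp.push_back(scratch_cp[i]);  // unique: append
        scratch_to_merge->push_back(j);  // where scratch entry i lives in merge_cp
      }
      return merge_cp;
    }

    int main() {
      std::vector<std::string> old_cp, scratch_cp;
      old_cp.push_back("Utf8 foo");     old_cp.push_back("Class #0");
      scratch_cp.push_back("Utf8 bar"); scratch_cp.push_back("Utf8 foo");
      std::vector<size_t> map;
      std::vector<std::string> merge_cp = merge_pools(old_cp, scratch_cp, &map);
      // merge_cp = {"Utf8 foo", "Class #0", "Utf8 bar"}, map = {2, 0}
      return (int)merge_cp.size();  // 3
    }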
+-// When we are done, any obsolete code that is still running that +-// uses old_cp should not be able to observe any difference if it +-// were to use merge_cp. As for the new code in scratch_class, it is +-// modified to use the appropriate index values in merge_cp before it +-// is used to replace the code in the_class. +-// +-// There is one small complication in copying the entries from old_cp +-// to merge_cp. Two of the CP entry types are special in that they are +-// lazily resolved. Before explaining the copying complication, we need +-// to digress into CP entry resolution. +-// +-// JVM_CONSTANT_Class and JVM_CONSTANT_String entries are present in +-// the class file, but are not stored in memory as such until they are +-// resolved. The entries are not resolved unless they are used because +-// resolution is expensive. During class file parsing the entries are +-// initially stored in memory as JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries. These special CP entry types +-// indicate that the JVM_CONSTANT_Class and JVM_CONSTANT_String entries +-// have been parsed, but the index values in the entries have not been +-// validated. After the entire constant pool has been parsed, the index +-// values can be validated and then the entries are converted into +-// JVM_CONSTANT_UnresolvedClass and JVM_CONSTANT_UnresolvedString +-// entries. During this conversion process, the UTF8 values that are +-// indirectly referenced by the JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries are changed into Symbol*s and the +-// entries are modified to refer to the Symbol*s. This optimization +-// eliminates one level of indirection for those two CP entry types and +-// gets the entries ready for verification. During class file parsing +-// it is also possible for JVM_CONSTANT_UnresolvedString entries to be +-// resolved into JVM_CONSTANT_String entries. Verification expects to +-// find JVM_CONSTANT_UnresolvedClass and either JVM_CONSTANT_String or +-// JVM_CONSTANT_UnresolvedString entries and not JVM_CONSTANT_Class +-// entries. +-// +-// Now we can get back to the copying complication. When we copy +-// entries from old_cp to merge_cp, we have to revert any +-// JVM_CONSTANT_Class entries to JVM_CONSTANT_UnresolvedClass entries +-// or verification will fail. +-// +-// It is important to explicitly state that the merging algorithm +-// effectively unresolves JVM_CONSTANT_Class entries that were in the +-// old_cp when they are changed into JVM_CONSTANT_UnresolvedClass +-// entries in the merge_cp. This is done both to make verification +-// happy and to avoid adding more brittleness between RedefineClasses +-// and the constant pool cache. By allowing the constant pool cache +-// implementation to (re)resolve JVM_CONSTANT_UnresolvedClass entries +-// into JVM_CONSTANT_Class entries, we avoid having to embed knowledge +-// about those algorithms in RedefineClasses. +-// +-// Appending unique entries from scratch_cp to merge_cp is straight +-// forward for direct CP entries and most indirect CP entries. For the +-// indirect CP entry type JVM_CONSTANT_NameAndType and for the double- +-// indirect CP entry types, the presence of more than one piece of +-// interesting data makes appending the entries more complicated. +-// +-// For the JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} entry types, +-// the entry is simply copied from scratch_cp to the end of merge_cp. 
+-// If the index in scratch_cp is different than the destination index +-// in merge_cp, then the change in index value is tracked. +-// +-// Note: the above discussion for the direct CP entries also applies +-// to the JVM_CONSTANT_Unresolved{Class,String} entry types. +-// +-// For the JVM_CONSTANT_{Class,String} entry types, since there is only +-// one data element at the end of the recursion, we know that we have +-// either one or two unique entries. If the JVM_CONSTANT_Utf8 entry is +-// unique then it is appended to merge_cp before the current entry. +-// If the JVM_CONSTANT_Utf8 entry is not unique, then the current entry +-// is updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// Note: the above discussion for JVM_CONSTANT_{Class,String} entry +-// types is theoretical. Since those entry types have already been +-// optimized into JVM_CONSTANT_Unresolved{Class,String} entry types, +-// they are handled as direct CP entries. +-// +-// For the JVM_CONSTANT_NameAndType entry type, since there are two +-// data elements at the end of the recursions, we know that we have +-// between one and three unique entries. Any unique JVM_CONSTANT_Utf8 +-// entries are appended to merge_cp before the current entry. For any +-// JVM_CONSTANT_Utf8 entries that are not unique, the current entry is +-// updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// For the JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} entry +-// types, since there are two indirect CP entries and three data +-// elements at the end of the recursions, we know that we have between +-// one and six unique entries. See the JVM_CONSTANT_Fieldref diagram +-// above for an example of all six entries. The uniqueness algorithm +-// for the JVM_CONSTANT_Class and JVM_CONSTANT_NameAndType entries is +-// covered above. Any unique entries are appended to merge_cp before +-// the current entry. For any entries that are not unique, the current +-// entry is updated to refer to the duplicate entry in merge_cp before +-// it is appended to merge_cp. Again, any changes in index values are +-// tracked as needed. +-// +-// +-// Other Details: +-// +-// Details for other parts of RedefineClasses need to be written. +-// This is a placeholder section. +-// +-// +-// Open Issues (in no particular order): +-// +-// - How do we serialize the RedefineClasses() API without deadlocking? +-// +-// - SystemDictionary::parse_stream() was called with a NULL protection +-// domain since the initial version. This has been changed to pass +-// the_class->protection_domain(). This change has been tested with +-// all NSK tests and nothing broke, but what will adding it now break +-// in ways that we don't test? +-// +-// - GenerateOopMap::rewrite_load_or_store() has a comment in its +-// (indirect) use of the Relocator class that the max instruction +-// size is 4 bytes. goto_w and jsr_w are 5 bytes and wide/iinc is +-// 6 bytes. Perhaps Relocator only needs a 4 byte buffer to do +-// what it does to the bytecodes. More investigation is needed. +-// +-// - java.lang.Object methods can be called on arrays. This is +-// implemented via the arrayKlassOop vtable which we don't +-// update. For example, if we redefine java.lang.Object.toString(), +-// then the new version of the method will not be called for array +-// objects. 
+-// +-// - How do we know if redefine_single_class() and the guts of +-// instanceKlass are out of sync? I don't think this can be +-// automated, but we should probably order the work in +-// redefine_single_class() to match the order of field +-// definitions in instanceKlass. We also need to add some +-// comments about keeping things in sync. +-// +-// - set_new_constant_pool() is huge and we should consider refactoring +-// it into smaller chunks of work. +-// +-// - The exception table update code in set_new_constant_pool() defines +-// const values that are also defined in a local context elsewhere. +-// The same literal values are also used in elsewhere. We need to +-// coordinate a cleanup of these constants with Runtime. +-// ++#define RC_ABORT(error) { _result = error; return false; } + +-class VM_RedefineClasses: public VM_Operation { ++class VM_RedefineClasses: public VM_GC_Operation { + private: ++ + // These static fields are needed by SystemDictionary::classes_do() + // facility and the adjust_cpool_cache_and_vtable() helper: + static objArrayOop _old_methods; + static objArrayOop _new_methods; +- static methodOop* _matching_old_methods; +- static methodOop* _matching_new_methods; +- static methodOop* _deleted_methods; +- static methodOop* _added_methods; ++ static int* _matching_old_methods; ++ static int* _matching_new_methods; ++ static int* _deleted_methods; ++ static int* _added_methods; + static int _matching_methods_length; + static int _deleted_methods_length; + static int _added_methods_length; + static klassOop _the_class_oop; + ++ static int _revision_number; ++ + // The instance fields are used to pass information from + // doit_prologue() to doit() and doit_epilogue(). + jint _class_count; +@@ -370,36 +70,29 @@ + // _index_map_p contains any entries. + int _index_map_count; + intArray * _index_map_p; +- // ptr to _class_count scratch_classes +- instanceKlassHandle * _scratch_classes; +- jvmtiError _res; ++ GrowableArray<instanceKlassHandle>* _new_classes; ++ GrowableArray<oop>* _updated_oops; ++ jvmtiError _result; ++ int _max_redefinition_flags; + + // Performance measurement support. These timers do not cover all + // the work done for JVM/TI RedefineClasses() but they do cover + // the heavy lifting. +- elapsedTimer _timer_rsc_phase1; +- elapsedTimer _timer_rsc_phase2; +- elapsedTimer _timer_vm_op_prologue; ++ elapsedTimer _timer_total; ++ elapsedTimer _timer_prologue; ++ elapsedTimer _timer_class_linking; ++ elapsedTimer _timer_class_loading; ++ elapsedTimer _timer_check_type; ++ elapsedTimer _timer_prepare_redefinition; ++ elapsedTimer _timer_wait_for_locks; ++ elapsedTimer _timer_redefinition; ++ elapsedTimer _timer_vm_op_epilogue; + +- // These routines are roughly in call order unless otherwise noted. +- +- // Load the caller's new class definition(s) into _scratch_classes. +- // Constant pool merging work is done here as needed. Also calls +- // compare_and_normalize_class_versions() to verify the class +- // definition(s). ++ jvmtiError check_redefinition_allowed(instanceKlassHandle new_class); ++ jvmtiError find_sorted_affected_classes(GrowableArray<instanceKlassHandle> *all_affected_klasses); ++ jvmtiError find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed); + jvmtiError load_new_class_versions(TRAPS); + +- // Verify that the caller provided class definition(s) that meet +- // the restrictions of RedefineClasses. Normalize the order of +- // overloaded methods as needed. 
+- jvmtiError compare_and_normalize_class_versions( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class); +- +- // Swap annotations[i] with annotations[j] +- // Used by compare_and_normalize_class_versions() when normalizing +- // overloaded methods or changing idnum as when adding or deleting methods. +- void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); +- + // Figure out which new methods match old methods in name and signature, + // which methods have been added, and which are no longer present + void compute_added_deleted_matching_methods(); +@@ -407,96 +100,100 @@ + // Change jmethodIDs to point to the new methods + void update_jmethod_ids(); + +- // In addition to marking methods as obsolete, this routine +- // records which methods are EMCP (Equivalent Module Constant +- // Pool) in the emcp_methods BitMap and returns the number of +- // EMCP methods via emcp_method_count_p. This information is +- // used when information about the previous version of the_class +- // is squirreled away. +- void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, +- int * emcp_method_count_p); +- void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ class FindAffectedKlassesClosure : public ObjectClosure { + +- // Unevolving classes may point to methods of the_class directly +- // from their constant pool caches, itables, and/or vtables. We +- // use the SystemDictionary::classes_do() facility and this helper +- // to fix up these pointers. +- static void adjust_cpool_cache_and_vtable(klassOop k_oop, oop loader, TRAPS); ++ private: ++ GrowableArray<instanceKlassHandle> *_original_klasses; ++ GrowableArray<instanceKlassHandle> *_result; ++ ++ public: ++ FindAffectedKlassesClosure(GrowableArray<instanceKlassHandle> *original_klasses, GrowableArray<instanceKlassHandle> *result); ++ ++ virtual void do_object(oop obj); ++ }; ++ ++ ++ static jvmtiError do_topological_class_sorting(const jvmtiClassDefinition *class_definitions, int class_count, GrowableArray<instanceKlassHandle> *affected, GrowableArray<instanceKlassHandle> *arr, TRAPS); + + // Install the redefinition of a class +- void redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS); ++ void redefine_single_class(instanceKlassHandle the_new_class, TRAPS); + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
+ void increment_class_counter(instanceKlass *ik, TRAPS); + +- // Support for constant pool merging (these routines are in alpha +- // order): +- void append_entry(constantPoolHandle scratch_cp, int scratch_i, +- constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); +- int find_new_index(int old_index); +- bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- bool is_unresolved_string_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); +- bool merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS); +- jvmtiError merge_cp_and_rewrite(instanceKlassHandle the_class, +- instanceKlassHandle scratch_class, TRAPS); +- u2 rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS); +- bool rewrite_cp_refs(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_element_value( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_method(methodHandle method, +- methodHandle * new_method_p, TRAPS); +- bool rewrite_cp_refs_in_methods(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS); +- void rewrite_cp_refs_in_verification_type_info( +- address& stackmap_addr_ref, address stackmap_end, u2 frame_i, +- u1 frame_size, TRAPS); +- void set_new_constant_pool(instanceKlassHandle scratch_class, +- constantPoolHandle scratch_cp, int scratch_cp_length, bool shrink, TRAPS); + + void flush_dependent_code(instanceKlassHandle k_h, TRAPS); + +- static void check_class(klassOop k_oop, oop initiating_loader, TRAPS) PRODUCT_RETURN; ++ static void check_class(klassOop k_oop,/* oop initiating_loader,*/ TRAPS) PRODUCT_RETURN; + +- static void dump_methods() PRODUCT_RETURN; ++ static void adjust_cpool_cache(klassOop k_oop, oop initiating_loader, TRAPS); ++ ++#ifdef ASSERT ++ static void verify_classes(klassOop k_oop, oop initiating_loader, TRAPS); ++#endif ++ ++ int calculate_redefinition_flags(instanceKlassHandle new_version); ++ void calculate_instance_update_information(klassOop new_version); ++ void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p); ++ ++ static void calculate_type_check_information(klassOop k); ++ static void clear_type_check_information(klassOop k); + + public: +- VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind); +- VMOp_Type type() const { return VMOp_RedefineClasses; } ++ VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, 
JvmtiClassLoadKind class_load_kind);
++ virtual ~VM_RedefineClasses();
++
++ bool check_arguments();
+ bool doit_prologue();
+ void doit();
+ void doit_epilogue();
++ void rollback();
+
+- bool allow_nested_vm_operations() const { return true; }
+- jvmtiError check_error() { return _res; }
++ jvmtiError check_exception() const;
++ VMOp_Type type() const { return VMOp_RedefineClasses; }
++ bool skip_operation() const { return false; }
++ bool allow_nested_vm_operations() const { return true; }
++ jvmtiError check_error() { return _result; }
++
++ void update_active_methods();
++
++ // Checks for type consistency after hierarchy change
++ bool check_type_consistency();
++ void calculate_type_check_information();
++ bool check_field_value_types();
++ void clear_type_check_information();
++ bool check_method_stacks();
++ bool check_loaded_methods();
++ bool check_method(methodOop method);
++ static Symbol* signature_to_class_name(Symbol* signature);
++
++ void method_forwarding();
++
++ void update_array_classes_to_newest_version(klassOop smallest_dimension);
+
+ // Modifiable test must be shared between IsModifiableClass query
+ // and redefine implementation
+ static bool is_modifiable_class(oop klass_mirror);
++
++ // Method used during garbage collection: the VM operation must iterate over all oops.
++ void oops_do(OopClosure* f);
++
++ // Utility methods for transferring field access flags
++
++ static void transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to);
++ static void update_klass_field_access_flag(fieldDescriptor *fd);
++
++ void transfer_old_native_function_registrations(instanceKlassHandle the_class);
++
++ void lock_threads();
++ void unlock_threads();
++
++ template <class T> static void do_oop_work(T* p);
++
++ static void swap_marks(oop first, oop second);
++
+ };
+
+ #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP
++
+diff --git a/src/share/vm/prims/methodComparator.cpp b/src/share/vm/prims/methodComparator.cpp
+--- a/src/share/vm/prims/methodComparator.cpp
++++ b/src/share/vm/prims/methodComparator.cpp
+@@ -65,6 +65,37 @@
+ if (! 
args_same(c_old, c_new))
+ return false;
+ }
++
++ // DCEVM: Added exception table comparison to EMCP comparison
++
++ typeArrayOop ex_old = old_method->constMethod()->exception_table();
++ typeArrayOop ex_new = new_method->constMethod()->exception_table();
++
++ if ((ex_old == NULL) != (ex_new == NULL)) return false;
++ if (ex_old != NULL && ex_new != NULL && ex_old->length() != ex_new->length()) return false;
++
++ if (ex_old != NULL && ex_new != NULL && ex_old->length() == ex_new->length()) {
++ // Per entry (four consecutive int slots):
++ /* start pc */
++ /* end (limit) pc */
++ /* handler (goto) pc */
++ /* catch-type cp index */
++ for (int i=0; i<ex_old->length(); i++) {
++ int old_val = ex_old->int_at(i);
++ int new_val = ex_new->int_at(i);
++ if ((i + 1) % 4 == 0) {
++ if (old_val == 0 || new_val == 0) {
++ if (old_val != new_val) return false;
++ } else if ((_old_cp->klass_at_noresolve(old_val) != _new_cp->klass_at_noresolve(new_val)))
++ return false;
++ } else {
++ if (old_val != new_val) {
++ return false;
++ }
++ }
++ }
++ }
++
+ return true;
+ }
+
+diff --git a/src/share/vm/prims/nativeLookup.cpp b/src/share/vm/prims/nativeLookup.cpp
+--- a/src/share/vm/prims/nativeLookup.cpp
++++ b/src/share/vm/prims/nativeLookup.cpp
+@@ -35,6 +35,7 @@
+ #include "oops/symbol.hpp"
+ #include "prims/jvm_misc.hpp"
+ #include "prims/nativeLookup.hpp"
++#include "prims/jvmtiRedefineClasses.hpp"
+ #include "runtime/arguments.hpp"
+ #include "runtime/handles.inline.hpp"
+ #include "runtime/javaCalls.hpp"
+@@ -53,7 +54,6 @@
+ # include "os_bsd.inline.hpp"
+ #endif
+
+-
+ static void mangle_name_on(outputStream* st, Symbol* name, int begin, int end) {
+ char* bytes = (char*)name->bytes() + begin;
+ char* end_bytes = (char*)name->bytes() + end;
+@@ -136,6 +136,40 @@
+ { CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) }
+ };
+
++// Helper function to call redefineClasses from Java code
++JVM_ENTRY(int, JVM_RedefineClassesHelper(JNIEnv *env, jclass cb, jclass target, jbyteArray bytes))
++ ResourceMark rm(THREAD);
++
++ JavaThread* current_thread = JavaThread::current();
++ jbyte* bytecodes = NULL;
++ const int class_count = 1;
++ jvmtiClassDefinition* class_definitions = NEW_RESOURCE_ARRAY(jvmtiClassDefinition, class_count);
++
++ {
++ ThreadToNativeFromVM ttnfv(thread);
++ jboolean is_copy = JNI_FALSE;
++ bytecodes = env->GetByteArrayElements(bytes, &is_copy);
++ class_definitions[0].klass = target;
++ class_definitions[0].class_byte_count = env->GetArrayLength(bytes);
++ class_definitions[0].class_bytes = (unsigned char*)bytecodes;
++ }
++
++ VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform);
++ VMThread::execute(&op);
++ int result = op.check_error();
++
++ {
++ ThreadToNativeFromVM ttnfv(thread);
++ if (env->ExceptionOccurred()) {
++ return -1;
++ }
++ env->ReleaseByteArrayElements(bytes, bytecodes, 0);
++ }
++
++ return result;
++JVM_END
++
++
+ static address lookup_special_native(char* jni_name) {
+ int i = !JDK_Version::is_gte_jdk14x_version() ? 
0 : 2; // see comment in lookup_special_native_methods
+ int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
+@@ -175,6 +209,9 @@
+ return entry;
+ }
+ }
++ if(strstr(jni_name, "Java_at_ssw_hotswap_ClassRedefinition_redefineClasses") != NULL) {
++ return CAST_FROM_FN_PTR(address, JVM_RedefineClassesHelper);
++ }
+
+ // Otherwise call static method findNative in ClassLoader
+ KlassHandle klass (THREAD, SystemDictionary::ClassLoader_klass());
+diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
+--- a/src/share/vm/runtime/arguments.cpp
++++ b/src/share/vm/runtime/arguments.cpp
+@@ -1747,6 +1747,15 @@
+ status = false;
+ }
+
++ // (tw) Must use serial GC
++ if (!UseSerialGC && i >= 1) {
++ jio_fprintf(defaultStream::error_stream(),
++ "Must use the serial GC in the Dynamic Code Evolution VM\n");
++ status = false;
++ } else {
++ UseSerialGC = true;
++ }
++
+ return status;
+ }
+
+diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
+--- a/src/share/vm/runtime/deoptimization.cpp
++++ b/src/share/vm/runtime/deoptimization.cpp
+@@ -595,6 +595,38 @@
+ // Cleanup thread deopt data
+ cleanup_deopt_info(thread, array);
+
++ // (tw) Redefinition support: Check if we need to transfer method execution points to new versions
++ {
++ ResourceMark res_mark;
++
++ // Verify that the just-unpacked frames match the interpreter's
++ // notions of expression stack and locals
++ vframeArray* cur_array = thread->vframe_array_last();
++ RegisterMap rm(thread, false);
++ rm.set_include_argument_oops(false);
++ for (int i = 0; i < cur_array->frames(); i++) {
++ vframeArrayElement* el = cur_array->element(i);
++ frame* frame = el->iframe();
++ guarantee(frame->is_interpreted_frame(), "Wrong frame type");
++ RegisterMap reg_map(thread);
++ vframe* vf = vframe::new_vframe(frame, &reg_map, thread);
++ interpretedVFrame *iframe = (interpretedVFrame *)vf;
++ methodOop method = iframe->method();
++ int bci = iframe->bci();
++ method = method->newest_version();
++ iframe->set_method(method, bci);
++
++ methodOop forward_method = method->forward_method();
++ if (forward_method != NULL && method->is_in_code_section(bci)) {
++ int new_bci = method->calculate_forward_bci(bci, forward_method);
++ if (TraceRedefineClasses >= 2) {
++ tty->print_cr("Transferring execution of %s to new method old_bci=%d new_bci=%d", forward_method->name()->as_C_string(), bci, new_bci);
++ }
++ iframe->set_method(forward_method, new_bci);
++ }
++ }
++ }
++
+ #ifndef PRODUCT
+ if (VerifyStack) {
+ ResourceMark res_mark;
+diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
+--- a/src/share/vm/runtime/frame.cpp
++++ b/src/share/vm/runtime/frame.cpp
+@@ -403,6 +403,12 @@
+ *interpreter_frame_method_addr() = method;
+ }
+
++// (tw) Sets constant pool cache oop
++void frame::interpreter_frame_set_cache(constantPoolCacheOop cp) {
++ assert(is_interpreted_frame(), "interpreted frame expected");
++ *interpreter_frame_cache_addr() = cp;
++}
++
+ void frame::interpreter_frame_set_bcx(intptr_t bcx) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ if (ProfileInterpreter) {
+@@ -418,19 +424,27 @@
+ // The bcx was just converted from bci to bcp.
+ // Convert the mdx in parallel.
+ methodDataOop mdo = interpreter_frame_method()->method_data();
+- assert(mdo != NULL, "");
+- int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. 
+- address mdp = mdo->di_to_dp(mdi);
+- interpreter_frame_set_mdx((intptr_t)mdp);
++ if (mdo == NULL) {
++ interpreter_frame_set_mdx(0);
++ } else {
++ assert(mdo != NULL, "");
++ int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
++ address mdp = mdo->di_to_dp(mdi);
++ interpreter_frame_set_mdx((intptr_t)mdp);
++ }
+ }
+ } else {
+ if (is_now_bci) {
+ // The bcx was just converted from bcp to bci.
+ // Convert the mdx in parallel.
+ methodDataOop mdo = interpreter_frame_method()->method_data();
+- assert(mdo != NULL, "");
+- int mdi = mdo->dp_to_di((address)mdx);
+- interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
++ if (mdo == NULL) {
++ interpreter_frame_set_mdx(0);
++ } else {
++ assert(mdo != NULL, "");
++ int mdi = mdo->dp_to_di((address)mdx);
++ interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
++ }
+ }
+ }
+ }
+diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp
+--- a/src/share/vm/runtime/frame.hpp
++++ b/src/share/vm/runtime/frame.hpp
+@@ -346,6 +346,7 @@
+ // Method & constant pool cache
+ methodOop interpreter_frame_method() const;
+ void interpreter_frame_set_method(methodOop method);
++ void interpreter_frame_set_cache(constantPoolCacheOop cp);
+ methodOop* interpreter_frame_method_addr() const;
+ constantPoolCacheOop* interpreter_frame_cache_addr() const;
+ #ifdef PPC
+diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
+--- a/src/share/vm/runtime/globals.hpp
++++ b/src/share/vm/runtime/globals.hpp
+@@ -1309,9 +1309,23 @@
+ product(bool, StressLdcRewrite, false, \
+ "Force ldc -> ldc_w rewrite during RedefineClasses") \
+ \
++ product(bool, UseMethodForwardPoints, false, \
++ "Use method forward points") \
++ \
++ product(intx, MethodForwardPointsMaxLocals, 300, \
++ "Maximum number of locals in forwarding method") \
++ \
++ product(intx, MethodForwardPointsMaxStack, 300, \
++ "Maximum number of stack slots in forwarding method") \
++ \
+ product(intx, TraceRedefineClasses, 0, \
+ "Trace level for JVMTI RedefineClasses") \
+ \
++ product(bool, TimeRedefineClasses, false, \
++ "Measure timing for JVMTI RedefineClasses") \
++ \
++ product(bool, AllowAdvancedClassRedefinition, true, \
++ "Allow advanced class redefinition beyond swapping method bodies")\
+ develop(bool, StressMethodComparator, false, \
+ "run the MethodComparator on all loaded methods") \
+ \
+diff --git a/src/share/vm/runtime/interfaceSupport.hpp b/src/share/vm/runtime/interfaceSupport.hpp
+--- a/src/share/vm/runtime/interfaceSupport.hpp
++++ b/src/share/vm/runtime/interfaceSupport.hpp
+@@ -296,7 +296,7 @@
+ ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
+ // We are leaving the VM at this point and going directly to native code.
+ // Block, if we are in the middle of a safepoint synchronization.
+- assert(!thread->owns_locks(), "must release all locks when leaving VM");
++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM");
+ thread->frame_anchor()->make_walkable(thread);
+ trans_and_fence(_thread_in_vm, _thread_in_native);
+ // Check for pending. async. exceptions or suspends. 
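The product flags declared in the globals.hpp hunk above surface as ordinary -XX options of the patched VM, and inside the VM they are plain C++ globals. A minimal sketch of both sides, assuming a DCEVM build (the flag names are taken from the hunk; the function and application names below are hypothetical):

  // Launcher side (hypothetical invocation):
  //   java -XX:+UseMethodForwardPoints -XX:TraceRedefineClasses=2 MyApp
  // VM side: the globals gate optional work, following the same pattern as the
  // deoptimization.cpp hunk earlier in this patch.
  static void trace_redefinition_step(const char* class_name) {
    if (TraceRedefineClasses >= 2) {  // trace level set via -XX:TraceRedefineClasses=<n>
      tty->print_cr("(DCEVM) redefining %s", class_name);
    }
  }
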
+diff --git a/src/share/vm/runtime/javaCalls.cpp b/src/share/vm/runtime/javaCalls.cpp +--- a/src/share/vm/runtime/javaCalls.cpp ++++ b/src/share/vm/runtime/javaCalls.cpp +@@ -60,7 +60,7 @@ + bool clear_pending_exception = true; + + guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code"); +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); + _result = result; + +diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp +--- a/src/share/vm/runtime/jniHandles.cpp ++++ b/src/share/vm/runtime/jniHandles.cpp +@@ -112,6 +112,10 @@ + } + + jmethodID JNIHandles::make_jmethod_id(methodHandle mh) { ++ if (mh->newest_version() != mh()) { ++ methodHandle mh_new(Thread::current(), mh()->newest_version()); ++ return (jmethodID) make_weak_global(mh_new); ++ } + return (jmethodID) make_weak_global(mh); + } + +diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp +--- a/src/share/vm/runtime/mutex.cpp ++++ b/src/share/vm/runtime/mutex.cpp +@@ -1227,7 +1227,7 @@ + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1247,7 +1247,7 @@ + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1310,6 +1310,7 @@ + // already hold Terminator_lock - may happen because of periodic safepoints + if (this->rank() != Mutex::native && + this->rank() != Mutex::suspend_resume && ++ this->rank() != Mutex::redefine_classes && + locks != NULL && locks->rank() <= this->rank() && + !SafepointSynchronize::is_at_safepoint() && + this != Interrupt_lock && this != ProfileVM_lock && +diff --git a/src/share/vm/runtime/mutex.hpp b/src/share/vm/runtime/mutex.hpp +--- a/src/share/vm/runtime/mutex.hpp ++++ b/src/share/vm/runtime/mutex.hpp +@@ -109,7 +109,8 @@ + barrier = safepoint + 1, + nonleaf = barrier + 1, + max_nonleaf = nonleaf + 900, +- native = max_nonleaf + 1 ++ native = max_nonleaf + 1, ++ redefine_classes = native + 1 + }; + + // The WaitSet and EntryList linked lists are composed of ParkEvents. +diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp +--- a/src/share/vm/runtime/mutexLocker.cpp ++++ b/src/share/vm/runtime/mutexLocker.cpp +@@ -49,6 +49,7 @@ + // Consider using GCC's __read_mostly. 
+ + Mutex* Patching_lock = NULL; ++Mutex* RedefineClasses_lock = NULL; + Monitor* SystemDictionary_lock = NULL; + Mutex* PackageTable_lock = NULL; + Mutex* CompiledIC_lock = NULL; +@@ -90,6 +91,7 @@ + Mutex* DirtyCardQ_FL_lock = NULL; + Monitor* DirtyCardQ_CBL_mon = NULL; + Mutex* Shared_DirtyCardQ_lock = NULL; ++Monitor* RedefinitionSync_lock = NULL; + Mutex* ParGCRareEvent_lock = NULL; + Mutex* EvacFailureStack_lock = NULL; + Mutex* DerivedPointerTableGC_lock = NULL; +@@ -205,6 +207,7 @@ + def(HotCardCache_lock , Mutex , special , true ); + def(EvacFailureStack_lock , Mutex , nonleaf , true ); + } ++ def(RedefinitionSync_lock , Monitor , leaf , false ); + def(ParGCRareEvent_lock , Mutex , leaf , true ); + def(DerivedPointerTableGC_lock , Mutex, leaf, true ); + def(CodeCache_lock , Mutex , special, true ); +@@ -279,6 +282,7 @@ + def(Debug2_lock , Mutex , nonleaf+4, true ); + def(Debug3_lock , Mutex , nonleaf+4, true ); + def(ProfileVM_lock , Monitor, nonleaf+4, false); // used for profiling of the VMThread ++ def(RedefineClasses_lock , Mutex, nonleaf+7, false ); // for ensuring that class redefinition is not done in parallel + def(CompileThread_lock , Monitor, nonleaf+5, false ); + + def(JfrQuery_lock , Monitor, nonleaf, true); // JFR locks, keep these in consecutive order +diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp +--- a/src/share/vm/runtime/mutexLocker.hpp ++++ b/src/share/vm/runtime/mutexLocker.hpp +@@ -43,6 +43,8 @@ + // Mutexes used in the VM. + + extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code ++extern Monitor* RedefinitionSync_lock; // a lock on synchronized class redefinition ++extern Mutex* RedefineClasses_lock; // a lock on class redefinition + extern Monitor* SystemDictionary_lock; // a lock on the system dictonary + extern Mutex* PackageTable_lock; // a lock on the class loader package table + extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access +diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp +--- a/src/share/vm/runtime/reflection.cpp ++++ b/src/share/vm/runtime/reflection.cpp +@@ -469,7 +469,8 @@ + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. + if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +@@ -525,6 +526,12 @@ + AccessFlags access, + bool classloader_only, + bool protected_restriction) { ++ ++ // (tw) Decide accessibility based on active version ++ if (current_class != NULL) { ++ current_class = current_class->klass_part()->active_version(); ++ } ++ + // Verify that current_class can access a field of field_class, where that + // field's access bits are "access". We assume that we've already verified + // that current_class can access field_class. +@@ -566,7 +573,8 @@ + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. 
+ if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp +--- a/src/share/vm/runtime/sharedRuntime.cpp ++++ b/src/share/vm/runtime/sharedRuntime.cpp +@@ -1139,7 +1139,20 @@ + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && +- callee_method->method_holder() != SystemDictionary::Object_klass()) { ++ callee_method->method_holder()->klass_part()->newest_version() != SystemDictionary::Object_klass()->klass_part()->newest_version()) { ++ ++ // DCEVM: If we are executing an old method, this is OK! ++ { ++ ResourceMark rm(thread); ++ RegisterMap cbl_map(thread, false); ++ frame caller_frame = thread->last_frame().sender(&cbl_map); ++ ++ CodeBlob* caller_cb = caller_frame.cb(); ++ guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod"); ++ nmethod* caller_nm = caller_cb->as_nmethod_or_null(); ++ if (caller_nm->method()->is_old()) break; ++ } ++ + // If has a pending exception then there is no need to re-try to + // resolve this method. + // If the method has been redefined, we need to try again. +diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp +--- a/src/share/vm/runtime/thread.cpp ++++ b/src/share/vm/runtime/thread.cpp +@@ -212,6 +212,8 @@ + set_self_raw_id(0); + set_lgrp_id(-1); + ++ _redefine_classes_mutex = new Mutex(Mutex::redefine_classes, "redefine classes lock", false); ++ + // allocated data structures + set_osthread(NULL); + set_resource_area(new ResourceArea()); +@@ -246,6 +248,7 @@ + omFreeProvision = 32 ; + omInUseList = NULL ; + omInUseCount = 0 ; ++ _pretend_new_universe = false; + + #ifdef ASSERT + _visited_for_critical_count = false; +@@ -857,6 +860,15 @@ + return false; + } + ++bool Thread::owns_locks_but_redefine_classes_lock() const { ++ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) { ++ if (cur != RedefineClasses_lock && cur->rank() != Mutex::redefine_classes) { ++ return true; ++ } ++ } ++ return false; ++} ++ + + #endif + +@@ -1507,7 +1519,7 @@ + ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm); + + assert(JavaThread::current() == this, "sanity check"); +- assert(!Thread::current()->owns_locks(), "sanity check"); ++ assert(!Thread::current()->owns_locks_but_redefine_classes_lock(), "sanity check"); + + DTRACE_THREAD_PROBE(start, this); + +@@ -3045,7 +3057,7 @@ + + // Create a CompilerThread + CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) +-: JavaThread(&compiler_thread_entry) { ++: JavaThread(&compiler_thread_entry), _should_bailout(false) { + _env = NULL; + _log = NULL; + _task = NULL; +@@ -3053,6 +3065,7 @@ + _counters = counters; + _buffer_blob = NULL; + _scanned_nmethod = NULL; ++ _compilation_mutex = new Mutex(Mutex::redefine_classes, "compilationMutex", false); + + #ifndef PRODUCT + _ideal_graph_printer = NULL; +@@ -3082,6 +3095,7 @@ + int Threads::_number_of_non_daemon_threads = 0; + int Threads::_return_code = 0; + size_t JavaThread::_stack_size_at_create = 0; ++bool 
Threads::_wait_at_instrumentation_entry = false;
+
+ // All JavaThreads
+ #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
+diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
+--- a/src/share/vm/runtime/thread.hpp
++++ b/src/share/vm/runtime/thread.hpp
+@@ -197,11 +197,14 @@
+ void enter_signal_handler() { _num_nested_signal++; }
+ void leave_signal_handler() { _num_nested_signal--; }
+ bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
++ Mutex* redefine_classes_mutex() { return _redefine_classes_mutex; }
+
+ private:
+ // Debug tracing
+ static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN;
+
++ Mutex* _redefine_classes_mutex;
++
+ // Active_handles points to a block of handles
+ JNIHandleBlock* _active_handles;
+
+@@ -522,10 +525,15 @@
+ uintptr_t _self_raw_id; // used by get_thread (mutable)
+ int _lgrp_id;
+
++
++ bool _pretend_new_universe;
++
+ public:
+ // Stack overflow support
+ address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
+
++ void set_pretend_new_universe(bool b) { if (_pretend_new_universe != b) { if (TraceRedefineClasses >= 5) tty->print_cr("Changing pretend universe to %d", (int)b); _pretend_new_universe = b; } }
++ bool pretend_new_universe() { return _pretend_new_universe; }
+ void set_stack_base(address base) { _stack_base = base; }
+ size_t stack_size() const { return _stack_size; }
+ void set_stack_size(size_t size) { _stack_size = size; }
+@@ -563,6 +571,7 @@
+ void print_owned_locks() const { print_owned_locks_on(tty); }
+ Monitor* owned_locks() const { return _owned_locks; }
+ bool owns_locks() const { return owned_locks() != NULL; }
++ bool owns_locks_but_redefine_classes_lock() const;
+ bool owns_locks_but_compiled_lock() const;
+
+ // Deadlock detection
+@@ -1745,6 +1754,8 @@
+ CompileTask* _task;
+ CompileQueue* _queue;
+ BufferBlob* _buffer_blob;
++ bool _should_bailout;
++ Mutex* _compilation_mutex;
+
+ nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+
+@@ -1754,12 +1765,16 @@
+
+ CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+
++ bool should_bailout() const { return _should_bailout; }
++ void set_should_bailout(bool b) { _should_bailout = b; }
++
+ bool is_Compiler_thread() const { return true; }
+ // Hide this compiler thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ CompileQueue* queue() { return _queue; }
+ CompilerCounters* counters() { return _counters; }
++ Mutex *compilation_mutex() { return _compilation_mutex; }
+
+ // Get/set the thread's compilation environment. 
+ ciEnv* env() { return _env; } +@@ -1814,6 +1829,7 @@ + static int _number_of_threads; + static int _number_of_non_daemon_threads; + static int _return_code; ++ static bool _wait_at_instrumentation_entry; + + public: + // Thread management +@@ -1825,6 +1841,9 @@ + static JavaThread* first() { return _thread_list; } + static void threads_do(ThreadClosure* tc); + ++ static bool wait_at_instrumentation_entry() { return _wait_at_instrumentation_entry; } ++ static void set_wait_at_instrumentation_entry(bool b) { _wait_at_instrumentation_entry = b; } ++ + // Initializes the vm and creates the vm thread + static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain); + static void convert_vm_init_libraries_to_agents(); +diff --git a/src/share/vm/runtime/vframe.cpp b/src/share/vm/runtime/vframe.cpp +--- a/src/share/vm/runtime/vframe.cpp ++++ b/src/share/vm/runtime/vframe.cpp +@@ -253,6 +253,46 @@ + return fr().interpreter_frame_method(); + } + ++// (tw) Sets interpreter frame method. ++void interpretedVFrame::set_method(methodOop new_method, int new_bci) { ++ methodOop old_method = fr().interpreter_frame_method(); ++ int old_stack_size = fr().interpreter_frame_expression_stack_size(); ++ if (old_method == new_method) return; ++ u_char *old_bcp = bcp(); ++ int old_bci = bci(); ++ fr().interpreter_frame_set_method(new_method); ++ fr().interpreter_frame_set_cache(new_method->constants()->cache()); ++ u_char *new_bcp = new_method->code_base() + new_bci; ++ assert(new_method->bcp_from(new_bci) == new_bcp, ""); ++ ++ set_bcp(new_bcp); ++ ++ Bytecodes::Code code = Bytecodes::java_code_at(old_method, old_bcp); ++ assert(Bytecodes::java_code_at(new_method, new_bcp) == code, "must have same bytecode at this position"); ++ ++ switch (code) { ++ case Bytecodes::_invokevirtual : ++ case Bytecodes::_invokespecial : ++ case Bytecodes::_invokestatic : ++ case Bytecodes::_invokeinterface: { ++ int old_index = Bytes::get_native_u2(old_bcp+1); ++ int new_index = Bytes::get_native_u2(new_bcp+1); ++ new_method->constants()->cache()->entry_at(new_index)->copy_from(old_method->constants()->cache()->entry_at(old_index)); ++ break; ++ } ++ ++ case Bytecodes::_invokedynamic: { ++ int old_index = Bytes::get_native_u4(old_bcp+1); ++ int new_index = Bytes::get_native_u4(new_bcp+1); ++ new_method->constants()->cache()->secondary_entry_at(new_index)->copy_from(old_method->constants()->cache()->secondary_entry_at(old_index)); ++ break; ++ } ++ } ++ ++ int new_stack_size = fr().interpreter_frame_expression_stack_size(); ++ assert(new_method->validate_bci_from_bcx((intptr_t)new_bcp) == new_bci, ""); ++} ++ + StackValueCollection* interpretedVFrame::locals() const { + int length = method()->max_locals(); + +diff --git a/src/share/vm/runtime/vframe.hpp b/src/share/vm/runtime/vframe.hpp +--- a/src/share/vm/runtime/vframe.hpp ++++ b/src/share/vm/runtime/vframe.hpp +@@ -163,6 +163,7 @@ + StackValueCollection* locals() const; + StackValueCollection* expressions() const; + GrowableArray<MonitorInfo*>* monitors() const; ++ void set_method(methodOop method, int new_bci); + + void set_locals(StackValueCollection* values) const; + +diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp +--- a/src/share/vm/runtime/vmThread.cpp ++++ b/src/share/vm/runtime/vmThread.cpp +@@ -671,6 +671,9 @@ + void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) { + Thread::oops_do(f, cf); + _vm_queue->oops_do(f); ++ if (_cur_vm_operation != NULL) { ++ _cur_vm_operation->oops_do(f); ++ } + } + + 
//------------------------------------------------------------------------------------------------------------------ +diff --git a/src/share/vm/utilities/exceptions.cpp b/src/share/vm/utilities/exceptions.cpp +--- a/src/share/vm/utilities/exceptions.cpp ++++ b/src/share/vm/utilities/exceptions.cpp +@@ -251,6 +251,8 @@ + assert(thread->is_Java_thread(), "can only be called by a Java thread"); + assert(!thread->has_pending_exception(), "already has exception"); + ++ bool old_pretend_value = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + Handle h_exception; + + // Resolve exception klass +@@ -298,6 +300,8 @@ + h_exception = Handle(thread, thread->pending_exception()); + thread->clear_pending_exception(); + } ++ ++ Thread::current()->set_pretend_new_universe(old_pretend_value); + return h_exception; + } + +diff --git a/src/share/vm/utilities/growableArray.hpp b/src/share/vm/utilities/growableArray.hpp +--- a/src/share/vm/utilities/growableArray.hpp ++++ b/src/share/vm/utilities/growableArray.hpp +@@ -135,6 +135,33 @@ + assert(on_stack(), "fast ResourceObj path only"); + return (void*)resource_allocate_bytes(thread, elementSize * _max); + } ++ ++}; ++ ++template<class E, class F> class Pair : public StackObj ++{ ++private: ++ E _left; ++ F _right; ++ ++public: ++ ++ Pair() { ++ ++ } ++ ++ Pair(E left, F right) { ++ this->_left = left; ++ this->_right = right; ++ } ++ ++ E left() { ++ return _left; ++ } ++ ++ F right() { ++ return _right; ++ } + }; + + template<class E> class GrowableArray : public GenericGrowableArray { diff --git a/hotspot/.hg/patches/full-jdk7u45-b08.patch b/hotspot/.hg/patches/full-jdk7u45-b08.patch new file mode 100644 index 00000000..9d61ec1d --- /dev/null +++ b/hotspot/.hg/patches/full-jdk7u45-b08.patch @@ -0,0 +1,12034 @@ +diff --git a/make/bsd/makefiles/gcc.make b/make/bsd/makefiles/gcc.make +index 2310141..8ddb2ab 100644 +--- a/make/bsd/makefiles/gcc.make ++++ b/make/bsd/makefiles/gcc.make +@@ -116,7 +116,10 @@ CFLAGS += $(VM_PICFLAG) + CFLAGS += -fno-rtti + CFLAGS += -fno-exceptions + CFLAGS += -pthread +-CFLAGS += -fcheck-new ++## well, strictly speaking we should check for clang not Darwin ++ifneq ($(OS_VENDOR), Darwin) ++ CFLAGS += -fcheck-new ++endif + # version 4 and above support fvisibility=hidden (matches jni_x86.h file) + # except 4.1.2 gives pointless warnings that can't be disabled (afaik) + ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0" +diff --git a/src/cpu/x86/vm/templateTable_x86_32.cpp b/src/cpu/x86/vm/templateTable_x86_32.cpp +index fc19edc..d2cddd3 100644 +--- a/src/cpu/x86/vm/templateTable_x86_32.cpp ++++ b/src/cpu/x86/vm/templateTable_x86_32.cpp +@@ -2109,6 +2109,22 @@ void TemplateTable::resolve_cache_and_index(int byte_no, + // resolve first time through + address entry; + switch (bytecode()) { ++ case Bytecodes::_fast_agetfield : // fall through ++ case Bytecodes::_fast_bgetfield : // fall through ++ case Bytecodes::_fast_cgetfield : // fall through ++ case Bytecodes::_fast_dgetfield : // fall through ++ case Bytecodes::_fast_fgetfield : // fall through ++ case Bytecodes::_fast_igetfield : // fall through ++ case Bytecodes::_fast_lgetfield : // fall through ++ case Bytecodes::_fast_sgetfield : // fall through ++ case Bytecodes::_fast_aputfield : // fall through ++ case Bytecodes::_fast_bputfield : // fall through ++ case Bytecodes::_fast_cputfield : // fall through ++ case Bytecodes::_fast_dputfield : // fall through ++ 
case Bytecodes::_fast_fputfield : // fall through
++ case Bytecodes::_fast_iputfield : // fall through
++ case Bytecodes::_fast_lputfield : // fall through
++ case Bytecodes::_fast_sputfield : // fall through
+ case Bytecodes::_getstatic : // fall through
+ case Bytecodes::_putstatic : // fall through
+ case Bytecodes::_getfield : // fall through
+@@ -2211,6 +2227,7 @@
+ // Correct values of the cache and index registers are preserved.
+ void TemplateTable::jvmti_post_field_access(Register cache,
+ Register index,
++ int byte_no,
+ bool is_static,
+ bool has_tos) {
+ if (JvmtiExport::can_post_field_access()) {
+@@ -2237,7 +2254,11 @@
+ // cache: cache entry pointer
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
+ rax, cache);
+- __ get_cache_and_index_at_bcp(cache, index, 1);
++
++ // DCEVM: Redefinition might have occurred => reresolve the cp entry.
++ __ restore_bcp();
++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+ __ bind(L1);
+ }
+ }
+@@ -2258,7 +2279,7 @@
+ const Register flags = rax;
+
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+- jvmti_post_field_access(cache, index, is_static, false);
++ jvmti_post_field_access(cache, index, byte_no, is_static, false);
+ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+ if (!is_static) pop_and_check_object(obj);
+@@ -2393,7 +2414,7 @@
+
+ // The registers cache and index expected to be set before call.
+ // The function may destroy various registers, just not the cache and index registers.
+-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static) {
+
+ ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+
+@@ -2451,7 +2472,11 @@
+ // rcx: jvalue object on the stack
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
+ rbx, rax, rcx);
+- __ get_cache_and_index_at_bcp(cache, index, 1);
++
++ // (tw) Redefinition might have occurred => reresolve the cp entry.
++ __ restore_bcp();
++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+ __ bind(L1);
+ }
+ }
+@@ -2467,7 +2492,7 @@
+ const Register flags = rax;
+
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+- jvmti_post_field_mod(cache, index, is_static);
++ jvmti_post_field_mod(cache, index, byte_no, is_static);
+ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+ // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
+@@ -2818,6 +2843,11 @@
+ // rcx: cache entry pointer
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
+ __ pop_ptr(rax); // restore object pointer
++
++ // DCEVM: Redefinition might have occurred => reresolve the cp entry. 
++ __ restore_bcp();
++ resolve_cache_and_index(1, noreg, rax, rcx, sizeof(u2));
++
+ __ bind(L1);
+ }
+
+@@ -3008,6 +3038,26 @@
+
+ __ bind(notFinal);
+
++ // DCEVM: Check if we are calling an old method (and have to go slow path)
++ Label notOld;
++ __ movl(rax, flags);
++ __ andl(rax, (1 << ConstantPoolCacheEntry::is_old_method_shift));
++ __ jcc(Assembler::zero, notOld);
++
++ // Need a null check here!
++ __ null_check(recv);
++
++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++ // DCEVM: TODO: Check if we can improve performance by inlining.
++ // DCEVM: TODO: Check if this additional branch affects normal execution time.
++ __ call_VM(method, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_method), recv, index);
++
++ // profile this call
++ __ profile_final_call(rax);
++ __ jump_from_interpreted(method, rdx);
++
++ __ bind(notOld);
++
+ // get receiver klass
+ __ null_check(recv, oopDesc::klass_offset_in_bytes());
+ __ load_klass(rax, recv);
+@@ -3093,6 +3143,31 @@
+ invokevirtual_helper(rbx, rcx, rdx);
+ __ bind(notMethod);
+
++ // DCEVM: Check if we are calling an old method (and have to go slow path)
++ //__ movl(rax, rdx);
++ Label notOld;
++ __ andl(rdx, (1 << ConstantPoolCacheEntry::is_old_method_shift));
++ __ jcc(Assembler::zero, notOld);
++
++ // Get receiver klass into rdx - also a null check
++ __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
++ __ verify_oop(rdx);
++
++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++ // DCEVM: TODO: Check if we can improve performance by inlining.
++ // DCEVM: TODO: Check if this additional branch affects normal execution time.
++ // DCEVM: TODO: Check the exact semantics (with respect to destroying registers) of call_VM
++ __ call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_interface_method), rcx, rax, rbx);
++
++ // DCEVM: TODO: Check if resolved method could be null. 
++
++ // profile this call
++ __ profile_virtual_call(rdx, rsi, rdi);
++
++ __ jump_from_interpreted(rbx, rdx);
++
++ __ bind(notOld);
++
+ // Get receiver klass into rdx - also a null check
+ __ restore_locals(); // restore rdi
+ __ null_check(rcx, oopDesc::klass_offset_in_bytes());
+diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
+index 932ee97..67bb710 100644
+--- a/src/cpu/x86/vm/templateTable_x86_64.cpp
++++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
+@@ -2151,6 +2151,22 @@
+ // resolve first time through
+ address entry;
+ switch (bytecode()) {
++ case Bytecodes::_fast_agetfield : // fall through
++ case Bytecodes::_fast_bgetfield : // fall through
++ case Bytecodes::_fast_cgetfield : // fall through
++ case Bytecodes::_fast_dgetfield : // fall through
++ case Bytecodes::_fast_fgetfield : // fall through
++ case Bytecodes::_fast_igetfield : // fall through
++ case Bytecodes::_fast_lgetfield : // fall through
++ case Bytecodes::_fast_sgetfield : // fall through
++ case Bytecodes::_fast_aputfield : // fall through
++ case Bytecodes::_fast_bputfield : // fall through
++ case Bytecodes::_fast_cputfield : // fall through
++ case Bytecodes::_fast_dputfield : // fall through
++ case Bytecodes::_fast_fputfield : // fall through
++ case Bytecodes::_fast_iputfield : // fall through
++ case Bytecodes::_fast_lputfield : // fall through
++ case Bytecodes::_fast_sputfield : // fall through
+ case Bytecodes::_getstatic:
+ case Bytecodes::_putstatic:
+ case Bytecodes::_getfield:
+@@ -2267,7 +2283,7 @@
+ // The registers cache and index expected to be set before call.
+ // Correct values of the cache and index registers are preserved.
+ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
+- bool is_static, bool has_tos) {
++ int byte_no, bool is_static, bool has_tos) {
+ // do the JVMTI work here to avoid disturbing the register state below
+ // We use c_rarg registers here because we want to use the register used in
+ // the call to the VM
+@@ -2298,7 +2314,11 @@
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::post_field_access),
+ c_rarg1, c_rarg2, c_rarg3);
+- __ get_cache_and_index_at_bcp(cache, index, 1);
++
++ // DCEVM: Redefinition might have occurred => reresolve the cp entry.
++ __ restore_bcp();
++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+ __ bind(L1);
+ }
+ }
+@@ -2320,7 +2340,7 @@
+ const Register bc = c_rarg3; // uses same reg as obj, so don't mix them
+
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+- jvmti_post_field_access(cache, index, is_static, false);
++ jvmti_post_field_access(cache, index, byte_no, is_static, false);
+ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+ if (!is_static) {
+@@ -2455,7 +2475,7 @@
+
+ // The registers cache and index expected to be set before call.
+ // The function may destroy various registers, just not the cache and index registers. 
+-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static) {
+ transition(vtos, vtos);
+
+ ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+@@ -2507,7 +2527,11 @@
+ CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::post_field_modification),
+ c_rarg1, c_rarg2, c_rarg3);
+- __ get_cache_and_index_at_bcp(cache, index, 1);
++
++ // DCEVM: Redefinition might have occurred => reresolve the cp entry.
++ __ restore_bcp();
++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+ __ bind(L1);
+ }
+ }
+@@ -2523,7 +2547,7 @@
+ const Register bc = c_rarg3;
+
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+- jvmti_post_field_mod(cache, index, is_static);
++ jvmti_post_field_mod(cache, index, byte_no, is_static);
+ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+ // [jk] not needed currently
+@@ -2837,6 +2861,11 @@
+ InterpreterRuntime::post_field_access),
+ c_rarg1, c_rarg2);
+ __ pop_ptr(rax); // restore object pointer
++
++ // DCEVM: Redefinition might have occurred => reresolve the cp entry.
++ __ restore_bcp();
++ resolve_cache_and_index(1, noreg, rax, rcx, sizeof(u2));
++
+ __ bind(L1);
+ }
+
+@@ -3073,6 +3102,26 @@
+
+ __ bind(notFinal);
+
++ // DCEVM: Check if we are calling an old method (and have to go slow path)
++ Label notOld;
++ __ movl(rax, flags);
++ __ andl(rax, (1 << ConstantPoolCacheEntry::is_old_method_shift));
++ __ jcc(Assembler::zero, notOld);
++
++ // Need a null check here!
++ __ null_check(recv);
++
++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++ // DCEVM: TODO: Check if we can improve performance by inlining.
++ // DCEVM: TODO: Check if this additional branch affects normal execution time.
++ __ call_VM(method, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_method), recv, index);
++
++ // profile this call
++ __ profile_final_call(rax);
++ __ jump_from_interpreted(method, rdx);
++
++ __ bind(notOld);
++
+ // get receiver klass
+ __ null_check(recv, oopDesc::klass_offset_in_bytes());
+ __ load_klass(rax, recv);
+@@ -3156,6 +3205,35 @@
+ invokevirtual_helper(rbx, rcx, rdx);
+ __ bind(notMethod);
+
++ // DCEVM: Check if we are calling an old method (and have to go slow path)
++ Label notOld;
++ __ andl(rdx, (1 << ConstantPoolCacheEntry::is_old_method_shift));
++ __ jcc(Assembler::zero, notOld);
++
++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class)
++ // DCEVM: TODO: Check if we can improve performance by inlining.
++ // DCEVM: TODO: Check if this additional branch affects normal execution time.
++ // DCEVM: TODO: Check the exact semantics (with respect to destroying registers) of call_VM
++ // DCEVM: FIXME: What exactly should we store here? 
++ __ push(rcx); // destroyed by Linux arguments passing conventions ++ __ movptr(r14, rcx); ++ __ call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_interface_method), r14, rax, rbx); ++ __ pop(rcx); ++ ++ // Get receiver klass into rdx - also a null check ++ __ restore_locals(); // restore r14 ++ __ load_klass(rdx, rcx); ++ __ verify_oop(rdx); ++ ++ // DCEVM: TODO: Check if resolved method could be null. ++ ++ // profile this call ++ __ profile_virtual_call(rdx, r13, r14); ++ ++ __ jump_from_interpreted(rbx, rdx); ++ ++ __ bind(notOld); ++ + // Get receiver klass into rdx - also a null check + __ restore_locals(); // restore r14 + __ null_check(rcx, oopDesc::klass_offset_in_bytes()); +diff --git a/src/os/bsd/vm/attachListener_bsd.cpp b/src/os/bsd/vm/attachListener_bsd.cpp +index dac5195..e939dcd 100644 +--- a/src/os/bsd/vm/attachListener_bsd.cpp ++++ b/src/os/bsd/vm/attachListener_bsd.cpp +@@ -460,14 +460,14 @@ AttachOperation* AttachListener::dequeue() { + + void AttachListener::vm_start() { + char fn[UNIX_PATH_MAX]; +- struct stat64 st; ++ struct stat st; + int ret; + + int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d", + os::get_temp_directory(), os::current_process_id()); + assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow"); + +- RESTARTABLE(::stat64(fn, &st), ret); ++ RESTARTABLE(::stat(fn, &st), ret); + if (ret == 0) { + ret = ::unlink(fn); + if (ret == -1) { +diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp +index 9a8ca61..196ab25 100644 +--- a/src/share/vm/c1/c1_Compilation.hpp ++++ b/src/share/vm/c1/c1_Compilation.hpp +@@ -242,8 +242,8 @@ class Compilation: public StackObj { + #define BAILOUT(msg) { bailout(msg); return; } + #define BAILOUT_(msg, res) { bailout(msg); return res; } + +-#define CHECK_BAILOUT() { if (bailed_out()) return; } +-#define CHECK_BAILOUT_(res) { if (bailed_out()) return res; } ++#define CHECK_BAILOUT() { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return; } ++#define CHECK_BAILOUT_(res) { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return res; } + + + class InstructionMark: public StackObj { +diff --git a/src/share/vm/ci/ciEnv.cpp b/src/share/vm/ci/ciEnv.cpp +index e20db5d..57f37db 100644 +--- a/src/share/vm/ci/ciEnv.cpp ++++ b/src/share/vm/ci/ciEnv.cpp +@@ -1172,3 +1172,11 @@ void ciEnv::record_out_of_memory_failure() { + // If memory is low, we stop compiling methods. + record_method_not_compilable("out of memory"); + } ++ ++// DCEVM: Called after class redefinition to clean up possibly invalidated state. ++void ciEnv::cleanup_after_redefinition() { ++ ++ if (_factory != NULL) { ++ _factory->cleanup_after_redefinition(); ++ } ++} +diff --git a/src/share/vm/ci/ciEnv.hpp b/src/share/vm/ci/ciEnv.hpp +index 103e532..abe2e37 100644 +--- a/src/share/vm/ci/ciEnv.hpp ++++ b/src/share/vm/ci/ciEnv.hpp +@@ -417,6 +417,8 @@ public: + void record_failure(const char* reason); + void record_method_not_compilable(const char* reason, bool all_tiers = true); + void record_out_of_memory_failure(); ++ ++ void cleanup_after_redefinition(); + }; + + #endif // SHARE_VM_CI_CIENV_HPP +diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp +index e0ab96b..36efef4 100644 +--- a/src/share/vm/ci/ciObjectFactory.cpp ++++ b/src/share/vm/ci/ciObjectFactory.cpp +@@ -296,6 +296,11 @@ ciObject* ciObjectFactory::get(oop key) { + // into the table. 
We need to recompute our index.
+ index = find(keyHandle(), _ci_objects);
+ }
++
++ if (is_found_at(index, keyHandle(), _ci_objects)) {
++ // DCEVM: Check if this is an error? Can occur when redefining classes.
++ return _ci_objects->at(index);
++ }
+ assert(!is_found_at(index, keyHandle(), _ci_objects), "no double insert");
+ insert(index, new_object, _ci_objects);
+ return new_object;
+@@ -764,3 +769,50 @@ void ciObjectFactory::print() {
+ _unloaded_instances->length(),
+ _unloaded_klasses->length());
+ }
++
++// DCEVM: Resorting the ciObject arrays after class redefinition
++void ciObjectFactory::sort_ci_objects(GrowableArray<ciObject*>* objects) {
++
++ // Resort the _ci_objects array. The order of two class pointers can be changed during class redefinition.
++ oop last = NULL;
++ for (int j = 0; j < objects->length(); j++) {
++ oop o = objects->at(j)->get_oop();
++ if (last >= o) {
++ int cur_last_index = j - 1;
++ oop cur_last = last;
++ while (cur_last >= o) {
++
++ // Swap the two objects to guarantee ordering
++ ciObject *tmp = objects->at(cur_last_index);
++ objects->at_put(cur_last_index, objects->at(cur_last_index + 1));
++ objects->at_put(cur_last_index + 1, tmp);
++
++ // Decrement index to move one step to the left
++ cur_last_index--;
++ if (cur_last_index < 0) {
++ break;
++ }
++ cur_last = objects->at(cur_last_index)->get_oop();
++ }
++ } else {
++ assert(last < o, "out of order");
++ last = o;
++ }
++ }
++
++#ifdef ASSERT
++ if (CIObjectFactoryVerify) {
++ oop last = NULL;
++ for (int j = 0; j < objects->length(); j++) {
++ oop o = objects->at(j)->get_oop();
++ assert(last < o, "out of order");
++ last = o;
++ }
++ }
++#endif // ASSERT
++}
++
++// DCEVM: Called after class redefinition to clean up possibly invalidated state.
++void ciObjectFactory::cleanup_after_redefinition() {
++ sort_ci_objects(_ci_objects);
++}
+diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp
+index 26cc2c3..855a4ac 100644
+--- a/src/share/vm/ci/ciObjectFactory.hpp
++++ b/src/share/vm/ci/ciObjectFactory.hpp
+@@ -38,6 +38,7 @@
+ class ciObjectFactory : public ResourceObj {
+ friend class VMStructs;
+ friend class ciEnv;
++ friend class CompileBroker;
+
+ private:
+ static volatile bool _initialized;
+@@ -137,6 +138,11 @@ public:
+
+ void print_contents();
+ void print();
++
++private:
++
++ static void sort_ci_objects(GrowableArray<ciObject*>* objects);
++ void cleanup_after_redefinition();
+ };
+
+ #endif // SHARE_VM_CI_CIOBJECTFACTORY_HPP
+diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
+index 6ffa4bf..4ea4e2b 100644
+--- a/src/share/vm/classfile/classFileParser.cpp
++++ b/src/share/vm/classfile/classFileParser.cpp
+@@ -795,6 +795,7 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ Handle class_loader,
+ Handle protection_domain,
+ Symbol* class_name,
++ KlassHandle old_klass,
+ TRAPS) {
+ ClassFileStream* cfs = stream();
+ assert(length > 0, "only called for length>0");
+@@ -813,6 +814,9 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ interface_index, CHECK_(nullHandle));
+ if (cp->tag_at(interface_index).is_klass()) {
+ interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
++ if (!old_klass.is_null() && !interf->is_newest_version()) {
++ interf = KlassHandle(THREAD, interf->newest_version());
++ }
+ } else {
+ Symbol* unresolved_klass = cp->klass_name_at(interface_index);
+
+@@ -825,6 +829,9 @@ objArrayHandle 
ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+ unresolved_klass, class_loader, protection_domain,
+ false, CHECK_(nullHandle));
++ if (!old_klass.is_null()) {
++ k = k->klass_part()->newest_version();
++ }
+ interf = KlassHandle(THREAD, k);
+ }
+
+@@ -1912,6 +1919,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
+ int runtime_invisible_parameter_annotations_length = 0;
+ u1* annotation_default = NULL;
+ int annotation_default_length = 0;
++ u2 code_section_table_length = 0;
++ typeArrayHandle code_section_table;
+
+ // Parse code and exceptions attribute
+ u2 method_attributes_count = cfs->get_u2_fast();
+@@ -2081,6 +2090,24 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
+ parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+ stackmap_data = typeArrayHandle(THREAD, sm);
+ parsed_stackmap_attribute = true;
++ } else if (UseMethodForwardPoints && cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_code_sections()) {
++ int length = code_attribute_length;
++ int value_count = length / sizeof(u2);
++ int line_count = length / 3;
++ if (TraceRedefineClasses >= 3) {
++ tty->print_cr("Found code section attribute when loading class with %d entries (%d lines)", value_count, line_count);
++ }
++ code_section_table_length = value_count;
++ code_section_table = oopFactory::new_permanent_shortArray(value_count, CHECK_NULL);
++ code_section_table->set_length(value_count);
++
++ for (int i = 0; i < value_count; ++i) {
++ u2 value = cfs->get_u2(CHECK_(nullHandle));
++ code_section_table->short_at_put(i, value);
++ if (TraceRedefineClasses >= 4) {
++ tty->print_cr("Code section table at %d: %d", i, value);
++ }
++ }
+ } else {
+ // Skip unknown attributes
+ cfs->skip_u1(code_attribute_length, CHECK_(nullHandle));
+@@ -2206,6 +2233,18 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
+ }
+ #endif
+
++ // DCEVM: TODO: Get a different solution for the problem of method forward
++ // points and variable sized interpreter frames.
++ if (UseMethodForwardPoints) {
++ if (max_stack > MethodForwardPointsMaxStack) {
++ fatal(err_msg("Method has too large stack (%d), increase the value of MethodForwardPointsMaxStack (%d)", max_stack, MethodForwardPointsMaxStack));
++ }
++ if (max_locals > MethodForwardPointsMaxLocals) {
++ fatal(err_msg("Method has too many locals (%d), increase the value of MethodForwardPointsMaxLocals (%d)", max_locals, MethodForwardPointsMaxLocals));
++ }
++ max_stack = MethodForwardPointsMaxStack;
++ max_locals = MethodForwardPointsMaxLocals;
++ }
+ // Fill in code attribute information
+ m->set_max_stack(max_stack);
+ m->set_max_locals(max_locals);
+@@ -2219,6 +2258,8 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
+ */
+ m->constMethod()->set_stackmap_data(stackmap_data());
+
++ m->constMethod()->set_code_section_table(code_section_table());
++
+ // Copy byte codes
+ m->set_code(code_start);
+
+@@ -2792,6 +2833,15 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp,
+ "Invalid Deprecated classfile attribute length %u in class file %s",
+ attribute_length, CHECK);
+ }
++ } else if (tag == vmSymbols::tag_field_redefinition_policy()) {
++ // DCEVM: Check for deleted field attribute
++ _field_redefinition_policy = cfs->get_u1_fast();
++ } else if (tag == vmSymbols::tag_static_field_redefinition_policy()) {
++ // DCEVM: Check for deleted static field attribute
++ _static_field_redefinition_policy = cfs->get_u1_fast();
++ } else if (tag == vmSymbols::tag_method_redefinition_policy()) {
++ // DCEVM: Check for deleted method attribute
++ _method_redefinition_policy = cfs->get_u1_fast();
+ } else if (_major_version >= JAVA_1_5_VERSION) {
+ if (tag == vmSymbols::tag_signature()) {
+ if (attribute_length != 2) {
+@@ -2895,6 +2945,17 @@ void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
+ }
+ k->set_inner_classes(_inner_classes());
+ k->set_class_annotations(_annotations());
++
++
++ if (_field_redefinition_policy != 0xff) {
++ k->set_field_redefinition_policy(_field_redefinition_policy);
++ }
++ if (_static_field_redefinition_policy != 0xff) {
++ k->set_static_field_redefinition_policy(_static_field_redefinition_policy);
++ }
++ if (_method_redefinition_policy != 0xff) {
++ k->set_method_redefinition_policy(_method_redefinition_policy);
++ }
+ }
+
+ typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annotations,
+@@ -2918,9 +2979,126 @@ typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annota
+ }
+
+
++// DCEVM: Finds the super symbols by reading the bytes of the class and returns
++// them in a growable array.
++void ClassFileParser::findSuperSymbols(Symbol* name,
++ Handle class_loader,
++ Handle protection_domain,
++ KlassHandle old_klass,
++ GrowableArray<Symbol*> &handles,
++ TRAPS) {
++
++ _cp_patches = NULL;
++ // So that JVMTI can cache class file in the state before retransformable agents
++ // have modified it
++ unsigned char *cached_class_file_bytes = NULL;
++
++ ClassFileStream* cfs = stream();
++
++ _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
++
++ instanceKlassHandle nullHandle;
++
++ // Save the class file name for easier error message printing.
++ _class_name = name != NULL ? name : vmSymbols::unknown_class_name();
++
++ cfs->guarantee_more(8, CHECK); // magic, major, minor
++ // Magic value
++ u4 magic = cfs->get_u4_fast();
++ if (magic != JAVA_CLASSFILE_MAGIC) {
++ // Invalid class file!
++ return; ++ } ++ ++ // Version numbers ++ u2 minor_version = cfs->get_u2_fast(); ++ u2 major_version = cfs->get_u2_fast(); ++ ++ // Check version numbers - we check this even with verifier off ++ if (!is_supported_version(major_version, minor_version)) { ++ ++ // Unsupported version! ++ return; ++ } ++ ++ _major_version = major_version; ++ _minor_version = minor_version; ++ ++ ++ // Check if verification needs to be relaxed for this class file ++ // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376) ++ _relax_verify = Verifier::relax_verify_for(class_loader()); ++ _need_verify = false; ++ ++ // Constant pool ++ constantPoolHandle cp = parse_constant_pool(class_loader(), CHECK); ++ int cp_size = cp->length(); ++ ++ cfs->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len ++ ++ // Access flags ++ AccessFlags access_flags; ++ jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS; ++ ++ if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { ++ // Set abstract bit for old class files for backward compatibility ++ flags |= JVM_ACC_ABSTRACT; ++ } ++ access_flags.set_flags(flags); ++ ++ // This class and superclass ++ instanceKlassHandle super_klass; ++ u2 this_class_index = cfs->get_u2_fast(); ++ check_property( ++ valid_cp_range(this_class_index, cp_size) && ++ cp->tag_at(this_class_index).is_unresolved_klass(), ++ "Invalid this class index %u in constant pool in class file %s", ++ this_class_index, CHECK); ++ ++ Symbol* class_name = cp->unresolved_klass_at(this_class_index); ++ assert(class_name != NULL, "class_name can't be null"); ++ ++ // Update _class_name which could be null previously to be class_name ++ _class_name = class_name; ++ ++ // DCEVM: DO NOT release all handles when parsing is done ++ {// HandleMark hm(THREAD); ++ ++ // Checks if name in class file matches requested name ++ if (name != NULL && class_name != name) { ++ return; ++ } ++ ++ u2 super_class_index = cfs->get_u2_fast(); ++ ++ if (super_class_index != 0) { ++ Symbol* super_class = cp->klass_name_at(super_class_index); ++ handles.append(super_class); ++ } else { ++ // DCEVM: This redefinition must be for the Object class. 
++ } ++ ++ // Interfaces ++ u2 itfs_len = cfs->get_u2_fast(); ++ objArrayHandle local_interfaces; ++ if (itfs_len == 0) { ++ local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); ++ } else { ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK); ++ } ++ ++ for (int i=0; i<local_interfaces->length(); i++) { ++ oop o = local_interfaces->obj_at(i); ++ Symbol* interface_handle = ((klassOop)o)->klass_part()->name(); ++ handles.append(interface_handle); ++ } ++ } ++} ++ + instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + KlassHandle host_klass, + GrowableArray<Handle>* cp_patches, + TempNewSymbol& parsed_name, +@@ -2971,10 +3149,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + unsigned char* ptr = cfs->buffer(); + unsigned char* end_ptr = cfs->buffer() + cfs->length(); + ++ bool pretend_new_universe = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, + &ptr, &end_ptr, + &cached_class_file_bytes, + &cached_class_file_length); ++ Thread::current()->set_pretend_new_universe(pretend_new_universe); + + if (ptr != cfs->buffer()) { + // JVMTI agent has modified class file data. +@@ -3130,7 +3311,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + // However, make sure it is not an array type. + bool is_array = false; + if (cp->tag_at(super_class_index).is_klass()) { +- super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index)); ++ klassOop resolved_klass = cp->resolved_klass_at(super_class_index); ++ if (!old_klass.is_null()) { ++ resolved_klass = resolved_klass->klass_part()->newest_version(); ++ } ++ super_klass = instanceKlassHandle(THREAD, resolved_klass); + if (_need_verify) + is_array = super_klass->oop_is_array(); + } else if (_need_verify) { +@@ -3148,7 +3333,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + if (itfs_len == 0) { + local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); + } else { +- local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle)); ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_(nullHandle)); + } + + u2 java_fields_count = 0; +@@ -3202,7 +3387,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + protection_domain, + true, + CHECK_(nullHandle)); +- ++ if (!old_klass.is_null()) { ++ k = k->klass_part()->newest_version(); ++ } + KlassHandle kh (THREAD, k); + super_klass = instanceKlassHandle(THREAD, kh()); + } +@@ -3591,6 +3778,19 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + rt = REF_NONE; + } else { + rt = super_klass->reference_type(); ++ ++ // DCEVM: With class redefinition, it can also happen that special classes are loaded. 
++ if (name == vmSymbols::java_lang_ref_Reference()) { ++ rt = REF_OTHER; ++ } else if (name == vmSymbols::java_lang_ref_SoftReference()) { ++ rt = REF_SOFT; ++ } else if (name == vmSymbols::java_lang_ref_WeakReference()) { ++ rt = REF_WEAK; ++ } else if (name == vmSymbols::java_lang_ref_FinalReference()) { ++ rt = REF_FINAL; ++ } else if (name == vmSymbols::java_lang_ref_PhantomReference()) { ++ rt = REF_PHANTOM; ++ } + } + + // We can now create the basic klassOop for this klass +@@ -3691,7 +3891,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts); + + // Fill in has_finalizer, has_vanilla_constructor, and layout_helper +- set_precomputed_flags(this_klass); ++ set_precomputed_flags(this_klass, old_klass); + + // reinitialize modifiers, using the InnerClasses attribute + int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle)); +@@ -3714,6 +3914,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + // Allocate mirror and initialize static fields + java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); + ++ if (rt == REF_OTHER) { ++ instanceRefKlass::update_nonstatic_oop_maps(ik); ++ } ++ + ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), + false /* not shared class */); + +@@ -3856,7 +4060,7 @@ void ClassFileParser::fill_oop_maps(instanceKlassHandle k, + } + + +-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { ++void ClassFileParser::set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass) { + klassOop super = k->super(); + + // Check if this klass has an empty finalize method (i.e. one with return bytecode only), +@@ -3864,7 +4068,9 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { + if (!_has_empty_finalizer) { + if (_has_finalizer || + (super != NULL && super->klass_part()->has_finalizer())) { +- k->set_has_finalizer(); ++ if (old_klass.is_null() || old_klass->has_finalizer()) { ++ k->set_has_finalizer(); ++ } + } + } + +@@ -3880,7 +4086,7 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { + + // Check if this klass supports the java.lang.Cloneable interface + if (SystemDictionary::Cloneable_klass_loaded()) { +- if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) { ++ if (k->is_subtype_of(SystemDictionary::Cloneable_klass()) || k->is_subtype_of(SystemDictionary::Cloneable_klass()->klass_part()->newest_version())) { + k->set_is_cloneable(); + } + } +diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp +index 314ec5e..a35bf67 100644 +--- a/src/share/vm/classfile/classFileParser.hpp ++++ b/src/share/vm/classfile/classFileParser.hpp +@@ -64,6 +64,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + int _sde_length; + typeArrayHandle _inner_classes; + typeArrayHandle _annotations; ++ u1 _field_redefinition_policy; ++ u1 _static_field_redefinition_policy; ++ u1 _method_redefinition_policy; + + void set_class_synthetic_flag(bool x) { _synthetic_flag = x; } + void set_class_sourcefile(Symbol* x) { _sourcefile = x; } +@@ -151,6 +154,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + Handle class_loader, + Handle protection_domain, + Symbol* class_name, ++ KlassHandle old_klass, + TRAPS); + + // Field parsing +@@ -237,7 +241,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + unsigned int nonstatic_oop_map_count, + int* nonstatic_oop_offsets, + unsigned int* 
nonstatic_oop_counts); +- void set_precomputed_flags(instanceKlassHandle k); ++ void set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass); + objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, + objArrayHandle local_ifs, TRAPS); + +@@ -337,7 +341,12 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + + public: + // Constructor +- ClassFileParser(ClassFileStream* st) { set_stream(st); } ++ ClassFileParser(ClassFileStream* st) { ++ set_stream(st); ++ _field_redefinition_policy = 0xff; ++ _static_field_redefinition_policy = 0xff; ++ _method_redefinition_policy = 0xff; ++ } + + // Parse .class file and return new klassOop. The klassOop is not hooked up + // to the system dictionary or any other structures, so a .class file can +@@ -349,21 +358,33 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + TempNewSymbol& parsed_name, + bool verify, + TRAPS) { + KlassHandle no_host_klass; +- return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD); ++ return parseClassFile(name, class_loader, protection_domain, old_klass, no_host_klass, NULL, parsed_name, verify, THREAD); + } + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + KlassHandle host_klass, + GrowableArray<Handle>* cp_patches, + TempNewSymbol& parsed_name, + bool verify, + TRAPS); + ++ static void initialize_static_field(fieldDescriptor* fd, TRAPS); ++ ++ // DCEVM: Creates symbol handles for the super class and the interfaces ++ void findSuperSymbols(Symbol* name, ++ Handle class_loader, ++ Handle protection_domain, ++ KlassHandle old_klass, ++ GrowableArray<Symbol*> &handles, ++ TRAPS); ++ + // Verifier checks + static void check_super_class_access(instanceKlassHandle this_klass, TRAPS); + static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS); +diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp +index a2e61a4..450e19f 100644 +--- a/src/share/vm/classfile/classLoader.cpp ++++ b/src/share/vm/classfile/classLoader.cpp +@@ -915,6 +915,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) { + instanceKlassHandle result = parser.parseClassFile(h_name, + class_loader, + protection_domain, ++ KlassHandle(), + parsed_name, + false, + CHECK_(h)); +diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp +index 78e76cc..ee21f3a 100644 +--- a/src/share/vm/classfile/dictionary.cpp ++++ b/src/share/vm/classfile/dictionary.cpp +@@ -326,6 +326,21 @@ void Dictionary::classes_do(void f(klassOop)) { + } + } + ++ ++// DCEVM: Just the classes from defining class loaders ++void Dictionary::classes_do(ObjectClosure *closure) { ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* probe = bucket(index); ++ probe != NULL; ++ probe = probe->next()) { ++ klassOop k = probe->klass(); ++ if (probe->loader() == instanceKlass::cast(k)->class_loader()) { ++ closure->do_object(k); ++ } ++ } ++ } ++} ++ + // Added for initialize_itable_for_klass to handle exceptions + // Just the classes from defining class loaders + void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) { +@@ -433,6 +448,33 @@ void Dictionary::add_klass(Symbol* class_name, Handle class_loader, + add_entry(index, entry); + } + ++// DCEVM: Updates the klass entry to point 
to the new klassOop. Necessary only for class redefinition. ++bool Dictionary::update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class) { ++ ++ // There are several entries for the same class in the dictionary: One extra entry for each parent classloader of the classloader of the class. ++ bool found = false; ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass() == old_class()) { ++ entry->set_literal(k()); ++ found = true; ++ } ++ } ++ } ++ ++ return found; ++} ++ ++// DCEVM: Undo previous updates to the system dictionary ++void Dictionary::rollback_redefinition() { ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass()->klass_part()->is_redefining()) { ++ entry->set_literal(entry->klass()->klass_part()->old_version()); ++ } ++ } ++ } ++} + + // This routine does not lock the system dictionary. + // +@@ -459,12 +501,22 @@ DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash, + return NULL; + } + ++// DCEVM: return old version if we are not in the new universe? ++klassOop Dictionary::intercept_for_version(klassOop k) { ++ if (k == NULL) return k; ++ ++ if (k->klass_part()->is_redefining() && !Thread::current()->pretend_new_universe()) { ++ return k->klass_part()->old_version(); ++ } ++ ++ return k; ++} + + klassOop Dictionary::find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS) { + DictionaryEntry* entry = get_entry(index, hash, name, loader); + if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) { +- return entry->klass(); ++ return intercept_for_version(entry->klass()); + } else { + return NULL; + } +@@ -477,7 +529,7 @@ klassOop Dictionary::find_class(int index, unsigned int hash, + assert (index == index_for(name, loader), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, loader); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? entry->klass() : (klassOop)NULL); + } + + +@@ -489,7 +541,7 @@ klassOop Dictionary::find_shared_class(int index, unsigned int hash, + assert (index == index_for(name, Handle()), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, Handle()); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? 
entry->klass() : (klassOop)NULL); + } + + +diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dictionary.hpp +index bd33760..186d0eb 100644 +--- a/src/share/vm/classfile/dictionary.hpp ++++ b/src/share/vm/classfile/dictionary.hpp +@@ -73,6 +73,10 @@ public: + + void add_klass(Symbol* class_name, Handle class_loader,KlassHandle obj); + ++ bool update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class); ++ ++ void rollback_redefinition(); ++ + klassOop find_class(int index, unsigned int hash, + Symbol* name, Handle loader); + +@@ -89,6 +93,7 @@ public: + void classes_do(void f(klassOop, TRAPS), TRAPS); + void classes_do(void f(klassOop, oop)); + void classes_do(void f(klassOop, oop, TRAPS), TRAPS); ++ void classes_do(ObjectClosure *closure); + + void methods_do(void f(methodOop)); + +@@ -105,6 +110,7 @@ public: + bool do_unloading(BoolObjectClosure* is_alive); + + // Protection domains ++ static klassOop intercept_for_version(klassOop k); + klassOop find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS); + bool is_valid_protection_domain(int index, unsigned int hash, +diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp +index f8b10b3..cb68d82 100644 +--- a/src/share/vm/classfile/javaClasses.cpp ++++ b/src/share/vm/classfile/javaClasses.cpp +@@ -1798,7 +1798,7 @@ Handle java_lang_reflect_Method::create(TRAPS) { + klassOop klass = SystemDictionary::reflect_Method_klass(); + // This class is eagerly initialized during VM initialization, since we keep a refence + // to one of the methods +- assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized"); ++ assert(instanceKlass::cast(klass)->is_initialized() || klass->klass_part()->old_version() != NULL, "must be initialized"); + return instanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH); + } + +diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp +index b741cfa..8ce5287 100644 +--- a/src/share/vm/classfile/javaClasses.hpp ++++ b/src/share/vm/classfile/javaClasses.hpp +@@ -213,7 +213,6 @@ class java_lang_String : AllStatic { + + class java_lang_Class : AllStatic { + friend class VMStructs; +- + private: + // The fake offsets are added by the class loader when java.lang.Class is loaded + +diff --git a/src/share/vm/classfile/loaderConstraints.cpp b/src/share/vm/classfile/loaderConstraints.cpp +index 8650cd9..965cce2 100644 +--- a/src/share/vm/classfile/loaderConstraints.cpp ++++ b/src/share/vm/classfile/loaderConstraints.cpp +@@ -449,7 +449,7 @@ void LoaderConstraintTable::verify(Dictionary* dictionary, + if (k != NULL) { + // We found the class in the system dictionary, so we should + // make sure that the klassOop matches what we already have. +- guarantee(k == probe->klass(), "klass should be in dictionary"); ++ guarantee(k == probe->klass()->klass_part()->newest_version(), "klass should be in dictionary"); + } else { + // If we don't find the class in the system dictionary, it + // has to be in the placeholders table. 
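
The dictionary changes above all funnel through one idea: while a redefinition is in flight, the new klassOop sits in the dictionary flagged as is_redefining, and intercept_for_version() makes every ordinary lookup return the old version unless the calling thread has pretend_new_universe set. Below is a minimal, self-contained sketch of that visibility rule; it is not part of the patch, and Klass and Thread here are simplified stand-ins for the VM's klassOop and Thread types.

  #include <cstdio>

  // Simplified stand-ins for the VM's klassOop and Thread (illustrative only).
  struct Klass {
    const char* name;
    Klass*      old_version;    // previous revision of this class, if any
    bool        is_redefining;  // true while a redefinition is uncommitted
  };

  struct Thread {
    bool pretend_new_universe;  // set only on the thread driving the redefinition
  };

  // Mirrors the idea of Dictionary::intercept_for_version(): hide an
  // uncommitted new version from threads still living in the old universe.
  Klass* intercept_for_version(Thread* self, Klass* k) {
    if (k != nullptr && k->is_redefining && !self->pretend_new_universe) {
      return k->old_version;
    }
    return k;
  }

  int main() {
    Klass v1 = {"C (rev 1)", nullptr, false};
    Klass v2 = {"C (rev 2)", &v1, true};  // redefinition in flight

    Thread mutator = {false};
    Thread redefiner = {true};
    std::printf("mutator sees:   %s\n", intercept_for_version(&mutator, &v2)->name);
    std::printf("redefiner sees: %s\n", intercept_for_version(&redefiner, &v2)->name);
    return 0;
  }

Keeping the new version hidden behind this single check is also what makes rollback cheap: rollback_redefinition() above only has to walk the buckets and point each is_redefining entry back at its old_version.
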
+diff --git a/src/share/vm/classfile/loaderConstraints.hpp b/src/share/vm/classfile/loaderConstraints.hpp +index d01b2c4..1ad80f7 100644 +--- a/src/share/vm/classfile/loaderConstraints.hpp ++++ b/src/share/vm/classfile/loaderConstraints.hpp +@@ -106,7 +106,7 @@ public: + + klassOop klass() { return literal(); } + klassOop* klass_addr() { return literal_addr(); } +- void set_klass(klassOop k) { set_literal(k); } ++ void set_klass(klassOop k) { set_literal(k); assert(k == NULL || !k->klass_part()->is_redefining(), "just checking"); } + + LoaderConstraintEntry* next() { + return (LoaderConstraintEntry*)HashtableEntry<klassOop, mtClass>::next(); +diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp +index 899153a..fa45c6d 100644 +--- a/src/share/vm/classfile/systemDictionary.cpp ++++ b/src/share/vm/classfile/systemDictionary.cpp +@@ -157,6 +157,7 @@ klassOop SystemDictionary::resolve_or_fail(Symbol* class_name, Handle class_load + // can return a null klass + klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD); + } ++ assert(klass == NULL || klass->klass_part()->is_newest_version() || klass->klass_part()->newest_version()->klass_part()->is_redefining(), "must be"); + return klass; + } + +@@ -199,7 +200,8 @@ klassOop SystemDictionary::resolve_or_fail(Symbol* class_name, + // Forwards to resolve_instance_class_or_null + + klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) { +- assert(!THREAD->is_Compiler_thread(), ++ // DCEVM: Check if this relaxing of the condition is correct? Test case hs203t004 failing otherwise. ++ assert(!THREAD->is_Compiler_thread() || JvmtiThreadState::state_for(JavaThread::current())->get_class_being_redefined() != NULL, + err_msg("can not load classes with compiler thread: class=%s, classloader=%s", + class_name->as_C_string(), + class_loader.is_null() ? "null" : class_loader->klass()->klass_part()->name()->as_C_string())); +@@ -961,6 +963,7 @@ klassOop SystemDictionary::parse_stream(Symbol* class_name, + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ KlassHandle(), + host_klass, + cp_patches, + parsed_name, +@@ -1022,7 +1025,14 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + Handle protection_domain, + ClassFileStream* st, + bool verify, ++ KlassHandle old_class, + TRAPS) { ++ bool redefine_classes_locked = false; ++ if (!Thread::current()->redefine_classes_mutex()->owned_by_self()) { ++ Thread::current()->redefine_classes_mutex()->lock(); ++ redefine_classes_locked = true; ++ } ++ + // Classloaders that support parallelism, e.g. 
bootstrap classloader, + // or all classloaders with UnsyncloadClass do not acquire lock here + bool DoObjectLock = true; +@@ -1050,9 +1060,14 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ old_class, + parsed_name, + verify, + THREAD); ++ if (!old_class.is_null() && !k.is_null()) { ++ k->set_redefining(true); ++ k->set_old_version(old_class()); ++ } + + const char* pkg = "java/"; + if (!HAS_PENDING_EXCEPTION && +@@ -1087,13 +1102,18 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + // Add class just loaded + // If a class loader supports parallel classloading handle parallel define requests + // find_or_define_instance_class may return a different instanceKlass +- if (is_parallelCapable(class_loader)) { ++ // (tw) TODO: for class redefinition the parallel version does not work, check if this is a problem? ++ if (is_parallelCapable(class_loader) && old_class.is_null()) { + k = find_or_define_instance_class(class_name, class_loader, k, THREAD); + } else { +- define_instance_class(k, THREAD); ++ define_instance_class(k, old_class, THREAD); + } + } + ++ if (redefine_classes_locked) { ++ Thread::current()->redefine_classes_mutex()->unlock(); ++ } ++ + // If parsing the class file or define_instance_class failed, we + // need to remove the placeholder added on our behalf. But we + // must make sure parsed_name is valid first (it won't be if we had +@@ -1122,7 +1142,7 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + MutexLocker mu(SystemDictionary_lock, THREAD); + + klassOop check = find_class(parsed_name, class_loader); +- assert(check == k(), "should be present in the dictionary"); ++ assert((check == k() && !k->is_redefining()) || (k->is_redefining() && check == k->old_version()), "should be present in the dictionary"); + + klassOop check2 = find_class(h_name, h_loader); + assert(check == check2, "name inconsistancy in SystemDictionary"); +@@ -1349,7 +1369,11 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha + } + } + +-void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { ++void SystemDictionary::rollback_redefinition() { ++ dictionary()->rollback_redefinition(); ++} ++ ++void SystemDictionary::define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS) { + + Handle class_loader_h(THREAD, k->class_loader()); + +@@ -1376,13 +1400,23 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { + Symbol* name_h = k->name(); + unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h); + int d_index = dictionary()->hash_to_index(d_hash); +- check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK); ++ ++ // DCEVM: Update version of the klassOop in the system dictionary ++ // TODO: Check for thread safety! 
++ if (!old_class.is_null()) { ++ bool ok = dictionary()->update_klass(d_index, d_hash, name_h, class_loader_h, k, old_class); ++ assert (ok, "must have found old class and updated!"); ++ } ++ check_constraints(d_index, d_hash, k, class_loader_h, old_class.is_null(), CHECK); ++ ++ if(!old_class.is_null() && TraceRedefineClasses >= 3){ tty->print_cr("Class has been updated!"); } + + // Register class just loaded with class loader (placed in Vector) + // Note we do this before updating the dictionary, as this can + // fail with an OutOfMemoryError (if it does, we will *not* put this + // class in the dictionary and will not update the class hierarchy). +- if (k->class_loader() != NULL) { ++ // (tw) Only register if not redefining a class. ++ if (k->class_loader() != NULL && old_class.is_null()) { + methodHandle m(THREAD, Universe::loader_addClass_method()); + JavaValue result(T_VOID); + JavaCallArguments args(class_loader_h); +@@ -1408,8 +1442,9 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { + } + k->eager_initialize(THREAD); + ++ // (tw) Only notify jvmti if not redefining a class. + // notify jvmti +- if (JvmtiExport::should_post_class_load()) { ++ if (JvmtiExport::should_post_class_load() && old_class.is_null()) { + assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_load((JavaThread *) THREAD, k()); + +@@ -1482,7 +1517,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(Symbol* clas + } + } + +- define_instance_class(k, THREAD); ++ define_instance_class(k, KlassHandle(), THREAD); + + Handle linkage_exception = Handle(); // null handle + +@@ -1612,6 +1647,14 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) { + Universe::flush_dependents_on(k); + } + ++// (tw) Remove from hierarchy - Undo add_to_hierarchy. ++void SystemDictionary::remove_from_hierarchy(instanceKlassHandle k) { ++ assert(k.not_null(), "just checking"); ++ ++ k->remove_from_sibling_list(); ++ ++ // TODO: Remove from interfaces. ++} + + // ---------------------------------------------------------------------------- + // GC support +@@ -1701,6 +1744,24 @@ void SystemDictionary::oops_do(OopClosure* f) { + } + + ++// (tw) Iterate over all pre-loaded classes in the dictionary. ++void SystemDictionary::preloaded_classes_do(OopClosure *f) { ++ for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) { ++ f->do_oop((oop*) &_well_known_klasses[k]); ++ } ++ ++ { ++ for (int i = 0; i < T_VOID+1; i++) { ++ if (_box_klasses[i] != NULL) { ++ assert(i >= T_BOOLEAN, "checking"); ++ f->do_oop((oop*) &_box_klasses[i]); ++ } ++ } ++ } ++ ++ // TODO: Check if we need to call FilterFieldsMap ++} ++ + void SystemDictionary::preloaded_oops_do(OopClosure* f) { + for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) { + f->do_oop((oop*) &_well_known_klasses[k]); +@@ -1733,6 +1794,11 @@ void SystemDictionary::classes_do(void f(klassOop)) { + dictionary()->classes_do(f); + } + ++// (tw) Iterate over all classes in the dictionary. 
++void SystemDictionary::classes_do(ObjectClosure *closure) { ++ dictionary()->classes_do(closure); ++} ++ + // Added for initialize_itable_for_klass + // Just the classes from defining class loaders + // Don't iterate over placeholders +@@ -1869,7 +1935,9 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) { + + // Preload ref klasses and set reference types + instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER); +- instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass)); ++ ++ // (tw) This is now done in parseClassFile in order to support class redefinition ++ // instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass)); + + initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK); + instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT); +@@ -1955,7 +2023,11 @@ void SystemDictionary::check_constraints(int d_index, unsigned int d_hash, + // also holds array classes + + assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary"); +- if ((defining == true) || (k() != check)) { ++ if ((defining == true) && ((k() != check) && k->old_version() != check)) { ++ ResourceMark rm(Thread::current()); ++ tty->print_cr("(%d / %d) (%s/%s)", k->revision_number(), check->klass_part()->revision_number(), k->name()->as_C_string(), check->klass_part()->name()->as_C_string()); ++ k()->print(); ++ check->print(); + linkage_error = "loader (instance of %s): attempted duplicate class " + "definition for name: \"%s\""; + } else { +diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp +index adf82e5..e316f8e 100644 +--- a/src/share/vm/classfile/systemDictionary.hpp ++++ b/src/share/vm/classfile/systemDictionary.hpp +@@ -268,7 +268,7 @@ public: + // Resolve from stream (called by jni_DefineClass and JVM_DefineClass) + static klassOop resolve_from_stream(Symbol* class_name, Handle class_loader, + Handle protection_domain, +- ClassFileStream* st, bool verify, TRAPS); ++ ClassFileStream* st, bool verify, KlassHandle old_class, TRAPS); + + // Lookup an already loaded class. If not found NULL is returned. + static klassOop find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS); +@@ -309,6 +309,8 @@ public: + // Iterate over all klasses in dictionary + // Just the classes from defining class loaders + static void classes_do(void f(klassOop)); ++ static void classes_do(ObjectClosure *closure); ++ static void preloaded_classes_do(OopClosure *closure); + // Added for initialize_itable_for_klass to handle exceptions + static void classes_do(void f(klassOop, TRAPS), TRAPS); + // All classes, and their class loaders +@@ -415,6 +417,8 @@ public: + initialize_wk_klasses_until((WKID) limit, start_id, THREAD); + } + ++ static void rollback_redefinition(); ++ + public: + #define WK_KLASS_DECLARE(name, symbol, option) \ + static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } +@@ -596,7 +600,7 @@ private: + // after waiting, but before reentering SystemDictionary_lock + // to preserve lock order semantics. 
+ static void double_lock_wait(Handle lockObject, TRAPS); +- static void define_instance_class(instanceKlassHandle k, TRAPS); ++ static void define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS); + static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, + Handle class_loader, + instanceKlassHandle k, TRAPS); +@@ -615,12 +619,17 @@ private: + // Setup link to hierarchy + static void add_to_hierarchy(instanceKlassHandle k, TRAPS); + ++public: ++ ++ // Remove link to hierarchy ++ static void remove_from_hierarchy(instanceKlassHandle k); ++ ++private: + // event based tracing + static void post_class_load_event(TracingTime start_time, instanceKlassHandle k, + Handle initiating_loader); + static void post_class_unload_events(BoolObjectClosure* is_alive); + +-private: + // We pass in the hashtable index so we can calculate it outside of + // the SystemDictionary_lock. + +diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp +index da188bb..53455df 100644 +--- a/src/share/vm/classfile/verifier.cpp ++++ b/src/share/vm/classfile/verifier.cpp +@@ -106,7 +106,7 @@ bool Verifier::relax_verify_for(oop loader) { + return !need_verify; + } + +-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) { ++bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS) { + HandleMark hm; + ResourceMark rm(THREAD); + +@@ -117,7 +117,8 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + + const char* klassName = klass->external_name(); + bool can_failover = FailOverToOldVerifier && +- klass->major_version() < NOFAILOVER_MAJOR_VERSION; ++ klass->major_version() < NOFAILOVER_MAJOR_VERSION && ++ may_use_old_verifier; + + // If the class should be verified, first see if we can use the split + // verifier. If not, or if verification fails and FailOverToOldVerifier +@@ -138,6 +139,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + tty->print_cr( + "Fail over class verification to old verifier for: %s", klassName); + } ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -145,6 +147,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + exception_message = split_verifier.exception_message(); + } + } else { ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -159,6 +162,9 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + } + tty->print_cr("End class verification for: %s", klassName); + } ++ } else if (TraceClassInitialization) { ++ // (tw) Output not verified classes ++ tty->print_cr("Class %s was not verified", klassName); + } + + if (HAS_PENDING_EXCEPTION) { +@@ -210,7 +216,7 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou + // NOTE: this is called too early in the bootstrapping process to be + // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection. 
+ (refl_magic_klass == NULL || +- !klass->is_subtype_of(refl_magic_klass) || ++ !(klass->is_subtype_of(refl_magic_klass) || klass->is_subtype_of(refl_magic_klass->klass_part()->newest_version())) || + VerifyReflectionBytecodes) + ); + } +@@ -517,7 +523,7 @@ void ErrorContext::stackmap_details(outputStream* ss, methodOop method) const { + + ClassVerifier::ClassVerifier( + instanceKlassHandle klass, TRAPS) +- : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass) { ++ : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass->newest_version()), _klass_to_verify(klass) { + _this_type = VerificationType::reference_type(klass->name()); + // Create list to hold symbols in reference area. + _symbols = new GrowableArray<Symbol*>(100, 0, NULL); +@@ -547,7 +553,7 @@ void ClassVerifier::verify_class(TRAPS) { + _klass->external_name()); + } + +- objArrayHandle methods(THREAD, _klass->methods()); ++ objArrayHandle methods(THREAD, _klass_to_verify->methods()); + int num_methods = methods->length(); + + for (int index = 0; index < num_methods; index++) { +@@ -2444,7 +2450,10 @@ void ClassVerifier::verify_invoke_instructions( + VerificationType stack_object_type = + current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this)); + if (current_type() != stack_object_type) { +- assert(cp->cache() == NULL, "not rewritten yet"); ++ ++ // (tw) TODO: Check if relaxing the following assertion is correct. For class redefinition we might call the verifier twice. ++ //assert(cp->cache() == NULL, "not rewritten yet"); ++ + Symbol* ref_class_name = + cp->klass_name_at(cp->klass_ref_index_at(index)); + // See the comments in verify_field_instructions() for +diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp +index 4457f4a..b1b96f2 100644 +--- a/src/share/vm/classfile/verifier.hpp ++++ b/src/share/vm/classfile/verifier.hpp +@@ -47,7 +47,7 @@ class Verifier : AllStatic { + * Otherwise, no exception is thrown and the return indicates the + * error. 
+ */ +- static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS); ++ static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS); + + // Return false if the class is loaded by the bootstrap loader, + // or if defineClass was called requesting skipping verification +@@ -256,7 +256,10 @@ class ClassVerifier : public StackObj { + + ErrorContext _error_context; // contains information about an error + ++public: + void verify_method(methodHandle method, TRAPS); ++ ++private: + char* generate_code_data(methodHandle m, u4 code_length, TRAPS); + void verify_exception_handler_table(u4 code_length, char* code_data, + int& min, int& max, TRAPS); +@@ -329,6 +332,7 @@ class ClassVerifier : public StackObj { + + VerificationType object_type() const; + ++ instanceKlassHandle _klass_to_verify; + instanceKlassHandle _klass; // the class being verified + methodHandle _method; // current method being verified + VerificationType _this_type; // the verification type of the current class +diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp +index b4595c6..341de7a 100644 +--- a/src/share/vm/classfile/vmSymbols.hpp ++++ b/src/share/vm/classfile/vmSymbols.hpp +@@ -138,6 +138,10 @@ + template(tag_annotation_default, "AnnotationDefault") \ + template(tag_enclosing_method, "EnclosingMethod") \ + template(tag_bootstrap_methods, "BootstrapMethods") \ ++ template(tag_static_field_redefinition_policy, "StaticFieldRedefinitionPolicy") \ ++ template(tag_field_redefinition_policy, "FieldRedefinitionPolicy") \ ++ template(tag_method_redefinition_policy, "MethodRedefinitionPolicy") \ ++ template(tag_code_sections, "CodeSections") \ + \ + /* exception klasses: at least all exceptions thrown by the VM have entries here */ \ + template(java_lang_ArithmeticException, "java/lang/ArithmeticException") \ +@@ -377,6 +381,10 @@ + template(oop_size_name, "oop_size") \ + template(static_oop_field_count_name, "static_oop_field_count") \ + \ ++ /* mutator in case of class redefinition */ \ ++ template(static_transformer_name, "$staticTransformer") \ ++ template(transformer_name, "$transformer") \ ++ \ + /* non-intrinsic name/signature pairs: */ \ + template(register_method_name, "register") \ + do_alias(register_method_signature, object_void_signature) \ +diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp +index 0feca89..cfa1958 100644 +--- a/src/share/vm/compiler/compileBroker.cpp ++++ b/src/share/vm/compiler/compileBroker.cpp +@@ -1181,6 +1181,14 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, + int comp_level, + methodHandle hot_method, int hot_count, + const char* comment, Thread* THREAD) { ++ JavaThread* thread = JavaThread::current(); ++ if (thread->is_Compiler_thread() && thread->as_CompilerThread()->should_bailout()) { ++ return NULL; // FIXME: DCEVM: should we do something else? ++ } ++ if (instanceKlass::cast(method->method_holder())->is_not_initialized()) { ++ return NULL; // FIXME: DCEVM: how should we avoid this? 
++ } ++ + // make sure arguments make sense + assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method"); + assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); +@@ -1260,6 +1268,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci, + } + + // RedefineClasses() has replaced this method; just return ++ // (tw) This is important for the new version of hotswapping: Old code will only execute properly in the interpreter! + if (method->is_old()) { + return NULL; + } +@@ -1592,6 +1601,8 @@ void CompileBroker::compiler_thread_loop() { + + // Never compile a method if breakpoints are present in it + if (method()->number_of_breakpoints() == 0) { ++ thread->compilation_mutex()->lock(); ++ thread->set_should_bailout(false); + // Compile the method. + if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { + #ifdef COMPILER1 +@@ -1615,6 +1626,7 @@ void CompileBroker::compiler_thread_loop() { + // After compilation is disabled, remove remaining methods from queue + method->clear_queued_for_compilation(); + } ++ thread->compilation_mutex()->unlock(); + } + } + } +@@ -2164,3 +2176,15 @@ void CompileBroker::print_compiler_threads_on(outputStream* st) { + st->cr(); + #endif + } ++ ++// (tw) Clean up compiler interface after a class redefinition step ++void CompileBroker::cleanup_after_redefinition() { ++ int num_threads = _method_threads->length(); ++ ++ ciObjectFactory::sort_ci_objects(ciObjectFactory::_shared_ci_objects); ++ for (int i=0; i<num_threads; i++) { ++ if (_method_threads->at(i)->env() != NULL && _method_threads->at(i)->env() != (ciEnv *)badAddress) { ++ _method_threads->at(i)->env()->cleanup_after_redefinition(); ++ } ++ } ++} +diff --git a/src/share/vm/compiler/compileBroker.hpp b/src/share/vm/compiler/compileBroker.hpp +index 29f2b22..37989d1 100644 +--- a/src/share/vm/compiler/compileBroker.hpp ++++ b/src/share/vm/compiler/compileBroker.hpp +@@ -408,6 +408,7 @@ class CompileBroker: AllStatic { + + static void print_compiler_threads_on(outputStream* st); + ++ static void cleanup_after_redefinition(); + static int get_total_compile_count() { return _total_compile_count; } + static int get_total_bailout_count() { return _total_bailout_count; } + static int get_total_invalidated_count() { return _total_invalidated_count; } +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +index b0c9ec8..b3298e0 100644 +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +@@ -162,6 +162,13 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, + } + } + ++ ++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ ShouldNotReachHere(); ++ return NULL; ++} ++ + // Like CompactibleSpace forward() but always calls cross_threshold() to + // update the block offset table. Removed initialize_threshold call because + // CFLS does not use a block offset array for contiguous spaces. 
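
Together with the CHECK_BAILOUT change in c1_Compilation.hpp earlier in this patch, the broker changes form a simple handshake: each compiler thread holds its compilation_mutex for the duration of a compile, and the patched CHECK_BAILOUT polls should_bailout, so a redefinition can flag in-progress compiles to abort and then block on the mutexes until they have unwound. The following is a rough standalone model of that handshake, not the patch's code: std::thread, std::mutex, and std::atomic stand in for the VM's threads and Monitor, and all names are illustrative.

  #include <atomic>
  #include <chrono>
  #include <cstdio>
  #include <mutex>
  #include <thread>

  std::atomic<bool> should_bailout{false};
  std::mutex compilation_mutex;  // held while a compile is running

  // Mirrors the patched CHECK_BAILOUT(): poll the externally set flag at
  // every step boundary and abandon the compile if it is set.
  bool compile_method(int steps) {
    std::lock_guard<std::mutex> lock(compilation_mutex);
    for (int i = 0; i < steps; i++) {
      if (should_bailout.load()) {
        std::printf("compile aborted externally at step %d\n", i);
        return false;
      }
      std::this_thread::sleep_for(std::chrono::milliseconds(10));  // "work"
    }
    return true;
  }

  int main() {
    std::thread compiler([] { compile_method(100); });

    // Redefinition side: request the bailout first, then take the mutex,
    // which only succeeds once the in-flight compile has unwound.
    std::this_thread::sleep_for(std::chrono::milliseconds(30));
    should_bailout.store(true);
    compilation_mutex.lock();
    std::printf("all compiles drained; safe to redefine\n");
    compilation_mutex.unlock();

    compiler.join();
    return 0;
  }

In the real patch the waiting side is the redefinition operation rather than main(), and the flag is per CompilerThread, but the ordering is the same: set should_bailout first, then acquire each compilation_mutex to know the compile has unwound.
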
+diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +index 3b7bb9a..de7e54b 100644 +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +@@ -149,6 +149,7 @@ class CompactibleFreeListSpace: public CompactibleSpace { + + // Support for compacting cms + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); ++ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); + HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); + + // Initialization helpers. +diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp +index 29841d8..a13a35d 100644 +--- a/src/share/vm/gc_implementation/shared/markSweep.cpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp +@@ -32,6 +32,8 @@ + #include "oops/objArrayKlass.inline.hpp" + #include "oops/oop.inline.hpp" + ++GrowableArray<oop>* MarkSweep::_rescued_oops = NULL; ++ + Stack<oop, mtGC> MarkSweep::_marking_stack; + Stack<DataLayout*, mtGC> MarkSweep::_revisit_mdo_stack; + Stack<Klass*, mtGC> MarkSweep::_revisit_klass_stack; +@@ -357,3 +359,86 @@ void MarkSweep::trace(const char* msg) { + } + + #endif ++ ++// (tw) Copy the rescued objects to their destination address after compaction. ++void MarkSweep::copy_rescued_objects_back() { ++ ++ if (_rescued_oops != NULL) { ++ ++ for (int i=0; i<_rescued_oops->length(); i++) { ++ oop rescued_obj = _rescued_oops->at(i); ++ ++ int size = rescued_obj->size(); ++ oop new_obj = rescued_obj->forwardee(); ++ ++ if (rescued_obj->blueprint()->new_version() != NULL) { ++ MarkSweep::update_fields(rescued_obj, new_obj); ++ } else { ++ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); ++ } ++ ++ FREE_RESOURCE_ARRAY(HeapWord, rescued_obj, size); ++ ++ new_obj->init_mark(); ++ assert(new_obj->is_oop(), "must be a valid oop"); ++ } ++ _rescued_oops->clear(); ++ _rescued_oops = NULL; ++ } ++} ++ ++// (tw) Update instances of a class whose fields changed. 
++void MarkSweep::update_fields(oop q, oop new_location) { ++ ++ assert(q->blueprint()->new_version() != NULL, "class of old object must have new version"); ++ ++ klassOop old_klass_oop = q->klass(); ++ klassOop new_klass_oop = q->blueprint()->new_version(); ++ ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = instanceKlass::cast(new_klass_oop); ++ ++ int size = q->size_given_klass(old_klass); ++ int new_size = q->size_given_klass(new_klass); ++ ++ oop tmp_obj = q; ++ ++ if (new_klass_oop->klass_part()->is_copying_backwards()) { ++ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) || ++ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) { ++ tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size); ++ } ++ } ++ ++ int *cur = new_klass_oop->klass_part()->update_information(); ++ ++ tmp_obj->set_klass_no_check(new_klass_oop); ++ ++ if (cur == NULL) { ++ assert(size == new_size, "just checking"); ++ Copy::conjoint_words(((HeapWord *)tmp_obj), ((HeapWord *)new_location), size); ++ } else { ++ int destOffset = 0; ++ while (*cur != 0) { ++ if (*cur > 0) { ++ int size = *cur; ++ cur++; ++ int offset = *cur; ++ Copy::conjoint_jbytes(((char *)tmp_obj) + offset, ((char *)new_location) + destOffset, size); ++ destOffset += size; ++ cur++; ++ } else { ++ assert(*cur < 0, ""); ++ int skip = -*cur; ++ Copy::fill_to_bytes(((char*)new_location) + destOffset, skip, 0); ++ destOffset += skip; ++ cur++; ++ } ++ } ++ } ++ ++ if (tmp_obj != q) { ++ FREE_RESOURCE_ARRAY(HeapWord, tmp_obj, size); ++ } ++} +diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp +index eb8252c..b96a677 100644 +--- a/src/share/vm/gc_implementation/shared/markSweep.hpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp +@@ -117,8 +117,12 @@ class MarkSweep : AllStatic { + friend class AdjustPointerClosure; + friend class KeepAliveClosure; + friend class VM_MarkSweep; ++ friend class GenMarkSweep; + friend void marksweep_init(); + ++public: ++ static GrowableArray<oop>* _rescued_oops; ++ + // + // Vars + // +@@ -208,6 +212,8 @@ class MarkSweep : AllStatic { + template <class T> static inline void mark_and_push(T* p); + static inline void push_objarray(oop obj, size_t index); + ++ static void copy_rescued_objects_back(); ++ static void update_fields(oop q, oop new_location); + static void follow_stack(); // Empty marking stack. 
+
+  static void preserve_mark(oop p, markOop mark);
+diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
+index 32c0bdb..7e30e78 100644
+--- a/src/share/vm/interpreter/interpreterRuntime.cpp
++++ b/src/share/vm/interpreter/interpreterRuntime.cpp
+@@ -402,7 +402,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
+   assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
+   assert(h_exception->is_oop(), "just checking");
+   // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
+-  if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
++  if (!(h_exception->is_a(SystemDictionary::Throwable_klass()->klass_part()->newest_version())) && !(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
+     if (ExitVMOnVerifyError) vm_exit(-1);
+     ShouldNotReachHere();
+   }
+@@ -656,6 +656,82 @@ IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, methodOopDes
+   JvmtiExport::post_raw_breakpoint(thread, method, bcp);
+ IRT_END
+ 
++// (tw) Transfer execution from an old method version to its forward method when running old code.
++IRT_ENTRY(void, InterpreterRuntime::forward_method(JavaThread *thread))
++  {
++    MonitorLockerEx ml(RedefinitionSync_lock);
++    while (Threads::wait_at_instrumentation_entry()) {
++      ml.wait();
++    }
++  }
++  frame f = last_frame(thread);
++  methodOop m = f.interpreter_frame_method();
++  methodOop forward_method = m->forward_method();
++  if (forward_method != NULL) {
++    int bci = f.interpreter_frame_bci();
++
++    if (TraceRedefineClasses >= 3) {
++      tty->print_cr("Executing NOP in method %s at bci %d %d", m->name()->as_C_string(), bci, m->is_in_code_section(bci + 1));
++    }
++
++    int next_bci = bci - 1;
++    // First try bci before NOP.
++    if (!m->is_in_code_section(next_bci)) {
++      // Try bci after NOP.
++      next_bci = bci + 1;
++      if (!m->is_in_code_section(next_bci)) return;
++    }
++
++    int new_bci = m->calculate_forward_bci(next_bci, forward_method);
++    if (TraceRedefineClasses >= 2) {
++      tty->print_cr("Transferring execution of %s to new method old_bci=%d new_bci=%d", forward_method->name()->as_C_string(), bci, new_bci);
++    }
++    RegisterMap reg_map(thread);
++    vframe* vf = vframe::new_vframe(&f, &reg_map, thread);
++    interpretedVFrame *iframe = (interpretedVFrame *)vf;
++    iframe->set_method(forward_method, new_bci - 1);
++  }
++IRT_END
++
++// (tw) Correctly resolve method when running old code.
++IRT_ENTRY(void, InterpreterRuntime::find_correct_method(JavaThread *thread, oopDesc* receiverOop, int vTableIndex))
++  // extract receiver from the outgoing argument list if necessary
++  Handle receiver(thread, receiverOop);
++
++  // TODO: Check for invokeinterface!
++  Bytecodes::Code bytecode = Bytecodes::_invokevirtual;
++
++  int method_holder_revision_number = method(thread)->method_holder()->klass_part()->revision_number();
++  klassOop klass = receiverOop->klass();
++  while (klass->klass_part()->revision_number() > method_holder_revision_number) {
++    klass = klass->klass_part()->old_version();
++  }
++
++  // TODO: Check for correctness if different vtable indices in different versions?
++
++  methodOop method = ((instanceKlass *)klass->klass_part())->method_at_vtable(vTableIndex);
++  thread->set_vm_result(method);
++IRT_END
++
++// Correctly resolve interface method when running old code.
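++// Like find_correct_method above: walk the receiver's class back through
++// klass_part()->old_version() until its revision number matches the revision
++// that the calling (old) code was compiled against, then perform the itable
++// lookup in that older view of the class.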
++IRT_ENTRY(void, InterpreterRuntime::find_correct_interface_method(JavaThread *thread, oopDesc* receiverOop, oopDesc* interface_klass, int vTableIndex)) ++ ++ // extract receiver from the outgoing argument list if necessary ++ Handle receiver(thread, receiverOop); ++ ++ // TODO: Check for invokeinterface! ++ Bytecodes::Code bytecode = Bytecodes::_invokevirtual; ++ ++ int method_holder_revision_number = method(thread)->method_holder()->klass_part()->revision_number(); ++ klassOop klass = receiverOop->klass(); ++ while (klass->klass_part()->revision_number() > method_holder_revision_number) { ++ klass = klass->klass_part()->old_version(); ++ } ++ ++ methodOop method = ((instanceKlass *)klass->klass_part())->method_at_itable((klassOop)interface_klass, vTableIndex, THREAD); ++ thread->set_vm_result(method); ++IRT_END ++ + IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) { + // extract receiver from the outgoing argument list if necessary + Handle receiver(thread, NULL); +@@ -684,6 +760,10 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (info.resolved_method()->is_old()) { ++ // (tw) If we are executing an old method, this is OK! ++ if (method(thread)->is_old()) { ++ break; ++ } + // It is very unlikely that method is redefined more than 100 times + // in the middle of resolve. If it is looping here more than 100 times + // means then there could be a bug here. +diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp +index 7ec8e49..b60f062 100644 +--- a/src/share/vm/interpreter/interpreterRuntime.hpp ++++ b/src/share/vm/interpreter/interpreterRuntime.hpp +@@ -140,6 +140,9 @@ class InterpreterRuntime: AllStatic { + static void post_method_entry(JavaThread *thread); + static void post_method_exit (JavaThread *thread); + static int interpreter_contains(address pc); ++ static void forward_method(JavaThread *thread); ++ static void find_correct_method(JavaThread *thread, oopDesc* receiver, int vTableIndex); ++ static void find_correct_interface_method(JavaThread *thread, oopDesc* receiver, oopDesc* interface_klass, int vTableIndex); + + // Native signature handlers + static void prepare_native_call(JavaThread* thread, methodOopDesc* method); +diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp +index b17f405..6acf287 100644 +--- a/src/share/vm/interpreter/linkResolver.cpp ++++ b/src/share/vm/interpreter/linkResolver.cpp +@@ -153,8 +153,8 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass + // Klass resolution + + void LinkResolver::check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS) { +- if (!Reflection::verify_class_access(ref_klass->as_klassOop(), +- sel_klass->as_klassOop(), ++ if (!Reflection::verify_class_access(ref_klass->as_klassOop()->klass_part()->newest_version(), ++ sel_klass->as_klassOop()->klass_part()->newest_version(), + true)) { + ResourceMark rm(THREAD); + Exceptions::fthrow( +@@ -338,7 +338,7 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass, + // We'll check for the method name first, as that's most likely + // to be false (so we'll short-circuit out of these tests). 
+ if (sel_method->name() == vmSymbols::clone_name() && +- sel_klass() == SystemDictionary::Object_klass() && ++ sel_klass()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() && + resolved_klass->oop_is_array()) { + // We need to change "protected" to "public". + assert(flags.is_protected(), "clone not protected?"); +@@ -404,6 +404,156 @@ void LinkResolver::resolve_method_statically(methodHandle& resolved_method, Klas + } + } + ++ ++void LinkResolver::lookup_method(methodHandle& resolved_method, KlassHandle resolved_klass, ++ Symbol* method_name, Symbol* method_signature, bool is_interface, KlassHandle current_klass, TRAPS) { ++ ++ // Interface method lookup? ++ if (is_interface) { ++ ++ // lookup method in this interface or its super, java.lang.Object ++ lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ ++ if (resolved_method.is_null()) { ++ // lookup method in all the super-interfaces ++ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ } ++ ++ // Other methods ++ } else { ++ Handle nested_exception; ++ ++ // 2. lookup method in resolved klass and its super klasses ++ lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ ++ if (resolved_method.is_null()) { // not found in the class hierarchy ++ // 3. lookup method in all the interfaces implemented by the resolved klass ++ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); ++ ++ if (resolved_method.is_null()) { ++ // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc ++ lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature, ++ current_klass, (Handle*)NULL, (Handle*)NULL, THREAD); ++ if (HAS_PENDING_EXCEPTION) { ++ nested_exception = Handle(THREAD, PENDING_EXCEPTION); ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } ++ } ++ } ++} ++ ++void LinkResolver::lookup_correct_field(fieldDescriptor &fd, KlassHandle &sel_klass, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* field_name, Symbol* field_sig, bool is_static) { ++ ++ // First attempt unversioned ++ sel_klass = KlassHandle(Thread::current(), instanceKlass::cast(resolved_klass())->find_field(field_name, field_sig, &fd)); ++ ++ ++ if (!current_klass.is_null() && !current_klass->is_newest_version()) { ++ ++ // Look for the policy defined in the new version of the class (_not_ in the newest, but only in the newer relative to current klass). ++ int redefinition_policy = current_klass->new_version()->klass_part()->field_redefinition_policy(); ++ if (is_static) { ++ redefinition_policy = current_klass->new_version()->klass_part()->static_field_redefinition_policy(); ++ } ++ ++ assert(redefinition_policy != Klass::StaticCheck, "if the policy is static check, then we can never reach here"); ++ ++ if (redefinition_policy != Klass::DynamicCheck) { ++ ++ if (redefinition_policy == Klass::AccessOldMembers) { ++ // Forget looked up fields ++ sel_klass = KlassHandle(Thread::current(), (oop)NULL); ++ } ++ ++ assert(redefinition_policy == Klass::AccessOldMembers || redefinition_policy == Klass::AccessDeletedMembers, ""); ++ ++ if (sel_klass.is_null() || fd.is_static() != is_static /* access old static field field is changed from static to non-static */) { ++ ++ // Select correct version for resolved klass. 
++ find_correct_resolved_klass(resolved_klass, current_klass); ++ ++ sel_klass = KlassHandle(Thread::current(), instanceKlass::cast(resolved_klass())->find_field(field_name, field_sig, &fd)); ++ ++ // FIXME: idubrov ++ //if (sel_klass.is_null()) { ++ // TRACE_RC2("Trying to resolve field (%s) in old universe failed => exception is the correct behaviour", field_name->as_C_string()); ++ //} else { ++ // assert(sel_klass->new_version() != NULL, "must be old class!"); ++ // TRACE_RC2("Resolved a field in the old universe (%s)!", field_name->as_C_string()); ++ //} ++ } ++ } ++ } ++} ++ ++void LinkResolver::lookup_correct_method(methodHandle& resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, ++ Symbol* method_name, Symbol* method_signature, bool is_interface, TRAPS) { ++ ++ // First attempt unversioned ++ lookup_method(resolved_method, resolved_klass, method_name, method_signature, is_interface, current_klass, CHECK); ++ ++ // (tw) Are we in an old method that wants to see a different view on the world? ++ if (!current_klass.is_null() && !current_klass->is_newest_version()) { ++ ++ // Look for the policy defined in the new version of the class (_not_ in the newest, but only in the newer relative to current klass). ++ int method_redefinition_policy = current_klass->new_version()->klass_part()->method_redefinition_policy(); ++ assert(method_redefinition_policy != Klass::StaticCheck, "if the policy is static check, then we can never reach here"); ++ ++ if (method_redefinition_policy != Klass::DynamicCheck) { ++ ++ // We do not throw the exception ++ if (method_redefinition_policy == Klass::AccessOldMembers) { ++ // Forget any new member lookup ++ resolved_method = methodHandle(THREAD, NULL); ++ } ++ ++ assert(method_redefinition_policy == Klass::AccessOldMembers || method_redefinition_policy == Klass::AccessDeletedMembers, ""); ++ ++ if (resolved_method.is_null()) { ++ ++ // Select correct version for resolved klass. ++ find_correct_resolved_klass(resolved_klass, current_klass); ++ ++ // Now do the lookup in a second attempt with a different resolved klass. 
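++      // At this point resolved_klass has been rewound (via old_version()) to
++      // the class revision the running old bytecode was resolved against, so
++      // members that no longer exist in the newest version can still be found.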
++      lookup_method(resolved_method, resolved_klass, method_name, method_signature, is_interface, current_klass, CHECK);
++
++      // FIXME: idubrov
++      //IF_TRACE_RC2 {
++      //  ResourceMark rm(THREAD);
++      //  if (resolved_method.is_null()) {
++      //    TRACE_RC2("Trying to resolve method (%s) in old universe failed => exception is the correct behaviour", method_name->as_C_string());
++      //  } else {
++      //    assert(resolved_method->is_old(), "must be old method!");
++      //    TRACE_RC2("Resolved a method in the old universe (%s)!", resolved_method->name()->as_C_string());
++      //  }
++      //}
++    }
++  }
++  }
++
++  if (resolved_method.is_null()) {
++    // no method found
++    ResourceMark rm(THREAD);
++    THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
++              methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
++                                                      method_name,
++                                                      method_signature));
++  }
++}
++
++void LinkResolver::find_correct_resolved_klass(KlassHandle &resolved_klass, KlassHandle &current_klass) {
++  int current_klass_revision = current_klass->revision_number();
++  int resolved_klass_revision = resolved_klass->revision_number();
++  // FIXME: idubrov
++  //TRACE_RC2("The two different revision numbers for interfaces: current=%d / resolved_callee=%d", current_klass_revision, resolved_klass_revision);
++
++  while (resolved_klass->revision_number() > current_klass_revision) {
++    assert(resolved_klass->old_version(), "must have old version");
++    resolved_klass = KlassHandle(Thread::current(), resolved_klass->old_version());
++  }
++}
++
+ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
+                                   Symbol* method_name, Symbol* method_signature,
+                                   KlassHandle current_klass, bool check_access, TRAPS) {
+@@ -416,35 +566,8 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
+     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+   }
+ 
+-  Handle nested_exception;
+-
+-  // 2. lookup method in resolved klass and its super klasses
+-  lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-
+-  if (resolved_method.is_null()) { // not found in the class hierarchy
+-    // 3. lookup method in all the interfaces implemented by the resolved klass
+-    lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-
+-    if (resolved_method.is_null()) {
+-      // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
+-      lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature,
+-                                current_klass, (Handle*)NULL, (Handle*)NULL, THREAD);
+-      if (HAS_PENDING_EXCEPTION) {
+-        nested_exception = Handle(THREAD, PENDING_EXCEPTION);
+-        CLEAR_PENDING_EXCEPTION;
+-      }
+-    }
+-
+-    if (resolved_method.is_null()) {
+-      // 4. method lookup failed
+-      ResourceMark rm(THREAD);
+-      THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
+-                      methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+-                                                              method_name,
+-                                                              method_signature),
+-                      nested_exception);
+-    }
+-  }
++  // 2. and 3. and 4. lookup method in resolved klass and its super klasses
++  lookup_correct_method(resolved_method, resolved_klass, current_klass, method_name, method_signature, false, CHECK);
+ 
+   // 5.
check if method is concrete + if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) { +@@ -512,20 +635,7 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method, + } + + // lookup method in this interface or its super, java.lang.Object +- lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- +- if (resolved_method.is_null()) { +- // lookup method in all the super-interfaces +- lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- if (resolved_method.is_null()) { +- // no method found +- ResourceMark rm(THREAD); +- THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), +- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), +- method_name, +- method_signature)); +- } +- } ++ lookup_correct_method(resolved_method, resolved_klass, current_klass, method_name, method_signature, true, CHECK); + + if (check_access) { + HandleMark hm(THREAD); +@@ -612,9 +722,14 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo + THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string()); + } + ++ KlassHandle ref_klass(THREAD, pool->pool_holder()->klass_part()); ++ + // Resolve instance field + fieldDescriptor fd; // find_field initializes fd if found +- KlassHandle sel_klass(THREAD, instanceKlass::cast(resolved_klass())->find_field(field, sig, &fd)); ++ ++ KlassHandle sel_klass; ++ lookup_correct_field(fd, sel_klass, resolved_klass, ref_klass, field, sig, is_static); ++ + // check if field exists; i.e., if a klass containing the field def has been selected + if (sel_klass.is_null()){ + ResourceMark rm(THREAD); +@@ -622,7 +737,6 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo + } + + // check access +- KlassHandle ref_klass(THREAD, pool->pool_holder()); + check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK); + + // check for errors +@@ -634,7 +748,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo + } + + // Final fields can only be accessed from its own class. +- if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) { ++ if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()->klass_part()->active_version() && sel_klass() != pool->pool_holder()) { + THROW(vmSymbols::java_lang_IllegalAccessError()); + } + +@@ -839,7 +953,7 @@ void LinkResolver::resolve_virtual_call(CallInfo& result, Handle recv, KlassHand + bool check_access, bool check_null_and_abstract, TRAPS) { + methodHandle resolved_method; + linktime_resolve_virtual_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK); +- runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, check_null_and_abstract, CHECK); ++ runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, current_klass, check_null_and_abstract, CHECK); + } + + // throws linktime exceptions +@@ -869,6 +983,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result, + KlassHandle resolved_klass, + Handle recv, + KlassHandle recv_klass, ++ KlassHandle current_klass, + bool check_null_and_abstract, + TRAPS) { + +@@ -917,7 +1032,40 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result, + // recv_klass might be an arrayKlassOop but all vtables start at + // the same place. 
The cast is to avoid virtual call and assertion. + instanceKlass* inst = (instanceKlass*)recv_klass()->klass_part(); ++ ++ // (tw) The type of the virtual method call and the type of the receiver do not need to ++ // have anything in common, as the receiver type could've been hotswapped. ++ // Does not always work (method could be resolved with correct dynamic type and later ++ // be called at the same place with a wrong dynamic type). ++ // (tw) TODO: Need to handle the static type vs dynamic type issue more generally. ++ ++ // The vTable must be based on the view of the world of the resolved method ++ klassOop method_holder = resolved_method->method_holder(); ++ ++ if (method_holder->klass_part()->new_version() != NULL) { ++ // We are executing in old code ++ // FIXME: idubrov ++ //TRACE_RC2("Calling a method in old code"); ++ while (method_holder->klass_part()->revision_number() < inst->revision_number()) { ++ inst = (instanceKlass *)(inst->old_version()->klass_part()); ++ } ++ } ++ ++ if (inst->is_subtype_of(method_holder)) { + selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index)); ++ } else { ++ ++ tty->print_cr("Failure:"); ++ inst->as_klassOop()->print(); ++ inst->super()->print(); ++ juint off = inst->super_check_offset(); ++ klassOop sup = *(klassOop*)( (address)inst->as_klassOop() + off ); ++ sup->print(); ++ method_holder->print(); ++ ++ bool b = inst->is_subtype_of(method_holder); ++ THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), "(tw) A virtual method was called, but the type of the receiver is not related with the type of the class of the called method!"); ++ } + } + } + +diff --git a/src/share/vm/interpreter/linkResolver.hpp b/src/share/vm/interpreter/linkResolver.hpp +index dfd74f9..cf6e44a 100644 +--- a/src/share/vm/interpreter/linkResolver.hpp ++++ b/src/share/vm/interpreter/linkResolver.hpp +@@ -110,7 +110,11 @@ class CallInfo: public LinkInfo { + // It does all necessary link-time checks & throws exceptions if necessary. 
+
+ class LinkResolver: AllStatic {
+- private:
++private:
++  static void lookup_method                     (methodHandle& result, KlassHandle resolved_klass, Symbol* name, Symbol* signature, bool is_interface, KlassHandle current_klass, TRAPS);
++  static void lookup_correct_field              (fieldDescriptor &fd, KlassHandle &sel_klass, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* field_name, Symbol* field_sig, bool is_static);
++  static void lookup_correct_method             (methodHandle& result, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* name, Symbol* signature, bool is_interface, TRAPS);
++  static void find_correct_resolved_klass       (KlassHandle &resolved_klass, KlassHandle &current_klass);
+   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
+   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
+   static void lookup_method_in_interfaces       (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
+@@ -133,7 +137,7 @@ class LinkResolver: AllStatic {
+   static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+ 
+   static void runtime_resolve_special_method   (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS);
+-  static void runtime_resolve_virtual_method   (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS);
++  static void runtime_resolve_virtual_method   (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, KlassHandle current_klass, bool check_null_and_abstract, TRAPS);
+   static void runtime_resolve_interface_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS);
+ 
+   static void check_field_accessability        (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS);
+diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
+index 17e9f26..e77500f 100644
+--- a/src/share/vm/interpreter/templateTable.hpp
++++ b/src/share/vm/interpreter/templateTable.hpp
+@@ -329,8 +329,8 @@ class TemplateTable: AllStatic {
+   static void shouldnotreachhere();
+ 
+   // jvmti support
+-  static void jvmti_post_field_access(Register cache, Register index, bool is_static, bool has_tos);
+-  static void jvmti_post_field_mod(Register cache, Register index, bool is_static);
++  static void jvmti_post_field_access(Register cache, Register index, int byte_no, bool is_static, bool has_tos);
++  static void jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static);
+   static void jvmti_post_fast_field_mod();
+ 
+   // debugging of TemplateGenerator
+diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
+index 76e18d8..6af7c14 100644
+--- a/src/share/vm/memory/genMarkSweep.cpp
++++ b/src/share/vm/memory/genMarkSweep.cpp
+@@ -421,6 +421,7 @@ void GenMarkSweep::mark_sweep_phase4() {
+   // in the same order in phase2, phase3 and phase4.
We don't quite do that + // here (perm_gen first rather than last), so we tell the validate code + // to use a higher index (saved from phase2) when verifying perm_gen. ++ assert(_rescued_oops == NULL, "must be empty before processing"); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + Generation* pg = gch->perm_gen(); + +@@ -433,10 +434,14 @@ void GenMarkSweep::mark_sweep_phase4() { + + VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false)); + ++ MarkSweep::copy_rescued_objects_back(); ++ + GenCompactClosure blk; + gch->generation_iterate(&blk, true); + + VALIDATE_MARK_SWEEP_ONLY(compaction_complete()); + ++ MarkSweep::copy_rescued_objects_back(); ++ + pg->post_compact(); // Shared spaces verification. + } +diff --git a/src/share/vm/memory/permGen.cpp b/src/share/vm/memory/permGen.cpp +index 350f583..59faad1 100644 +--- a/src/share/vm/memory/permGen.cpp ++++ b/src/share/vm/memory/permGen.cpp +@@ -57,7 +57,12 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) { + + for (;;) { + { +- MutexLocker ml(Heap_lock); ++ // (tw) Only lock when not at a safepoint (necessary to use the split verifier from the VmThread) ++ Monitor *lock = Heap_lock; ++ if (SafepointSynchronize::is_at_safepoint()) { ++ lock = NULL; ++ } ++ MutexLockerEx ml(lock); + if ((obj = gen->allocate(size, false)) != NULL) { + return obj; + } +diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp +index f97bc34..9b20d08 100644 +--- a/src/share/vm/memory/space.cpp ++++ b/src/share/vm/memory/space.cpp +@@ -378,6 +378,31 @@ void CompactibleSpace::clear(bool mangle_space) { + _compaction_top = bottom(); + } + ++// (tw) Calculates the compact_top that will be used for placing the next object with the giving size on the heap. ++HeapWord* CompactibleSpace::forward_compact_top(size_t size, ++CompactPoint* cp, HeapWord* compact_top) { ++ // First check if we should switch compaction space ++ assert(this == cp->space, "'this' should be current compaction space."); ++ size_t compaction_max_size = pointer_delta(end(), compact_top); ++ while (size > compaction_max_size) { ++ // switch to next compaction space ++ cp->space->set_compaction_top(compact_top); ++ cp->space = cp->space->next_compaction_space(); ++ if (cp->space == NULL) { ++ cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); ++ assert(cp->gen != NULL, "compaction must succeed"); ++ cp->space = cp->gen->first_compaction_space(); ++ assert(cp->space != NULL, "generation must have a first compaction space"); ++ } ++ compact_top = cp->space->bottom(); ++ cp->space->set_compaction_top(compact_top); ++ cp->threshold = cp->space->initialize_threshold(); ++ compaction_max_size = pointer_delta(cp->space->end(), compact_top); ++ } ++ ++ return compact_top; ++} ++ + HeapWord* CompactibleSpace::forward(oop q, size_t size, + CompactPoint* cp, HeapWord* compact_top) { + // q is alive +@@ -401,7 +426,7 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size, + } + + // store the forwarding pointer into the mark word +- if ((HeapWord*)q != compact_top) { ++ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) { + q->forward_to(oop(compact_top)); + assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); + } else { +@@ -449,7 +474,208 @@ void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { + + // Faster object search. 
+ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ if (!Universe::is_redefining_gc_run()) { ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ return; ++ } ++ ++ /* Compute the new addresses for the live objects and store it in the mark ++ * Used by universe::mark_sweep_phase2() ++ */ ++ HeapWord* compact_top; /* This is where we are currently compacting to. */ ++ ++ /* We're sure to be here before any objects are compacted into this ++ * space, so this is a good time to initialize this: ++ */ ++ set_compaction_top(bottom()); ++ ++ if (cp->space == NULL) { ++ assert(cp->gen != NULL, "need a generation"); ++ assert(cp->threshold == NULL, "just checking"); ++ assert(cp->gen->first_compaction_space() == this, "just checking"); ++ cp->space = cp->gen->first_compaction_space(); ++ compact_top = cp->space->bottom(); ++ cp->space->set_compaction_top(compact_top); ++ cp->threshold = cp->space->initialize_threshold(); ++ } else { ++ compact_top = cp->space->compaction_top(); ++ } ++ ++ /* We allow some amount of garbage towards the bottom of the space, so ++ * we don't start compacting before there is a significant gain to be made. ++ * Occasionally, we want to ensure a full compaction, which is determined ++ * by the MarkSweepAlwaysCompactCount parameter. ++ */ ++ int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations; ++ bool skip_dead = (MarkSweepAlwaysCompactCount < 1) ++ ||((invocations % MarkSweepAlwaysCompactCount) != 0); ++ ++ size_t allowed_deadspace = 0; ++ if (skip_dead) { ++ int ratio = (int)allowed_dead_ratio(); ++ allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; ++ } ++ ++ HeapWord* q = bottom(); ++ HeapWord* t = end(); ++ ++ HeapWord* end_of_live= q; /* One byte beyond the last byte of the last ++ live object. */ ++ HeapWord* first_dead = end();/* The first dead object. */ ++ LiveRange* liveRange = NULL; /* The current live range, recorded in the ++ first header of preceding free area. */ ++ _first_dead = first_dead; ++ ++ const intx interval = PrefetchScanIntervalInBytes; ++ ++ while (q < t) { ++ assert(!block_is_obj(q) || ++ oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || ++ oop(q)->mark()->has_bias_pattern(), ++ "these are the only valid states during a mark sweep"); ++ if (block_is_obj(q) && oop(q)->is_gc_marked()) { ++ /* prefetch beyond q */ ++ Prefetch::write(q, interval); ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size(q); ++ ++ // DCEVM: begin ++ ////////////////////////////////////////////////////////////////////////// ++ size_t forward_size = size; ++ ++ // Compute the forward sizes and leave out objects whose position could ++ // possibly overlap other objects. 
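++      // If an instance grows, its destination can overlap memory that still
++      // holds live, not-yet-moved data (possibly the object itself). Such
++      // objects are parked in MarkSweep::_rescued_oops and are forwarded only
++      // after this scan, once all other live objects have claimed their space.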
++
++      // DCEVM: There is a new version of the class of q => different size
++      if (oop(q)->blueprint()->new_version() != NULL && oop(q)->blueprint()->new_version()->klass_part()->update_information() != NULL) {
++
++        size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part());
++        assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run");
++        forward_size = new_size;
++      }
++
++      compact_top = cp->space->forward_compact_top(forward_size, cp, compact_top);
++
++      bool rescuing = must_rescue(oop(q), oop(compact_top));
++      if (rescuing) {
++        if (MarkSweep::_rescued_oops == NULL) {
++          MarkSweep::_rescued_oops = new GrowableArray<oop>(128);
++        }
++        // FIXME: idubrov
++        //TRACE_RC5("rescue obj %d klass=%s", MarkSweep::_rescued_oops->length(), oop(q)->klass()->klass_part()->name()->as_C_string());
++        MarkSweep::_rescued_oops->append(oop(q));
++      } else {
++        compact_top = cp->space->forward(oop(q), forward_size, cp, compact_top);
++      }
++
++      if ((size != forward_size || rescuing) && q < first_dead) {
++        // (tw) This object moves => first_dead must be set to here!
++        first_dead = q;
++      }
++      //////////////////////////////////////////////////////////////////////////
++      q += size;
++      end_of_live = q;
++    } else {
++      /* run over all the contiguous dead objects */
++      HeapWord* end = q;
++      do {
++        /* prefetch beyond end */
++        Prefetch::write(end, interval);
++        end += block_size(end);
++      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));
++
++      /* see if we might want to pretend this object is alive so that
++       * we don't have to compact quite as often.
++       */
++      if (allowed_deadspace > 0 && q == compact_top) {
++        size_t sz = pointer_delta(end, q);
++        if (insert_deadspace(allowed_deadspace, q, sz)) {
++          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
++          q = end;
++          end_of_live = end;
++          continue;
++        }
++      }
++
++      /* otherwise, it really is a free region. */
++
++      /* for the previous LiveRange, record the end of the live objects. */
++      if (liveRange) {
++        liveRange->set_end(q);
++      }
++
++      /* record the current LiveRange object.
++       * liveRange->start() is overlaid on the mark word.
++       */
++      liveRange = (LiveRange*)q;
++      liveRange->set_start(end);
++      liveRange->set_end(end);
++
++      /* see if this is the first dead region. */
++      if (q < first_dead) {
++        first_dead = q;
++      }
++
++      /* move on to the next object */
++      q = end;
++    }
++  }
++
++  //////////////////////////////////////////////////////////////////////////
++  // Compute the forwarding addresses for the objects that need to be
++  // rescued.
++  // TODO: empty the _rescued_oops after ALL spaces are compacted!
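++  // The objects parked above were skipped by the main loop; with every other
++  // live object already forwarded, they can now be placed at the remaining
++  // compact_top without clobbering unmoved data.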
++ if (MarkSweep::_rescued_oops != NULL) { ++ // FIXME: idubrov ++ //TRACE_RC2("Calculating new forward sizes for %d objects!", MarkSweep::_rescued_oops->length()); ++ ++ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { ++ oop q = MarkSweep::_rescued_oops->at(i); ++ ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size((HeapWord*)q); ++ ++ size_t forward_size = size; ++ ++ // (tw) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ forward_size = new_size; ++ } ++ ++ compact_top = cp->space->forward(oop(q), forward_size, cp, compact_top); ++ assert(compact_top <= t, "must not write over end of space!"); ++ } ++ MarkSweep::_rescued_oops->clear(); ++ MarkSweep::_rescued_oops = NULL; ++ } ++ ////////////////////////////////////////////////////////////////////////// ++ ++ assert(q == t, "just checking"); ++ if (liveRange != NULL) { ++ liveRange->set_end(q); ++ } ++ _end_of_live = end_of_live; ++ if (end_of_live < first_dead) { ++ first_dead = end_of_live; ++ } ++ _first_dead = first_dead; ++ ++// FIXME: idubrov ++// if (_first_dead > top()) { ++// _first_dead = top(); ++// } ++// ++// if (_end_of_live > top()) { ++// _end_of_live = top(); ++// } ++ assert(_first_dead <= top(), "Must be smaller equal"); ++ assert(_end_of_live <= top(), "Must be smaller equal"); ++ ++ /* save the compaction_top of the compaction space. */ ++ cp->space->set_compaction_top(compact_top); + } + + void Space::adjust_pointers() { +@@ -490,17 +716,313 @@ void Space::adjust_pointers() { + assert(q == t, "just checking"); + } + ++ ++#ifdef ASSERT ++ ++int CompactibleSpace::space_index(oop obj) { ++ GenCollectedHeap* heap = GenCollectedHeap::heap(); ++ ++ if (heap->is_in_permanent(obj)) { ++ return -1; ++ } ++ ++ int index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ if (space->is_in_reserved(obj)) { ++ return index; ++ } ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ tty->print_cr("could not compute space_index for %08xh", obj); ++ index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); ++ ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ ShouldNotReachHere(); ++ return 0; ++} ++#endif ++ ++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { ++ ++ assert(is_in_reserved(old_obj), "old_obj must be in this space"); ++ ++ if (old_obj->is_perm()) { ++ // This object is in perm gen; check for invariant obj->klass() <= obj ++ if (oop(old_obj)->blueprint()->new_version() != NULL) { ++ return true; ++ } ++ } ++ ++ int size = old_obj->size(); ++ int original_size = size; ++ if (oop(old_obj)->blueprint()->is_redefining()) { ++ assert(oop(old_obj)->blueprint()->old_version() != NULL, "must not be null"); ++ original_size = 
oop(old_obj)->size_given_klass(oop(old_obj)->blueprint()->old_version()->klass_part()); ++ } else if (oop(old_obj)->blueprint()->new_version() != NULL) { ++ size = oop(old_obj)->size_given_klass(oop(old_obj)->blueprint()->new_version()->klass_part()); ++ } ++ ++ bool normalComparison = (old_obj + original_size < new_obj + size); ++ ++ if (is_in_reserved(new_obj)) { ++ // Old and new address are in same space, so just compare the address. ++ // Must rescue if object moves towards the top of the space. ++ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); ++ return normalComparison; ++ ++ } else { ++ ++ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); ++ ++ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); ++ if (tenured_gen->is_in_reserved(new_obj)) { ++ // Must never rescue when moving from the new into the old generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) > space_index(new_obj), "must be"); ++ return false; ++ ++ } else if (tenured_gen->is_in_reserved(old_obj)) { ++ // Must always rescue when moving from the old into the new generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) < space_index(new_obj), "must be"); ++ return true; ++ ++ } else { ++ // In the new generation, eden is located before the from space, so a ++ // simple pointer comparison is sufficient. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert((normalComparison) == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); ++ return normalComparison; ++ } ++ } ++} ++ ++oop CompactibleSpace::rescue(oop old_obj) { ++ assert(must_rescue(old_obj, old_obj->forwardee()), "do not call otherwise"); ++ ++ int size = old_obj->size(); ++ oop rescued_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)old_obj, (HeapWord*)rescued_obj, size); ++ ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ ++ MarkSweep::_rescued_oops->append(rescued_obj); ++ return rescued_obj; ++} ++ + void CompactibleSpace::adjust_pointers() { + // Check first is there is any work to do. + if (used() == 0) { + return; // Nothing to do. + } +- +- SCAN_AND_ADJUST_POINTERS(adjust_obj_size); ++ /* adjust all the interior pointers to point at the new locations of objects ++ * Used by MarkSweep::mark_sweep_phase3() */ ++ ++ HeapWord* q = bottom(); ++ HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ ++ ++ assert(_first_dead <= _end_of_live, "Stands to reason, no?"); ++ ++ debug_only(HeapWord* prev_q = NULL); ++ debug_only(HeapWord* prev_prev_q = NULL); ++ debug_only(HeapWord* prev_prev_prev_q = NULL); ++ if (q < t && _first_dead > q && ++ !oop(q)->is_gc_marked()) { ++ /* we have a chunk of the space which hasn't moved and we've ++ * reinitialized the mark word during the previous pass, so we can't ++ * use is_gc_marked for the traversal. 
*/ ++ HeapWord* end = _first_dead; ++ ++ while (q < end) { ++ /* I originally tried to conjoin "block_start(q) == q" to the ++ * assertion below, but that doesn't work, because you can't ++ * accurately traverse previous objects to get to the current one ++ * after their pointers (including pointers into permGen) have been ++ * updated, until the actual compaction is done. dld, 4/00 */ ++ assert(block_is_obj(q), ++ "should be at block boundaries, and should be looking at objs"); ++ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); ++ ++ /* point all the oops to the new location */ ++ size_t size = oop(q)->adjust_pointers(); ++ size = adjust_obj_size(size); ++ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); ++ ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q += size; ++ } ++ ++ // (tw) first_dead can be live object! ++ q = _first_dead; ++ ++// if (_first_dead == t) { ++// q = t; ++// } else { ++// /* $$$ This is funky. Using this to read the previously written ++// * LiveRange. See also use below. */ ++// q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); ++// } ++ } ++ ++ const intx interval = PrefetchScanIntervalInBytes; ++ ++ debug_only(prev_q = NULL); ++ debug_only(prev_prev_q = NULL); ++ debug_only(prev_prev_prev_q = NULL); ++ while (q < t) { ++ /* prefetch beyond q */ ++ Prefetch::write(q, interval); ++ if (oop(q)->is_gc_marked()) { ++ /* q is alive */ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); ++ /* point all the oops to the new location */ ++ size_t size = oop(q)->adjust_pointers(); ++ size = adjust_obj_size(size); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q += size; ++ } else { ++ /* q is not a live object, so its mark should point at the next ++ * live object */ ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q = (HeapWord*) oop(q)->mark()->decode_pointer(); ++ assert(q > prev_q, "we should be moving forward through memory"); ++ } ++ } ++ ++ assert(q == t, "just checking"); + } + + void CompactibleSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ ++ if(!Universe::is_redefining_gc_run()) { ++ SCAN_AND_COMPACT(obj_size); ++ return; ++ } ++ ++ /* Copy all live objects to their new location ++ * Used by MarkSweep::mark_sweep_phase4() */ ++ ++ HeapWord* q = bottom(); ++ HeapWord* const t = _end_of_live; ++ debug_only(HeapWord* prev_q = NULL); ++ ++ if (q < t && _first_dead > q && ++ !oop(q)->is_gc_marked()) { ++ debug_only( ++ /* we have a chunk of the space which hasn't moved and we've reinitialized ++ * the mark word during the previous pass, so we can't use is_gc_marked for ++ * the traversal. */ ++ HeapWord* const end = _first_dead; ++ ++ while (q < end) { ++ size_t size = obj_size(q); // FIXME: idubrov oop(q)->size(); ++ assert(!oop(q)->is_gc_marked(), ++ "should be unmarked (special dense prefix handling)"); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); ++ debug_only(prev_q = q); ++ q += size; ++ } ++ ) /* debug_only */ ++ // (tw) first_dead can be live object! 
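++    // Objects whose size changes (or that are rescued) set _first_dead to
++    // their own, still live, address in prepare_for_compaction, so the
++    // original decode_pointer() shortcut (kept below for reference) would
++    // skip over a live object here.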
++ q = _first_dead; ++ ++ //if (_first_dead == t) { ++ // q = t; ++ //} else { ++ ///* $$$ Funky */ ++ //q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); ++ //} ++ } ++ ++ const intx scan_interval = PrefetchScanIntervalInBytes; ++ const intx copy_interval = PrefetchCopyIntervalInBytes; ++ while (q < t) { ++ if (!oop(q)->is_gc_marked()) { ++ /* mark is pointer to next marked oop */ ++ debug_only(prev_q = q); ++ q = (HeapWord*) oop(q)->mark()->decode_pointer(); ++ assert(q > prev_q, "we should be moving forward through memory"); ++ } else { ++ /* prefetch beyond q */ ++ Prefetch::read(q, scan_interval); ++ ++ /* size and destination */ ++ size_t size = obj_size(q); ++ HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); ++ ++ if (must_rescue(oop(q), oop(q)->forwardee())) { ++ oop dest_obj = rescue(oop(q)); ++ debug_only(Copy::fill_to_words(q, size, 0)); ++ } else { ++ ++ /* prefetch beyond compaction_top */ ++ Prefetch::write(compaction_top, copy_interval); ++ ++ /* copy object and reinit its mark */ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, ++ compaction_top)); ++ assert(q != compaction_top || oop(q)->blueprint()->new_version() != NULL, "everything in this pass should be moving"); ++ ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ MarkSweep::update_fields(oop(q), oop(compaction_top)); ++ } else { ++ Copy::aligned_conjoint_words(q, compaction_top, size); ++ } ++ oop(compaction_top)->init_mark(); ++ assert(oop(compaction_top)->klass() != NULL, "should have a class"); ++ } ++ ++ debug_only(prev_q = q); ++ q += size; ++ } ++ } ++ ++ /* Let's remember if we were empty before we did the compaction. */ ++ bool was_empty = used_region().is_empty(); ++ /* Reset space after compaction is complete */ ++ reset_after_compaction(); ++ /* We do this clear, below, since it has overloaded meanings for some */ ++ /* space subtypes. For example, OffsetTableContigSpace's that were */ ++ /* compacted into will have had their offset table thresholds updated */ ++ /* continuously, but those that weren't need to have their thresholds */ ++ /* re-initialized. Also mangles unused area for debugging. */ ++ if (used_region().is_empty()) { ++ if (!was_empty) clear(SpaceDecorator::Mangle); ++ } else { ++ if (ZapUnusedHeapArea) mangle_unused_area(); ++ } ++ ++ //SCAN_AND_COMPACT(obj_size); + } + + void Space::print_short() const { print_short_on(tty); } +diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp +index ef2f2c6..b54d470 100644 +--- a/src/share/vm/memory/space.hpp ++++ b/src/share/vm/memory/space.hpp +@@ -445,6 +445,9 @@ public: + // indicates when the next such action should be taken. + virtual void prepare_for_compaction(CompactPoint* cp); + // MarkSweep support phase3 ++ DEBUG_ONLY(int space_index(oop obj)); ++ bool must_rescue(oop old_obj, oop new_obj); ++ oop rescue(oop old_obj); + virtual void adjust_pointers(); + // MarkSweep support phase4 + virtual void compact(); +@@ -475,6 +478,10 @@ public: + virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, + HeapWord* compact_top); + ++ // (tw) ++ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, ++ HeapWord* compact_top); ++ + // Return a size with adjusments as required of the space. 
+ virtual size_t adjust_object_size_v(size_t size) const { return size; } + +diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp +index 8ce17d9..4c1ba52 100644 +--- a/src/share/vm/memory/universe.cpp ++++ b/src/share/vm/memory/universe.cpp +@@ -100,6 +100,8 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" + #endif + ++bool Universe::_is_redefining_gc_run = false; ++ + // Known objects + klassOop Universe::_boolArrayKlassObj = NULL; + klassOop Universe::_byteArrayKlassObj = NULL; +@@ -204,6 +206,38 @@ void Universe::system_classes_do(void f(klassOop)) { + f(systemObjArrayKlassObj()); + } + ++// DCEVM: This method should iterate all pointers that are not within heap objects. ++void Universe::root_oops_do(OopClosure *oopClosure) { ++ ++ class AlwaysTrueClosure: public BoolObjectClosure { ++ public: ++ void do_object(oop p) { ShouldNotReachHere(); } ++ bool do_object_b(oop p) { return true; } ++ }; ++ AlwaysTrueClosure always_true; ++ ++ // General strong roots ++ Universe::oops_do(oopClosure); ++ JNIHandles::oops_do(oopClosure); ++ Threads::oops_do(oopClosure, NULL); ++ ObjectSynchronizer::oops_do(oopClosure); ++ FlatProfiler::oops_do(oopClosure); ++ //Management::oops_do(oopClosure); // DCEVM: TODO: Check if this is correct? ++ JvmtiExport::oops_do(oopClosure); ++ // SO_AllClasses ++ SystemDictionary::oops_do(oopClosure); ++ ++ // Now adjust pointers in remaining weak roots. (All of which should ++ // have been cleared if they pointed to non-surviving objects.) ++ // Global (weak) JNI handles ++ JNIHandles::weak_oops_do(&always_true, oopClosure); ++ ++ CodeCache::oops_do(oopClosure); ++ StringTable::oops_do(oopClosure); ++ //ref_processor()->weak_oops_do(&oopClosure); // DCEVM: TODO: Check if this is correct? ++ //PSScavenge::reference_processor()->weak_oops_do(&oopClosure); // DCEVM: TODO: Check if this is correct? ++} ++ + void Universe::oops_do(OopClosure* f, bool do_all) { + + f->do_oop((oop*) &_int_mirror); +diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp +index da21a8b..676675e 100644 +--- a/src/share/vm/memory/universe.hpp ++++ b/src/share/vm/memory/universe.hpp +@@ -127,6 +127,8 @@ class Universe: AllStatic { + friend class SystemDictionary; + friend class VMStructs; + friend class CompactingPermGenGen; ++ friend class Space; ++ friend class ContiguousSpace; + friend class VM_PopulateDumpSharedSpace; + + friend jint universe_init(); +@@ -258,7 +260,18 @@ class Universe: AllStatic { + + static void compute_verify_oop_data(); + ++ static bool _is_redefining_gc_run; ++ + public: ++ ++ static bool is_redefining_gc_run() { ++ return _is_redefining_gc_run; ++ } ++ ++ static void set_redefining_gc_run(bool b) { ++ _is_redefining_gc_run = b; ++ } ++ + // Known classes in the VM + static klassOop boolArrayKlassObj() { return _boolArrayKlassObj; } + static klassOop byteArrayKlassObj() { return _byteArrayKlassObj; } +@@ -403,6 +416,8 @@ class Universe: AllStatic { + + // Iteration + ++ static void root_oops_do(OopClosure *f); ++ + // Apply "f" to the addresses of all the direct heap pointers maintained + // as static fields of "Universe". 
+ static void oops_do(OopClosure* f, bool do_all = false); +@@ -419,6 +434,7 @@ class Universe: AllStatic { + + // Debugging + static bool verify_in_progress() { return _verify_in_progress; } ++ static void set_verify_in_progress(bool b) { _verify_in_progress = b; } + static void verify(bool silent, VerifyOption option); + static void verify(bool silent) { + verify(silent, VerifyOption_Default /* option */); +diff --git a/src/share/vm/oops/arrayKlass.cpp b/src/share/vm/oops/arrayKlass.cpp +index 4aa1155..2738ea9 100644 +--- a/src/share/vm/oops/arrayKlass.cpp ++++ b/src/share/vm/oops/arrayKlass.cpp +@@ -129,9 +129,9 @@ objArrayOop arrayKlass::compute_secondary_supers(int num_extra_slots, TRAPS) { + + bool arrayKlass::compute_is_subtype_of(klassOop k) { + // An array is a subtype of Serializable, Clonable, and Object +- return k == SystemDictionary::Object_klass() +- || k == SystemDictionary::Cloneable_klass() +- || k == SystemDictionary::Serializable_klass(); ++ return k->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() ++ || k->klass_part()->newest_version() == SystemDictionary::Cloneable_klass()->klass_part()->newest_version() ++ || k->klass_part()->newest_version() == SystemDictionary::Serializable_klass()->klass_part()->newest_version(); + } + + +diff --git a/src/share/vm/oops/constMethodKlass.cpp b/src/share/vm/oops/constMethodKlass.cpp +index e74811f..be3fe7d 100644 +--- a/src/share/vm/oops/constMethodKlass.cpp ++++ b/src/share/vm/oops/constMethodKlass.cpp +@@ -102,6 +102,7 @@ void constMethodKlass::oop_follow_contents(oop obj) { + constMethodOop cm = constMethodOop(obj); + MarkSweep::mark_and_push(cm->adr_constants()); + MarkSweep::mark_and_push(cm->adr_stackmap_data()); ++ MarkSweep::mark_and_push(cm->adr_code_section_table()); + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::constMethodKlassObj never moves. + } +@@ -113,6 +114,7 @@ void constMethodKlass::oop_follow_contents(ParCompactionManager* cm, + constMethodOop cm_oop = constMethodOop(obj); + PSParallelCompact::mark_and_push(cm, cm_oop->adr_constants()); + PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data()); ++ PSParallelCompact::mark_and_push(cm, cm_oop->adr_code_section_table()); + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::constMethodKlassObj never moves. + } +@@ -123,6 +125,7 @@ int constMethodKlass::oop_oop_iterate(oop obj, OopClosure* blk) { + constMethodOop cm = constMethodOop(obj); + blk->do_oop(cm->adr_constants()); + blk->do_oop(cm->adr_stackmap_data()); ++ blk->do_oop(cm->adr_code_section_table()); + // Get size before changing pointers. + // Don't call size() or oop_size() since that is a virtual call. + int size = cm->object_size(); +@@ -138,6 +141,8 @@ int constMethodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) + if (mr.contains(adr)) blk->do_oop(adr); + adr = cm->adr_stackmap_data(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = cm->adr_code_section_table(); ++ if (mr.contains(adr)) blk->do_oop(adr); + // Get size before changing pointers. + // Don't call size() or oop_size() since that is a virtual call. 
+ int size = cm->object_size(); +@@ -152,6 +157,7 @@ int constMethodKlass::oop_adjust_pointers(oop obj) { + constMethodOop cm = constMethodOop(obj); + MarkSweep::adjust_pointer(cm->adr_constants()); + MarkSweep::adjust_pointer(cm->adr_stackmap_data()); ++ MarkSweep::adjust_pointer(cm->adr_code_section_table()); + // Get size before changing pointers. + // Don't call size() or oop_size() since that is a virtual call. + int size = cm->object_size(); +diff --git a/src/share/vm/oops/constMethodOop.hpp b/src/share/vm/oops/constMethodOop.hpp +index 549192b..5cea5c4 100644 +--- a/src/share/vm/oops/constMethodOop.hpp ++++ b/src/share/vm/oops/constMethodOop.hpp +@@ -129,7 +129,7 @@ private: + + public: + oop* oop_block_beg() const { return adr_constants(); } +- oop* oop_block_end() const { return adr_stackmap_data() + 1; } ++ oop* oop_block_end() const { return adr_code_section_table() + 1; } + + private: + // +@@ -141,6 +141,9 @@ private: + // Raw stackmap data for the method + typeArrayOop _stackmap_data; + ++ // (tw) Table mapping code sections for method forward points. ++ typeArrayOop _code_section_table; ++ + // + // End of the oop block. + // +@@ -195,6 +198,28 @@ public: + } + bool has_stackmap_table() const { return _stackmap_data != NULL; } + ++ // code section table ++ typeArrayOop code_section_table() const { return _code_section_table; } ++ void set_code_section_table(typeArrayOop e) { oop_store_without_check((oop*) &_code_section_table, (oop) e); } ++ bool has_code_section_table() const { return code_section_table() != NULL && code_section_table()->length() > 0; } ++ static const int ValuesPerCodeSectionEntry = 3; ++ int code_section_entries() const { ++ if (!has_code_section_table()) return 0; ++ return _code_section_table->length() / ValuesPerCodeSectionEntry; ++ } ++ ++ int code_section_new_index_at(int index) const { ++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry); ++ } ++ ++ int code_section_original_index_at(int index) const { ++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry + 1); ++ } ++ ++ int code_section_length_at(int index) const { ++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry + 2); ++ } ++ + void init_fingerprint() { + const uint64_t initval = CONST64(0x8000000000000000); + _fingerprint = initval; +@@ -301,6 +326,7 @@ public: + // Garbage collection support + oop* adr_constants() const { return (oop*)&_constants; } + oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; } ++ oop* adr_code_section_table() const { return (oop*)&_code_section_table; } + bool is_conc_safe() { return _is_conc_safe; } + void set_is_conc_safe(bool v) { _is_conc_safe = v; } + +diff --git a/src/share/vm/oops/cpCacheOop.cpp b/src/share/vm/oops/cpCacheOop.cpp +index ad62921..f39f202 100644 +--- a/src/share/vm/oops/cpCacheOop.cpp ++++ b/src/share/vm/oops/cpCacheOop.cpp +@@ -37,9 +37,15 @@ + + // Implememtation of ConstantPoolCacheEntry + ++void ConstantPoolCacheEntry::copy_from(ConstantPoolCacheEntry *other) { ++ _flags = other->_flags; // flags ++} ++ + void ConstantPoolCacheEntry::initialize_entry(int index) { + assert(0 < index && index < 0x10000, "sanity check"); + _indices = index; ++ _f1 = NULL; ++ _f2 = 0; + assert(constant_pool_index() == index, ""); + } + +@@ -162,7 +168,8 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, + int vtable_index) { + assert(!is_secondary_entry(), ""); + assert(method->interpreter_entry() != NULL, "should have been set at this point"); +- 
assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); ++ // (tw) No longer valid assert ++ //assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); + + int byte_no = -1; + bool change_to_virtual = false; +@@ -183,6 +190,7 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, + set_method_flags(as_TosState(method->result_type()), + ( 1 << is_vfinal_shift) | + ((method->is_final_method() ? 1 : 0) << is_final_shift) | ++ ((method->is_old() ? 1 : 0) << is_old_method_shift) | + ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift), + method()->size_of_parameters()); + set_f2_as_vfinal_method(method()); +@@ -190,9 +198,13 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, + assert(vtable_index >= 0, "valid index"); + assert(!method->is_final_method(), "sanity"); + set_method_flags(as_TosState(method->result_type()), ++ ((method->is_old() ? 1 : 0) << is_old_method_shift) | + ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift), + method()->size_of_parameters()); + set_f2(vtable_index); ++ ++ // (tw) save method holder in f1 for virtual calls ++ set_f1(method()); + } + byte_no = 2; + break; +@@ -206,7 +218,8 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, + // Once is_vfinal is set, it must stay that way, lest we get a dangling oop. + set_method_flags(as_TosState(method->result_type()), + ((is_vfinal() ? 1 : 0) << is_vfinal_shift) | +- ((method->is_final_method() ? 1 : 0) << is_final_shift), ++ ((method->is_final_method() ? 1 : 0) << is_final_shift) | ++ ((method->is_old() ? 1 : 0) << is_old_method_shift), + method()->size_of_parameters()); + set_f1(method()); + byte_no = 1; +@@ -259,7 +272,7 @@ void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) + set_f1(interf); + set_f2(index); + set_method_flags(as_TosState(method->result_type()), +- 0, // no option bits ++ ((method->is_old() ? 1 : 0) << is_old_method_shift), + method()->size_of_parameters()); + set_bytecode_1(Bytecodes::_invokeinterface); + } +@@ -520,27 +533,12 @@ void ConstantPoolCacheEntry::update_pointers() { + // If this constantPoolCacheEntry refers to old_method then update it + // to refer to new_method. 
+ bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, +- methodOop new_method, bool * trace_name_printed) { ++ methodOop new_method) { + + if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- if (f2_as_vfinal_method() == old_method) { +- // match old_method so need an update +- // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values +- _f2 = (intptr_t)new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- ++ // virtual and final so f2() contains method ptr instead of vtable index ++ if (f2_as_vfinal_method() != NULL && f2_as_vfinal_method()->method_holder()->klass_part()->new_version()) { ++ initialize_entry(constant_pool_index()); + return true; + } + +@@ -548,84 +546,27 @@ bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, + return false; + } + +- if ((oop)_f1 == NULL) { +- // NULL f1() means this is a virtual entry so bail out +- // We are assuming that the vtable index does not need change. ++ // (tw) check how to update interface methods! ++ if (bytecode_1() == Bytecodes::_invokevirtual || bytecode_2() == Bytecodes::_invokevirtual) { ++ ++ if(f1_as_method()->method_holder()->klass_part()->new_version()) { ++ initialize_entry(constant_pool_index()); ++ return true; ++ } ++ + return false; + } + + if ((oop)_f1 == old_method) { + _f1 = new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- + return true; ++ } else if(_f1 != NULL && (bytecode_1() != Bytecodes::_invokeinterface && f1_as_method()->method_holder()->klass_part()->new_version())) { ++ initialize_entry(constant_pool_index()); + } + + return false; + } + +-// a constant pool cache entry should never contain old or obsolete methods +-bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- methodOop m = (methodOop)_f2; +- // Return false if _f2 refers to an old or an obsolete method. +- // _f2 == NULL || !m->is_method() are just as unexpected here. 
+- return (m != NULL && m->is_method() && !m->is_old() && !m->is_obsolete()); +- } else if ((oop)_f1 == NULL || !((oop)_f1)->is_method()) { +- // _f1 == NULL || !_f1->is_method() are OK here +- return true; +- } +- +- methodOop m = (methodOop)_f1; +- // return false if _f1 refers to an old or an obsolete method +- return (!m->is_old() && !m->is_obsolete()); +-} +- +-bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { +- if (!is_method_entry()) { +- // not a method entry so not interesting by default +- return false; +- } +- +- methodOop m = NULL; +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- m = f2_as_vfinal_method(); +- } else if (is_f1_null()) { +- // NULL _f1 means this is a virtual entry so also not interesting +- return false; +- } else { +- oop f1 = _f1; // _f1 is volatile +- if (!f1->is_method()) { +- // _f1 can also contain a klassOop for an interface +- return false; +- } +- m = f1_as_method(); +- } +- +- assert(m != NULL && m->is_method(), "sanity check"); +- if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) { +- // robustness for above sanity checks or method is not in +- // the interesting class +- return false; +- } +- +- // the method is in the interesting class so the entry is interesting +- return true; +-} +- + void ConstantPoolCacheEntry::print(outputStream* st, int index) const { + // print separator + if (index == 0) st->print_cr(" -------------"); +@@ -663,60 +604,18 @@ void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) { + } + } + +-// RedefineClasses() API support: +-// If any entry of this constantPoolCache points to any of +-// old_methods, replace it with the corresponding new_method. +-void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed) { +- +- if (methods_length == 0) { +- // nothing to do if there are no methods +- return; +- } +- +- // get shorthand for the interesting class +- klassOop old_holder = old_methods[0]->method_holder(); ++void constantPoolCacheOopDesc::adjust_entries(methodOop* old_methods, methodOop* new_methods, ++ int methods_length) { + + for (int i = 0; i < length(); i++) { +- if (!entry_at(i)->is_interesting_method_entry(old_holder)) { +- // skip uninteresting methods +- continue; +- } +- +- // The constantPoolCache contains entries for several different +- // things, but we only care about methods. In fact, we only care +- // about methods in the same class as the one that contains the +- // old_methods. At this point, we have an interesting entry. 
+- +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; +- methodOop new_method = new_methods[j]; +- +- if (entry_at(i)->adjust_method_entry(old_method, new_method, +- trace_name_printed)) { +- // current old_method matched this entry and we updated it so +- // break out and get to the next interesting entry if there one +- break; +- } ++ if (entry_at(i)->is_field_entry()) { ++ // (tw) TODO: Update only field offsets and modify only constant pool entries that ++ // point to changed fields ++ entry_at(i)->initialize_entry(entry_at(i)->constant_pool_index()); ++ } else if(entry_at(i)->is_method_entry()) { ++ entry_at(i)->adjust_method_entry(NULL, NULL); + } + } + } + +-// the constant pool cache should never contain old or obsolete methods +-bool constantPoolCacheOopDesc::check_no_old_or_obsolete_entries() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL) && +- !entry_at(i)->check_no_old_or_obsolete_entries()) { +- return false; +- } +- } +- return true; +-} + +-void constantPoolCacheOopDesc::dump_cache() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL)) { +- entry_at(i)->print(tty, i); +- } +- } +-} +diff --git a/src/share/vm/oops/cpCacheOop.hpp b/src/share/vm/oops/cpCacheOop.hpp +index ef26775..6f37d81 100644 +--- a/src/share/vm/oops/cpCacheOop.hpp ++++ b/src/share/vm/oops/cpCacheOop.hpp +@@ -136,7 +136,8 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { + void set_bytecode_2(Bytecodes::Code code); + void set_f1(oop f1) { + oop existing_f1 = _f1; // read once +- assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); ++ // (tw) need to relax assertion for redefinition ++ // assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); + oop_store(&_f1, f1); + } + void release_set_f1(oop f1); +@@ -167,6 +168,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { + tos_state_mask = right_n_bits(tos_state_bits), + tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below + // misc. option bits; can be any bit position in [16..27] ++ is_old_method_shift = 19, + is_vfinal_shift = 20, + is_volatile_shift = 21, + is_final_shift = 22, +@@ -200,6 +202,8 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { + void initialize_entry(int original_index); // initialize primary entry + void initialize_secondary_entry(int main_index); // initialize secondary entry + ++ void copy_from(ConstantPoolCacheEntry *other); ++ + void set_field( // sets entry to resolved field state + Bytecodes::Code get_code, // the bytecode used for reading the field + Bytecodes::Code put_code, // the bytecode used for writing the field +@@ -361,10 +365,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { + // trace_name_printed is set to true if the current call has + // printed the klass name so that other routines in the adjust_* + // group don't print the klass name. 
+- bool adjust_method_entry(methodOop old_method, methodOop new_method,
+- bool * trace_name_printed);
+- bool check_no_old_or_obsolete_entries();
+- bool is_interesting_method_entry(klassOop k);
++ bool adjust_method_entry(methodOop old_method, methodOop new_method);
+
+ // Debugging & Printing
+ void print (outputStream* st, int index) const;
+@@ -485,16 +486,9 @@ class constantPoolCacheOopDesc: public oopDesc {
+ return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
+ }
+
+- // RedefineClasses() API support:
+- // If any entry of this constantPoolCache points to any of
+- // old_methods, replace it with the corresponding new_method.
+- // trace_name_printed is set to true if the current call has
+- // printed the klass name so that other routines in the adjust_*
+- // group don't print the klass name.
+- void adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
+- int methods_length, bool * trace_name_printed);
+- bool check_no_old_or_obsolete_entries();
+- void dump_cache();
++ // (tw) Update method and field references
++ void adjust_entries(methodOop* old_methods, methodOop* new_methods,
++ int methods_length);
+ };
+
+ #endif // SHARE_VM_OOPS_CPCACHEOOP_HPP
+diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
+index cd3dce0..2a0a2aa 100644
+--- a/src/share/vm/oops/instanceKlass.cpp
++++ b/src/share/vm/oops/instanceKlass.cpp
+@@ -250,12 +250,118 @@ void instanceKlass::initialize(TRAPS) {
+ }
+
+
++void instanceKlass::initialize_redefined_class() {
++ RC_TRACE(0x00000400, ("initializing redefined class %s",
++ name()->as_C_string()));
++
++ assert(!is_initialized(), "");
++ assert(this->old_version() != NULL, "");
++ assert(is_linked(), "must be linked before");
++
++
++ instanceKlassHandle this_oop(Thread::current(), this->as_klassOop());
++ class UpdateStaticFieldClosure : public FieldClosure {
++
++ private:
++ instanceKlassHandle this_oop;
++
++ public:
++ UpdateStaticFieldClosure(instanceKlassHandle this_oop) {
++ this->this_oop = this_oop;
++ }
++
++ virtual void do_field(fieldDescriptor* fd) {
++ fieldDescriptor result;
++ bool found = ((instanceKlass *)(this_oop->old_version()->klass_part()))->find_local_field(fd->name(), fd->signature(), &result);
++
++ if (found && result.is_static()) {
++ int old_offset = result.offset();
++ assert(result.field_type() == fd->field_type(), "Old and new field types do not match");
++
++ oop new_location = this_oop()->java_mirror();
++ oop old_location = this_oop->old_version()->java_mirror();
++ int offset = fd->offset();
++ RC_TRACE(0x00000400, ("Copying static field value for field '%s' old_offset=%d new_offset=%d",
++ fd->name()->as_C_string(), old_offset, offset));
++
++ oop cur_oop;
++
++ switch(result.field_type()) {
++
++ // Found static field with same name and type in the old klass => copy value from old to new klass
++
++ case T_BOOLEAN:
++ new_location->bool_field_put(offset, old_location->bool_field(old_offset));
++ DEBUG_ONLY(old_location->byte_field_put(old_offset, 0));
++ break;
++
++ case T_CHAR:
++ new_location->char_field_put(offset, old_location->char_field(old_offset));
++ DEBUG_ONLY(old_location->char_field_put(old_offset, 0));
++ break;
++
++ case T_FLOAT:
++ new_location->float_field_put(offset, old_location->float_field(old_offset));
++ DEBUG_ONLY(old_location->float_field_put(old_offset, 0));
++ break;
++
++ case T_DOUBLE:
++ new_location->double_field_put(offset, old_location->double_field(old_offset));
++
DEBUG_ONLY(old_location->double_field_put(old_offset, 0)); ++ break; ++ ++ case T_BYTE: ++ new_location->byte_field_put(offset, old_location->byte_field(old_offset)); ++ DEBUG_ONLY(old_location->byte_field_put(old_offset, 0)); ++ break; ++ ++ case T_SHORT: ++ new_location->short_field_put(offset, old_location->short_field(old_offset)); ++ DEBUG_ONLY(old_location->short_field_put(old_offset, 0)); ++ break; ++ ++ case T_INT: ++ new_location->int_field_put(offset, old_location->int_field(old_offset)); ++ DEBUG_ONLY(old_location->int_field_put(old_offset, 0)); ++ break; ++ ++ case T_LONG: ++ new_location->long_field_put(offset, old_location->long_field(old_offset)); ++ DEBUG_ONLY(old_location->long_field_put(old_offset, 0)); ++ break; ++ ++ case T_OBJECT: ++ case T_ARRAY: ++ cur_oop = old_location->obj_field(old_offset); ++ new_location->obj_field_put_raw(offset, cur_oop); ++ old_location->obj_field_put_raw(old_offset, NULL); ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ } else { ++ RC_TRACE(0x00000200, ("New static field %s has_initial_value=%d", ++ fd->name()->as_C_string(), (int)(fd->has_initial_value()))); ++ // field not found ++ // (tw) TODO: Probably this call is not necessary here! ++ // FIXME: idubrov ++ //ClassFileParser::initialize_static_field(fd, Thread::current()); ++ } ++ } ++ }; ++ ++ UpdateStaticFieldClosure cl(this_oop); ++ this->do_local_static_fields(&cl); ++} ++ ++ + bool instanceKlass::verify_code( + instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) { + // 1) Verify the bytecodes + Verifier::Mode mode = + throw_verifyerror ? Verifier::ThrowException : Verifier::NoException; +- return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false); ++ return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), true, CHECK_false); + } + + +@@ -362,7 +468,13 @@ bool instanceKlass::link_class_impl( + jt->get_thread_stat()->perf_recursion_counts_addr(), + jt->get_thread_stat()->perf_timers_addr(), + PerfClassTraceTime::CLASS_VERIFY); +- bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(true); ++ } ++ bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(false); ++ } + if (!verify_ok) { + return false; + } +@@ -400,7 +512,8 @@ bool instanceKlass::link_class_impl( + } + #endif + this_oop->set_init_state(linked); +- if (JvmtiExport::should_post_class_prepare()) { ++ // (tw) Must check for old version in order to prevent infinite loops. 
++ if (JvmtiExport::should_post_class_prepare() && this_oop->old_version() == NULL /* JVMTI deadlock otherwise */) {
+ Thread *thread = THREAD;
+ assert(thread->is_Java_thread(), "thread->is_Java_thread()");
+ JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
+ }
+@@ -673,6 +786,18 @@ bool instanceKlass::implements_interface(klassOop k) const {
+ return false;
+ }
+
++bool instanceKlass::implements_interface_any_version(klassOop k) const {
++ k = k->klass_part()->newest_version();
++ if (this->newest_version() == k) return true;
++ assert(Klass::cast(k)->is_interface(), "should be an interface class");
++ for (int i = 0; i < transitive_interfaces()->length(); i++) {
++ if (((klassOop)transitive_interfaces()->obj_at(i))->klass_part()->newest_version() == k) {
++ return true;
++ }
++ }
++ return false;
++}
++
+ objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
+ if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
+ if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
+@@ -801,7 +926,25 @@ methodOop instanceKlass::class_initializer() {
+ }
+
+ void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
++
++ ResourceMark rm(THREAD);
+ methodHandle h_method(THREAD, this_oop->class_initializer());
++
++ if (this_oop->revision_number() != -1){
++ methodOop m = NULL;
++ if (AllowAdvancedClassRedefinition) {
++ m = this_oop->find_method(vmSymbols::static_transformer_name(), vmSymbols::void_method_signature());
++ }
++ methodHandle method(m);
++ if (method() != NULL && method()->is_static()) {
++ RC_TRACE(0x00000200, ("Calling static transformer instead of static initializer"));
++ h_method = method;
++ } else if (!((instanceKlass*)this_oop->old_version()->klass_part())->is_not_initialized()) {
++ // Only execute the static initializer if it was not yet executed for the old version of the class.
++ return; ++ } ++ } ++ + assert(!this_oop->is_initialized(), "we cannot initialize twice"); + if (TraceClassInitialization) { + tty->print("%d Initializing ", call_class_initializer_impl_counter++); +@@ -949,6 +1092,137 @@ void instanceKlass::methods_do(void f(methodOop method)) { + } + } + ++void instanceKlass::store_update_information(GrowableArray<int> &values) { ++ int *arr = NEW_C_HEAP_ARRAY(int, values.length(), mtClass); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_update_information(arr); ++} ++ ++void instanceKlass::clear_update_information() { ++ FREE_C_HEAP_ARRAY(int, update_information(), mtClass); ++ set_update_information(NULL); ++} ++ ++typedef Pair<int, klassOop> typeInfoPair; ++ ++void instanceKlass::store_type_check_information(GrowableArray< Pair<int, klassOop> > &values) { ++ Pair<int, klassOop> *arr = NEW_C_HEAP_ARRAY(typeInfoPair, values.length(), mtClass); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_type_check_information(arr); ++} ++ ++void instanceKlass::clear_type_check_information() { ++ FREE_C_HEAP_ARRAY(typeInfoPair, type_check_information(), mtClass); ++ set_type_check_information(NULL); ++} ++ ++void instanceKlass::do_fields_evolution(FieldEvolutionClosure* cl) { ++ ++ assert (old_version() != NULL, "must have old version!"); ++ ++ klassOop old_klass_oop = old_version(); ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = this; ++ ++ fieldDescriptor fd; ++ fieldDescriptor old_fd; ++ ++ instanceKlass *cur_new_klass = new_klass; ++ klassOop cur_new_klass_oop = this->as_klassOop(); ++ ++ if (_fields_not_changed) { ++ ++ class MyFieldClosure : public FieldClosure { ++ ++ FieldEvolutionClosure *_cl; ++ public: ++ MyFieldClosure(FieldEvolutionClosure *cl) {_cl = cl; } ++ virtual void do_field(fieldDescriptor* fd) { ++ _cl->do_changed_field(fd, fd); ++ } ++ }; ++ ++ MyFieldClosure mfc(cl); ++ do_nonstatic_fields(&mfc); ++ } else { ++ ++ _fields_not_changed = true; ++ GrowableArray<fieldDescriptor> fds; ++ while (true) { ++ for (JavaFieldStream fs(cur_new_klass); !fs.done(); fs.next()) { ++ fd.initialize(cur_new_klass_oop, fs.index()); ++ if (fd.is_static()) { ++ continue; ++ } ++ fds.append(fd); ++ } ++ ++ if (cur_new_klass->super() != NULL) { ++ cur_new_klass_oop = cur_new_klass->super(); ++ cur_new_klass = instanceKlass::cast(cur_new_klass_oop); ++ } else { ++ break; ++ } ++ } ++ ++ GrowableArray<fieldDescriptor> sortedFds; ++ while (fds.length() > 0) { ++ int minOffset = 0x7fffffff; ++ int minIndex = -1; ++ for (int i=0; i<fds.length(); i++) { ++ int curOffset = fds.adr_at(i)->offset(); ++ if (curOffset < minOffset) { ++ minOffset = curOffset; ++ minIndex = i; ++ } ++ } ++ ++ sortedFds.append(fds.at(minIndex)); ++ fds.remove_at(minIndex); ++ } ++ ++ ++ for (int i=0; i<sortedFds.length(); i++) { ++ fieldDescriptor &fd = *sortedFds.adr_at(i); ++ ++ char found = 0; ++ instanceKlass *cur_old_klass = old_klass; ++ klassOop cur_old_klass_oop = old_klass_oop; ++ while (true) { ++ for (JavaFieldStream fs(cur_old_klass); !fs.done(); fs.next()) { ++ old_fd.initialize(cur_old_klass_oop, fs.index()); ++ if (old_fd.is_static()) { ++ continue; ++ } ++ if (old_fd.name() == fd.name() && old_fd.signature() == fd.signature()) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found && cur_old_klass->super()) { ++ cur_old_klass_oop = cur_old_klass->super(); ++ cur_old_klass = instanceKlass::cast(cur_old_klass_oop); ++ } else { ++ break; ++ } ++ } ++ ++ if (found) { ++ if 
(old_fd.offset() != fd.offset()) { ++ _fields_not_changed = false; ++ } ++ cl->do_changed_field(&old_fd, &fd); ++ } else { ++ _fields_not_changed = false; ++ cl->do_new_field(&fd); ++ } ++ } ++ } ++} + + void instanceKlass::do_local_static_fields(FieldClosure* cl) { + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { +@@ -1368,6 +1642,20 @@ jmethodID instanceKlass::jmethod_id_or_null(methodOop method) { + return id; + } + ++bool instanceKlass::update_jmethod_id(methodOop method, jmethodID newMethodID) { ++ size_t idnum = (size_t)method->method_idnum(); ++ jmethodID* jmeths = methods_jmethod_ids_acquire(); ++ size_t length; // length assigned as debugging crumb ++ jmethodID id = NULL; ++ if (jmeths != NULL && // If there is a cache ++ (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, ++ jmeths[idnum+1] = newMethodID; // Set the id (may be NULL) ++ return true; ++ } ++ ++ return false; ++} ++ + + // Cache an itable index + void instanceKlass::set_cached_itable_index(size_t idnum, int index) { +@@ -1527,6 +1815,13 @@ void instanceKlass::remove_dependent_nmethod(nmethod* nm) { + last = b; + b = b->next(); + } ++ ++ // (tw) Hack as dependencies get wrong version of klassOop ++ if(this->old_version() != NULL) { ++ ((instanceKlass *)this->old_version()->klass_part())->remove_dependent_nmethod(nm); ++ return; ++ } ++ + #ifdef ASSERT + tty->print_cr("### %s can't find dependent nmethod:", this->external_name()); + nm->print(); +@@ -2382,6 +2677,9 @@ void instanceKlass::oop_print_on(oop obj, outputStream* st) { + klassOop mirrored_klass = java_lang_Class::as_klassOop(obj); + st->print(BULLET"fake entry for mirror: "); + mirrored_klass->print_value_on(st); ++ if (mirrored_klass != NULL) { ++ st->print_cr("revision: %d (oldest=%d, newest=%d)", mirrored_klass->klass_part()->revision_number(), mirrored_klass->klass_part()->oldest_version()->klass_part()->revision_number(), mirrored_klass->klass_part()->newest_version()->klass_part()->revision_number()); ++ } + st->cr(); + st->print(BULLET"fake entry resolved_constructor: "); + methodOop ctor = java_lang_Class::resolved_constructor(obj); +diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp +index 8a849cb..f41f13c 100644 +--- a/src/share/vm/oops/instanceKlass.hpp ++++ b/src/share/vm/oops/instanceKlass.hpp +@@ -101,6 +101,22 @@ public: + virtual void do_field(fieldDescriptor* fd) = 0; + }; + ++// (tw) Iterates over the fields of the old and new class ++class FieldEvolutionClosure : public StackObj { ++public: ++ virtual void do_new_field(fieldDescriptor* fd) = 0; ++ virtual void do_old_field(fieldDescriptor* fd) = 0; ++ virtual void do_changed_field(fieldDescriptor* old_fd, fieldDescriptor *new_fd) = 0; ++}; ++ ++// (tw) Iterates over the methods of the old and new class ++class MethodEvolutionClosure : public StackObj { ++public: ++ virtual void do_new_method(methodOop oop) = 0; ++ virtual void do_old_method(methodOop oop) = 0; ++ virtual void do_changed_method(methodOop oldOop, methodOop newOop) = 0; ++}; ++ + #ifndef PRODUCT + // Print fields. + // If "obj" argument to constructor is NULL, prints static fields, otherwise prints non-static fields. +@@ -285,6 +301,11 @@ class instanceKlass: public Klass { + // _idnum_allocated_count. + u1 _init_state; // state of class + ++ // (tw) Field that allows for a short-path when calculating updated fields for the second time and ++ // no fields changed. 
Testing performance impact with this, can be removed later when the update ++ // information is cached. ++ bool _fields_not_changed; ++ + u1 _reference_type; // reference type + + // embedded Java vtable follows here +@@ -452,6 +473,7 @@ class instanceKlass: public Klass { + // initialization (virtuals from Klass) + bool should_be_initialized() const; // means that initialize should be called + void initialize(TRAPS); ++ void initialize_redefined_class(); + void link_class(TRAPS); + bool link_class_or_fail(TRAPS); // returns false on failure + void unlink_class(); +@@ -629,6 +651,7 @@ class instanceKlass: public Klass { + static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum, + size_t *length_p, jmethodID* id_p); + jmethodID jmethod_id_or_null(methodOop method); ++ bool update_jmethod_id(methodOop method, jmethodID newMethodID); + + // cached itable index support + void set_cached_itable_index(size_t idnum, int index); +@@ -711,6 +734,7 @@ class instanceKlass: public Klass { + + // subclass/subinterface checks + bool implements_interface(klassOop k) const; ++ bool implements_interface_any_version(klassOop k) const; + + // Access to the implementor of an interface. + klassOop implementor() const +@@ -760,6 +784,12 @@ class instanceKlass: public Klass { + void do_local_static_fields(FieldClosure* cl); + void do_nonstatic_fields(FieldClosure* cl); // including inherited fields + void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS); ++ void do_fields_evolution(FieldEvolutionClosure *cl); ++ void store_update_information(GrowableArray<int> &values); ++ void clear_update_information(); ++ void store_type_check_information(GrowableArray< Pair<int, klassOop> > &values); ++ void clear_type_check_information(); ++ + + void methods_do(void f(methodOop method)); + void array_klasses_do(void f(klassOop k)); +diff --git a/src/share/vm/oops/instanceKlassKlass.cpp b/src/share/vm/oops/instanceKlassKlass.cpp +index 8e7dc12..63d6dc4 100644 +--- a/src/share/vm/oops/instanceKlassKlass.cpp ++++ b/src/share/vm/oops/instanceKlassKlass.cpp +@@ -480,6 +480,28 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) { + instanceKlass* ik = instanceKlass::cast(klassOop(obj)); + klassKlass::oop_print_on(obj, st); + ++ // (tw) Output revision number and revision numbers of older / newer and oldest / newest version of this class. 
++ ++ st->print(BULLET"revision: %d", ik->revision_number()); ++ ++ if (ik->new_version() != NULL) { ++ st->print(" (newer=%d)", ik->new_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->newest_version() != ik->new_version() && ik->newest_version() != obj) { ++ st->print(" (newest=%d)", ik->newest_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->old_version() != NULL) { ++ st->print(" (old=%d)", ik->old_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->oldest_version() != ik->old_version() && ik->oldest_version() != obj) { ++ st->print(" (oldest=%d)", ik->oldest_version()->klass_part()->revision_number()); ++ } ++ ++ st->cr(); ++ + st->print(BULLET"instance size: %d", ik->size_helper()); st->cr(); + st->print(BULLET"klass size: %d", ik->object_size()); st->cr(); + st->print(BULLET"access: "); ik->access_flags().print_on(st); st->cr(); +@@ -663,7 +685,7 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) { + } + guarantee(sib->as_klassOop()->is_klass(), "should be klass"); + guarantee(sib->as_klassOop()->is_perm(), "should be in permspace"); +- guarantee(sib->super() == super, "siblings should have same superklass"); ++ guarantee(sib->super() == super || super->klass_part()->newest_version() == SystemDictionary::Object_klass(), "siblings should have same superklass"); + sib = sib->next_sibling(); + } + +diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp +index 7db4f03..1171487 100644 +--- a/src/share/vm/oops/instanceRefKlass.cpp ++++ b/src/share/vm/oops/instanceRefKlass.cpp +@@ -455,10 +455,13 @@ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) { + instanceKlass* ik = instanceKlass::cast(k); + + // Check that we have the right class +- debug_only(static bool first_time = true); +- assert(k == SystemDictionary::Reference_klass() && first_time, +- "Invalid update of maps"); +- debug_only(first_time = false); ++ ++ // (tw) Asserts no longer valid for class redefinition ++ // debug_only(static bool first_time = true); ++ ++ //assert(k == SystemDictionary::Reference_klass() && first_time, ++ // "Invalid update of maps"); ++ //debug_only(first_time = false); + assert(ik->nonstatic_oop_map_count() == 1, "just checking"); + + OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); +diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp +index 596d5ad..767588c 100644 +--- a/src/share/vm/oops/klass.cpp ++++ b/src/share/vm/oops/klass.cpp +@@ -55,6 +55,26 @@ bool Klass::is_subclass_of(klassOop k) const { + return false; + } + ++void Klass::update_supers_to_newest_version() { ++ ++ if (super() != NULL) set_super(super()->klass_part()->newest_version()); ++ ++ for (uint i=0; i<primary_super_limit(); i++) { ++ klassOop cur = _primary_supers[i]; ++ if (cur != NULL) { ++ _primary_supers[i] = cur->klass_part()->newest_version(); ++ } ++ } ++ ++ // Scan the array-of-objects ++ int cnt = secondary_supers()->length(); ++ for (int i = 0; i < cnt; i++) { ++ klassOop cur = (klassOop)secondary_supers()->obj_at(i); ++ if (cur != NULL) { ++ secondary_supers()->obj_at_put(i, cur->klass_part()->newest_version()); ++ } ++ } ++} + bool Klass::search_secondary_supers(klassOop k) const { + // Put some extra logic here out-of-line, before the search proper. + // This cuts down the size of the inline method. 
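For illustration between these hunks: the old_version/new_version fields threaded through the Klass changes form a doubly linked chain of class versions, and lookups such as Klass::newest_version() and Klass::oldest_version() (used by update_supers_to_newest_version above) simply walk that chain to either end. A minimal standalone sketch of that traversal follows; the VersionedKlass struct is a simplification invented here for the sketch, not HotSpot code.

    #include <cstdio>

    // Simplified stand-in for the version links the patch adds to Klass.
    struct VersionedKlass {
      VersionedKlass* old_version;  // previous version of this class, or NULL
      VersionedKlass* new_version;  // next version of this class, or NULL
      int revision;                 // -1 for the originally loaded class

      VersionedKlass() : old_version(NULL), new_version(NULL), revision(-1) {}

      // Walk forward to the most recent version (cf. Klass::newest_version()).
      VersionedKlass* newest_version() {
        return new_version == NULL ? this : new_version->newest_version();
      }
      // Walk back to the originally loaded version (cf. Klass::oldest_version()).
      VersionedKlass* oldest_version() {
        return old_version == NULL ? this : old_version->oldest_version();
      }
    };

    int main() {
      VersionedKlass v0, v1, v2;  // one class, redefined twice
      v0.new_version = &v1; v1.old_version = &v0; v1.revision = 1;
      v1.new_version = &v2; v2.old_version = &v1; v2.revision = 2;
      std::printf("newest from v0: revision %d\n", v0.newest_version()->revision);  // 2
      std::printf("oldest from v2: revision %d\n", v2.oldest_version()->revision);  // -1
      return 0;
    }

Setting both links to NULL in base_create_klass_oop, as the next hunk does, makes every freshly created klass a one-element chain, so the recursive walks terminate without special cases.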
+@@ -161,6 +181,16 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
+ kl->set_alloc_size(0);
+ TRACE_INIT_ID(kl);
+
++ kl->set_redefinition_flags(Klass::NoRedefinition);
++ kl->set_redefining(false);
++ kl->set_new_version(NULL);
++ kl->set_old_version(NULL);
++ kl->set_redefinition_index(-1);
++ kl->set_revision_number(-1);
++ kl->set_field_redefinition_policy(DynamicCheck);
++ kl->set_static_field_redefinition_policy(AccessDeletedMembers);
++ kl->set_method_redefinition_policy(AccessDeletedMembers);
++
+ kl->set_prototype_header(markOopDesc::prototype());
+ kl->set_biased_lock_revocation_count(0);
+ kl->set_last_biased_lock_bulk_revocation_time(0);
+@@ -232,7 +262,7 @@ void Klass::initialize_supers(klassOop k, TRAPS) {
+ set_super(NULL);
+ oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop());
+ assert(super_depth() == 0, "Object must already be initialized properly");
+- } else if (k != super() || k == SystemDictionary::Object_klass()) {
++ } else if (k != super() || k->klass_part()->super() == NULL) {
+ assert(super() == NULL || super() == SystemDictionary::Object_klass(),
+ "initialize this only once to a non-trivial value");
+ set_super(k);
+diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
+index bcbd4e7..d086b5d 100644
+--- a/src/share/vm/oops/klass.hpp
++++ b/src/share/vm/oops/klass.hpp
+@@ -170,6 +170,7 @@ class Klass_vtbl {
+ void* operator new(size_t ignored, KlassHandle& klass, int size, TRAPS);
+ };
+
++template<class L, class R> class Pair;
+
+ class Klass : public Klass_vtbl {
+ friend class VMStructs;
+@@ -222,6 +223,39 @@ class Klass : public Klass_vtbl {
+ oop* oop_block_beg() const { return adr_secondary_super_cache(); }
+ oop* oop_block_end() const { return adr_next_sibling() + 1; }
+
++ // (tw) Different class redefinition flags for code evolution.
++ enum RedefinitionFlags {
++
++ // This class is not redefined at all!
++ NoRedefinition,
++
++ // There are changes to the class meta data.
++ ModifyClass = 1,
++
++ // The size of the class meta data changes.
++ ModifyClassSize = ModifyClass << 1,
++
++ // There are changes to the instance format.
++ ModifyInstances = ModifyClassSize << 1,
++
++ // The size of instances changes.
++ ModifyInstanceSize = ModifyInstances << 1,
++
++ // A super type of this class is removed.
++ RemoveSuperType = ModifyInstanceSize << 1,
++
++ // This class (or one of its super classes) has an instance transformer method.
++ HasInstanceTransformer = RemoveSuperType << 1,
++ };
++
++ // (tw) Different policies dealing with deleted fields / methods in old code.
++ enum RedefinitionPolicy {
++ StaticCheck,
++ DynamicCheck,
++ AccessDeletedMembers,
++ AccessOldMembers
++ };
++
+ protected:
+ //
+ // The oop block. All oop fields must be declared here and only oop fields
+@@ -241,6 +275,10 @@ class Klass : public Klass_vtbl {
+ oop _java_mirror;
+ // Superclass
+ klassOop _super;
++ // Old class
++ klassOop _old_version;
++ // New class
++ klassOop _new_version;
+ // First subclass (NULL if none); _subklass->next_sibling() is next one
+ klassOop _subklass;
+ // Sibling link (or NULL); links all subklasses of a klass
+@@ -253,6 +291,19 @@ class Klass : public Klass_vtbl {
+ jint _modifier_flags; // Processed access flags, for use by Class.getModifiers.
+ AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here.
+ ++ // (tw) Non-oop fields for enhanced class redefinition ++ jint _revision_number; // The revision number for redefined classes ++ jint _redefinition_index; // Index of this class when performing the redefinition ++ bool _subtype_changed; ++ int _redefinition_flags; // Level of class redefinition ++ bool _is_copying_backwards; // Does the class need to copy fields backwards? => possibly overwrite itself? ++ int * _update_information; // Update information ++ Pair<int, klassOop> * _type_check_information; // Offsets of object fields that need a type check ++ char _method_redefinition_policy; ++ char _field_redefinition_policy; ++ char _static_field_redefinition_policy; ++ bool _is_redefining; ++ + #ifndef PRODUCT + int _verify_count; // to avoid redundant verifies + #endif +@@ -301,6 +352,99 @@ class Klass : public Klass_vtbl { + klassOop secondary_super_cache() const { return _secondary_super_cache; } + void set_secondary_super_cache(klassOop k) { oop_store_without_check((oop*) &_secondary_super_cache, (oop) k); } + ++ // BEGIN class redefinition utilities ++ ++ // double links between new and old version of a class ++ klassOop old_version() const { return _old_version; } ++ void set_old_version(klassOop klass) { assert(_old_version == NULL || klass == NULL, "Can only be set once!"); _old_version = klass; } ++ klassOop new_version() const { return _new_version; } ++ void set_new_version(klassOop klass) { assert(_new_version == NULL || klass == NULL, "Can only be set once!"); _new_version = klass; } ++ ++ // A subtype of this class is no longer a subtype ++ bool has_subtype_changed() const { return _subtype_changed; } ++ void set_subtype_changed(bool b) { assert(is_newest_version() || new_version()->klass_part()->is_newest_version(), "must be newest or second newest version"); ++ _subtype_changed = b; } ++ // state of being redefined ++ int redefinition_index() const { return _redefinition_index; } ++ void set_redefinition_index(int index) { _redefinition_index = index; } ++ void set_redefining(bool b) { _is_redefining = b; } ++ bool is_redefining() const { return _is_redefining; } ++ int redefinition_flags() const { return _redefinition_flags; } ++ bool check_redefinition_flag(int flags) const { return (_redefinition_flags & flags) != 0; } ++ void set_redefinition_flags(int flags) { _redefinition_flags = flags; } ++ bool is_copying_backwards() const { return _is_copying_backwards; } ++ void set_copying_backwards(bool b) { _is_copying_backwards = b; } ++ ++ // update information ++ int *update_information() const { return _update_information; } ++ void set_update_information(int *info) { _update_information = info; } ++ Pair<int, klassOop> *type_check_information() const { return _type_check_information; } ++ void set_type_check_information(Pair<int, klassOop> *info) { _type_check_information = info; } ++ ++ bool is_same_or_older_version(klassOop klass) const { ++ if (Klass::cast(klass) == this) { return true; } ++ else if (_old_version == NULL) { return false; } ++ else { return _old_version->klass_part()->is_same_or_older_version(klass); } ++ } ++ ++ // Revision number for redefined classes, -1 for originally loaded classes ++ jint revision_number() const { ++ return _revision_number; ++ } ++ ++ bool was_redefined() const { ++ return _revision_number != -1; ++ } ++ ++ void set_revision_number(jint number) { ++ _revision_number = number; ++ } ++ ++ char method_redefinition_policy() { ++ return _method_redefinition_policy; ++ } ++ ++ void set_method_redefinition_policy(char v) { ++ 
_method_redefinition_policy = v; ++ } ++ ++ char field_redefinition_policy() { ++ return _field_redefinition_policy; ++ } ++ ++ void set_field_redefinition_policy(char v) { ++ _field_redefinition_policy = v; ++ } ++ ++ char static_field_redefinition_policy() { ++ return _static_field_redefinition_policy; ++ } ++ ++ void set_static_field_redefinition_policy(char v) { ++ _static_field_redefinition_policy = v; ++ } ++ ++ klassOop oldest_version() const { ++ if (_old_version == NULL) { return this->as_klassOop(); } ++ else { return _old_version->klass_part()->oldest_version(); }; ++ } ++ ++ klassOop newest_version() const { ++ if (_new_version == NULL) { return this->as_klassOop(); } ++ else { return _new_version->klass_part()->newest_version(); }; ++ } ++ ++ klassOop active_version() const { ++ if (_new_version == NULL || _new_version->klass_part()->is_redefining()) { return this->as_klassOop(); assert(!this->is_redefining(), "just checking"); } ++ else { return _new_version->klass_part()->active_version(); }; ++ } ++ ++ bool is_newest_version() const { ++ return _new_version == NULL; ++ } ++ ++ // END class redefinition utilities ++ + objArrayOop secondary_supers() const { return _secondary_supers; } + void set_secondary_supers(objArrayOop k) { oop_store_without_check((oop*) &_secondary_supers, (oop) k); } + +@@ -361,6 +505,8 @@ class Klass : public Klass_vtbl { + void set_next_sibling(klassOop s); + + oop* adr_super() const { return (oop*)&_super; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } + oop* adr_primary_supers() const { return (oop*)&_primary_supers[0]; } + oop* adr_secondary_super_cache() const { return (oop*)&_secondary_super_cache; } + oop* adr_secondary_supers()const { return (oop*)&_secondary_supers; } +@@ -490,6 +636,7 @@ class Klass : public Klass_vtbl { + return search_secondary_supers(k); + } + } ++ void update_supers_to_newest_version(); + bool search_secondary_supers(klassOop k) const; + + // Find LCA in class hierarchy +@@ -816,6 +963,8 @@ class Klass : public Klass_vtbl { + + + inline oop klassOopDesc::java_mirror() const { return klass_part()->java_mirror(); } ++inline klassOop klassOopDesc::old_version() const { return klass_part()->old_version(); } ++inline klassOop klassOopDesc::new_version() const { return klass_part()->new_version(); } + + + #endif // SHARE_VM_OOPS_KLASS_HPP +diff --git a/src/share/vm/oops/klassKlass.cpp b/src/share/vm/oops/klassKlass.cpp +index 06809d5..9c08f32 100644 +--- a/src/share/vm/oops/klassKlass.cpp ++++ b/src/share/vm/oops/klassKlass.cpp +@@ -68,6 +68,8 @@ void klassKlass::oop_follow_contents(oop obj) { + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + MarkSweep::mark_and_push(k->adr_super()); ++ MarkSweep::mark_and_push(k->adr_old_version()); ++ MarkSweep::mark_and_push(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::mark_and_push(k->adr_primary_supers()+i); + MarkSweep::mark_and_push(k->adr_secondary_super_cache()); +@@ -87,6 +89,8 @@ void klassKlass::oop_follow_contents(ParCompactionManager* cm, + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + PSParallelCompact::mark_and_push(cm, k->adr_super()); ++ PSParallelCompact::mark_and_push(cm, k->adr_old_version()); ++ PSParallelCompact::mark_and_push(cm, k->adr_new_version()); + for (juint i = 0; i < 
Klass::primary_super_limit(); i++) + PSParallelCompact::mark_and_push(cm, k->adr_primary_supers()+i); + PSParallelCompact::mark_and_push(cm, k->adr_secondary_super_cache()); +@@ -106,6 +110,8 @@ int klassKlass::oop_oop_iterate(oop obj, OopClosure* blk) { + int size = oop_size(obj); + Klass* k = Klass::cast(klassOop(obj)); + blk->do_oop(k->adr_super()); ++ blk->do_oop(k->adr_old_version()); ++ blk->do_oop(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + blk->do_oop(k->adr_primary_supers()+i); + blk->do_oop(k->adr_secondary_super_cache()); +@@ -134,6 +140,10 @@ int klassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { + oop* adr; + adr = k->adr_super(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + for (juint i = 0; i < Klass::primary_super_limit(); i++) { + adr = k->adr_primary_supers()+i; + if (mr.contains(adr)) blk->do_oop(adr); +@@ -147,6 +157,8 @@ int klassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { + // The following are "weak links" in the perm gen and are + // treated specially in a later phase of a perm gen collection. + assert(oop(k)->is_perm(), "should be in perm"); ++ assert(oop(k->adr_old_version())->is_perm(), "should be in perm"); ++ assert(oop(k->adr_new_version())->is_perm(), "should be in perm"); + assert(oop(k->adr_subklass())->is_perm(), "should be in perm"); + assert(oop(k->adr_next_sibling())->is_perm(), "should be in perm"); + if (blk->should_remember_klasses() +@@ -167,6 +179,8 @@ int klassKlass::oop_adjust_pointers(oop obj) { + Klass* k = Klass::cast(klassOop(obj)); + + MarkSweep::adjust_pointer(k->adr_super()); ++ MarkSweep::adjust_pointer(k->adr_new_version()); ++ MarkSweep::adjust_pointer(k->adr_old_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::adjust_pointer(k->adr_primary_supers()+i); + MarkSweep::adjust_pointer(k->adr_secondary_super_cache()); +diff --git a/src/share/vm/oops/klassOop.hpp b/src/share/vm/oops/klassOop.hpp +index f212fc5..9731a9c 100644 +--- a/src/share/vm/oops/klassOop.hpp ++++ b/src/share/vm/oops/klassOop.hpp +@@ -41,8 +41,10 @@ class klassOopDesc : public oopDesc { + // returns the Klass part containing dispatching behavior + Klass* klass_part() const { return (Klass*)((address)this + sizeof(klassOopDesc)); } + +- // Convenience wrapper ++ // Convenience wrappers + inline oop java_mirror() const; ++ inline klassOop old_version() const; ++ inline klassOop new_version() const; + + private: + // These have no implementation since klassOop should never be accessed in this fashion +diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp +index 94e2e04..09d3088 100644 +--- a/src/share/vm/oops/klassVtable.cpp ++++ b/src/share/vm/oops/klassVtable.cpp +@@ -97,7 +97,8 @@ void klassVtable::compute_vtable_size_and_num_mirandas(int &vtable_length, + vtable_length = Universe::base_vtable_size(); + } + +- if (super == NULL && !Universe::is_bootstrapping() && ++ // (tw) TODO: Check if we can relax the condition on a fixed base vtable size ++ /*if (super == NULL && !Universe::is_bootstrapping() && + vtable_length != Universe::base_vtable_size()) { + // Someone is attempting to redefine java.lang.Object incorrectly. 
The + // only way this should happen is from +@@ -107,9 +108,9 @@ void klassVtable::compute_vtable_size_and_num_mirandas(int &vtable_length, + vtable_length = Universe::base_vtable_size(); + } + assert(super != NULL || vtable_length == Universe::base_vtable_size(), +- "bad vtable size for class Object"); ++ "bad vtable size for class Object");*/ + assert(vtable_length % vtableEntry::size() == 0, "bad vtable length"); +- assert(vtable_length >= Universe::base_vtable_size(), "vtable too small"); ++ //assert(vtable_length >= Universe::base_vtable_size(), "vtable too small"); + } + + int klassVtable::index_of(methodOop m, int len) const { +@@ -657,20 +658,6 @@ bool klassVtable::check_no_old_or_obsolete_entries() { + return true; + } + +-void klassVtable::dump_vtable() { +- tty->print_cr("vtable dump --"); +- for (int i = 0; i < length(); i++) { +- methodOop m = unchecked_method_at(i); +- if (m != NULL) { +- tty->print(" (%5d) ", i); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- } +-} +- + // CDS/RedefineClasses support - clear vtables so they can be reinitialized + void klassVtable::clear_vtable() { + for (int i = 0; i < _length; i++) table()[i].clear(); +@@ -1241,6 +1228,7 @@ void klassVtable::verify(outputStream* st, bool forced) { + + void klassVtable::verify_against(outputStream* st, klassVtable* vt, int index) { + vtableEntry* vte = &vt->table()[index]; ++ if (vte->method() == NULL || table()[index].method() == NULL) return; + if (vte->method()->name() != table()[index].method()->name() || + vte->method()->signature() != table()[index].method()->signature()) { + fatal("mismatched name/signature of vtable entries"); +@@ -1260,6 +1248,8 @@ void klassVtable::print() { + + void vtableEntry::verify(klassVtable* vt, outputStream* st) { + NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true)); ++ // (tw) TODO: Check: Does not hold? ++ if (method() != NULL) { + assert(method() != NULL, "must have set method"); + method()->verify(); + // we sub_type, because it could be a miranda method +@@ -1267,7 +1257,13 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { + #ifndef PRODUCT + print(); + #endif +- fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this)); ++ klassOop first_klass = vt->klass()(); ++ klassOop second_klass = method()->method_holder(); ++ // (tw) the following fatal does not work for old versions of classes ++ if (first_klass->klass_part()->is_newest_version()) { ++ //fatal1("vtableEntry %#lx: method is from subclass", this); ++ } ++ } + } + } + +@@ -1275,7 +1271,7 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { + + void vtableEntry::print() { + ResourceMark rm; +- tty->print("vtableEntry %s: ", method()->name()->as_C_string()); ++ tty->print("vtableEntry %s: ", (method() == NULL) ? 
"null" : method()->name()->as_C_string()); + if (Verbose) { + tty->print("m %#lx ", (address)method()); + } +@@ -1342,6 +1338,33 @@ void klassVtable::print_statistics() { + tty->print_cr("%6d bytes total", total); + } + ++bool klassVtable::check_no_old_entries() { ++ // Check that there really is no entry ++ for (int i = 0; i < length(); i++) { ++ methodOop m = unchecked_method_at(i); ++ if (m != NULL) { ++ if (m->is_old() || !m->method_holder()->klass_part()->is_newest_version()) { ++ return false; ++ } ++ } ++ } ++ return true; ++} ++ ++void klassVtable::dump_vtable() { ++ tty->print_cr("vtable dump --"); ++ for (int i = 0; i < length(); i++) { ++ methodOop m = unchecked_method_at(i); ++ if (m != NULL) { ++ tty->print(" (%5d) ", i); ++ m->access_flags().print_on(tty); ++ tty->print(" -- "); ++ m->print_name(tty); ++ tty->cr(); ++ } ++ } ++} ++ + int klassItable::_total_classes; // Total no. of classes with itables + long klassItable::_total_size; // Total no. of bytes used for itables + +diff --git a/src/share/vm/oops/klassVtable.hpp b/src/share/vm/oops/klassVtable.hpp +index 405b0c7..0c8d2f7 100644 +--- a/src/share/vm/oops/klassVtable.hpp ++++ b/src/share/vm/oops/klassVtable.hpp +@@ -100,6 +100,7 @@ class klassVtable : public ResourceObj { + int methods_length, bool * trace_name_printed); + bool check_no_old_or_obsolete_entries(); + void dump_vtable(); ++ bool check_no_old_entries(); + + // Garbage collection + void oop_follow_contents(); +diff --git a/src/share/vm/oops/methodKlass.cpp b/src/share/vm/oops/methodKlass.cpp +index 75d0b09..f1b7d2f 100644 +--- a/src/share/vm/oops/methodKlass.cpp ++++ b/src/share/vm/oops/methodKlass.cpp +@@ -93,6 +93,10 @@ methodOop methodKlass::allocate(constMethodHandle xconst, + m->set_adapter_entry(NULL); + m->clear_code(); // from_c/from_i get set to c2i/i2i + ++ m->set_forward_method(NULL); ++ m->set_new_version(NULL); ++ m->set_old_version(NULL); ++ + if (access_flags.is_native()) { + m->clear_native_function(); + m->set_signature_handler(NULL); +@@ -122,6 +126,9 @@ void methodKlass::oop_follow_contents(oop obj) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::mark_and_push(m->adr_constMethod()); ++ MarkSweep::mark_and_push(m->adr_forward_method()); ++ MarkSweep::mark_and_push(m->adr_new_version()); ++ MarkSweep::mark_and_push(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::mark_and_push(m->adr_method_data()); + } +@@ -135,6 +142,9 @@ void methodKlass::oop_follow_contents(ParCompactionManager* cm, + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. 
+ PSParallelCompact::mark_and_push(cm, m->adr_constMethod()); ++ PSParallelCompact::mark_and_push(cm, m->adr_forward_method()); ++ PSParallelCompact::mark_and_push(cm, m->adr_new_version()); ++ PSParallelCompact::mark_and_push(cm, m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::mark_and_push(cm, m->adr_method_data()); +@@ -152,6 +162,9 @@ int methodKlass::oop_oop_iterate(oop obj, OopClosure* blk) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves + blk->do_oop(m->adr_constMethod()); ++ blk->do_oop(m->adr_forward_method()); ++ blk->do_oop(m->adr_new_version()); ++ blk->do_oop(m->adr_old_version()); + if (m->method_data() != NULL) { + blk->do_oop(m->adr_method_data()); + } +@@ -170,6 +183,12 @@ int methodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { + oop* adr; + adr = m->adr_constMethod(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_forward_method(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + if (m->method_data() != NULL) { + adr = m->adr_method_data(); + if (mr.contains(adr)) blk->do_oop(adr); +@@ -187,6 +206,9 @@ int methodKlass::oop_adjust_pointers(oop obj) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::adjust_pointer(m->adr_constMethod()); ++ MarkSweep::adjust_pointer(m->adr_forward_method()); ++ MarkSweep::adjust_pointer(m->adr_new_version()); ++ MarkSweep::adjust_pointer(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::adjust_pointer(m->adr_method_data()); + } +@@ -202,6 +224,9 @@ int methodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { + assert(obj->is_method(), "should be method"); + methodOop m = methodOop(obj); + PSParallelCompact::adjust_pointer(m->adr_constMethod()); ++ PSParallelCompact::adjust_pointer(m->adr_forward_method()); ++ PSParallelCompact::adjust_pointer(m->adr_new_version()); ++ PSParallelCompact::adjust_pointer(m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::adjust_pointer(m->adr_method_data()); +@@ -222,7 +247,18 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) { + methodOop m = methodOop(obj); + // get the effect of PrintOopAddress, always, for methods: + st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m); +- st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr(); ++ st->print (" - method holder: "); m->method_holder()->print_value_on(st); ++ ++ if (m->method_holder()->klass_part()->new_version() != NULL) { ++ st->print(" (old)"); ++ } ++ st->cr(); ++ ++ st->print_cr(" - is obsolete: %d", (int)(m->is_obsolete())); ++ st->print_cr(" - is old: %d", (int)(m->is_old())); ++ st->print_cr(" - new version: "INTPTR_FORMAT" ", (address)(m->new_version())); ++ st->print_cr(" - old version: "INTPTR_FORMAT" ", (address)(m->old_version())); ++ st->print_cr(" - holder revision: %d", m->method_holder()->klass_part()->revision_number()); + st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants()); + m->constants()->print_value_on(st); st->cr(); + st->print (" - access: 0x%x ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr(); +diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp +index 
4f59d3a..5cdf147 100644 +--- a/src/share/vm/oops/methodOop.cpp ++++ b/src/share/vm/oops/methodOop.cpp +@@ -328,6 +328,70 @@ void methodOopDesc::cleanup_inline_caches() { + } + + ++bool methodOopDesc::is_in_code_section(int bci) { ++ // There is no table => every bci is in the code section table. ++ if (!constMethod()->has_code_section_table()) return true; ++ ++ constMethodOop m = constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (bci >= new_index && bci < new_index + length) { ++ // We are in a specified code section. ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++int methodOopDesc::calculate_forward_bci(int bci, methodOop new_method) { ++ int original_bci = -1; ++ if (constMethod()->has_code_section_table()) { ++ assert(is_in_code_section(bci), "can only forward in section"); ++ // First calculate back to original bci. ++ constMethodOop m = constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 original_index = m->code_section_original_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (bci >= new_index && bci < new_index + length) { ++ // We are in a specified code section. ++ original_bci = bci - new_index + original_index; ++ break; ++ } ++ } ++ assert (original_bci != -1, "must have been in code section"); ++ } else { ++ // No code sections specified => we are in an original method. ++ original_bci = bci; ++ } ++ ++ // We know the original bci => match to new method. ++ int new_bci = -1; ++ if (new_method->constMethod()->has_code_section_table()) { ++ // Map to new bci. ++ constMethodOop m = new_method->constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 original_index = m->code_section_original_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (original_bci >= original_index && original_bci < original_index + length) { ++ new_bci = original_bci - original_index + new_index; ++ break; ++ } ++ } ++ assert (new_bci != -1, "must have found new code section"); ++ ++ } else { ++ // We are in an original method. ++ new_bci = original_bci; ++ } ++ ++ return new_bci; ++} ++ ++ + int methodOopDesc::extra_stack_words() { + // not an inline function, to avoid a header dependency on Interpreter + return extra_stack_entries() * Interpreter::stackElementSize; +@@ -1061,6 +1125,9 @@ methodHandle methodOopDesc::clone_with_new_data(methodHandle m, u_char* new_code + + // Reset correct method/const method, method size, and parameter info + newm->set_constMethod(newcm); ++ newm->set_forward_method(newm->forward_method()); ++ newm->set_new_version(newm->new_version()); ++ newm->set_old_version(newm->old_version()); + newm->constMethod()->set_code_size(new_code_length); + newm->constMethod()->set_constMethod_size(new_const_method_size); + newm->set_method_size(new_method_size); +diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp +index 486e106..11e52bb 100644 +--- a/src/share/vm/oops/methodOop.hpp ++++ b/src/share/vm/oops/methodOop.hpp +@@ -114,6 +114,11 @@ class methodOopDesc : public oopDesc { + AccessFlags _access_flags; // Access flags + int _vtable_index; // vtable index of this method (see VtableIndexFlag) + // note: can have vtables with >2**16 elements (because of inheritance) ++ // (tw) Newer version of method available? 
++ methodOop _forward_method; ++ methodOop _new_version; ++ methodOop _old_version; ++ + #ifdef CC_INTERP + int _result_index; // C++ interpreter needs for converting results to/from stack + #endif +@@ -175,6 +180,32 @@ class methodOopDesc : public oopDesc { + int name_index() const { return constMethod()->name_index(); } + void set_name_index(int index) { constMethod()->set_name_index(index); } + ++ methodOop forward_method() const {return _forward_method; } ++ void set_forward_method(methodOop m) { _forward_method = m; } ++ bool has_forward_method() const { return forward_method() != NULL; } ++ methodOop new_version() const {return _new_version; } ++ void set_new_version(methodOop m) { _new_version = m; } ++ methodOop newest_version() { if(_new_version == NULL) return this; else return new_version()->newest_version(); } ++ ++ methodOop old_version() const {return _old_version; }; ++ void set_old_version(methodOop m) { ++ if (m == NULL) { ++ _old_version = NULL; ++ return; ++ } ++ ++ assert(_old_version == NULL, "may only be set once"); ++ assert(this->code_size() == m->code_size(), "must have same code length"); ++ _old_version = m; ++ } ++ ++ methodOop oldest_version() const { ++ if(_old_version == NULL) return (methodOop)this; ++ else { ++ return old_version()->oldest_version(); ++ } ++ } ++ + // signature + Symbol* signature() const { return constants()->symbol_at(signature_index()); } + int signature_index() const { return constMethod()->signature_index(); } +@@ -670,6 +701,10 @@ class methodOopDesc : public oopDesc { + // Inline cache support + void cleanup_inline_caches(); + ++ // (tw) Method forwarding support. ++ bool is_in_code_section(int bci); ++ int calculate_forward_bci(int bci, methodOop new_method); ++ + // Find if klass for method is loaded + bool is_klass_loaded_by_klass_index(int klass_index) const; + bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const; +@@ -734,6 +769,9 @@ class methodOopDesc : public oopDesc { + + // Garbage collection support + oop* adr_constMethod() const { return (oop*)&_constMethod; } ++ oop* adr_forward_method() const { return (oop*)&_forward_method; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } + oop* adr_method_data() const { return (oop*)&_method_data; } + }; + +diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp +index 5982c88..4873fca 100644 +--- a/src/share/vm/oops/oop.hpp ++++ b/src/share/vm/oops/oop.hpp +@@ -95,6 +95,7 @@ class oopDesc { + narrowOop* compressed_klass_addr(); + + void set_klass(klassOop k); ++ void set_klass_no_check(klassOop k); + + // For klass field compression + int klass_gap() const; +@@ -135,6 +136,7 @@ class oopDesc { + bool is_array() const; + bool is_objArray() const; + bool is_klass() const; ++ bool is_instanceKlass() const; + bool is_thread() const; + bool is_method() const; + bool is_constMethod() const; +diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp +index f4eb2f7..0acb346 100644 +--- a/src/share/vm/oops/oop.inline.hpp ++++ b/src/share/vm/oops/oop.inline.hpp +@@ -123,6 +123,14 @@ inline void oopDesc::set_klass(klassOop k) { + } + } + ++inline void oopDesc::set_klass_no_check(klassOop k) { ++ if (UseCompressedOops) { ++ oop_store_without_check(compressed_klass_addr(), (oop)k); ++ } else { ++ oop_store_without_check(klass_addr(), (oop) k); ++ } ++} ++ + inline int oopDesc::klass_gap() const { + return *(int*)(((intptr_t)this) + 
klass_gap_offset_in_bytes()); + } +@@ -156,6 +164,7 @@ inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_ + inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); } + inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); } + inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); } ++inline bool oopDesc::is_instanceKlass() const { return blueprint()->oop_is_instanceKlass(); } + inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); } + inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); } + inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); } +diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp +index 2123991..6cbd78c 100644 +--- a/src/share/vm/prims/jni.cpp ++++ b/src/share/vm/prims/jni.cpp +@@ -406,7 +406,7 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR + } + } + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, +- Handle(), &st, true, ++ Handle(), &st, true, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp +index 7dcd968..d59052f 100644 +--- a/src/share/vm/prims/jvm.cpp ++++ b/src/share/vm/prims/jvm.cpp +@@ -872,7 +872,7 @@ static jclass jvm_define_class_common(JNIEnv *env, const char *name, + Handle protection_domain (THREAD, JNIHandles::resolve(pd)); + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, + protection_domain, &st, +- verify != 0, ++ verify != 0, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvmtiEnv.cpp b/src/share/vm/prims/jvmtiEnv.cpp +index 4ac6b82..30b8e84 100644 +--- a/src/share/vm/prims/jvmtiEnv.cpp ++++ b/src/share/vm/prims/jvmtiEnv.cpp +@@ -290,7 +290,10 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { + class_definitions[index].klass = jcls; + } + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RetransformClasses */ + +@@ -299,9 +302,12 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { + // class_definitions - pre-checked for NULL + jvmtiError + JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) { +-//TODO: add locking ++ + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RedefineClasses */ + +diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp +index ec8ede3..2bd5983 100644 +--- a/src/share/vm/prims/jvmtiExport.cpp ++++ b/src/share/vm/prims/jvmtiExport.cpp +@@ -2296,7 +2296,7 @@ JvmtiDynamicCodeEventCollector::JvmtiDynamicCodeEventCollector() : _code_blobs(N + // iterate over any code blob descriptors collected and post a + // DYNAMIC_CODE_GENERATED event to the profiler. 
+ JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() { +- assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events"); ++ assert(!JavaThread::current()->owns_locks_but_redefine_classes_lock(), "all locks must be released to post deferred events"); + // iterate over any code blob descriptors that we collected + if (_code_blobs != NULL) { + for (int i=0; i<_code_blobs->length(); i++) { +diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp +index d3fa140..f4f8b57 100644 +--- a/src/share/vm/prims/jvmtiImpl.cpp ++++ b/src/share/vm/prims/jvmtiImpl.cpp +@@ -286,6 +286,8 @@ address JvmtiBreakpoint::getBcp() { + void JvmtiBreakpoint::each_method_version_do(method_action meth_act) { + ((methodOopDesc*)_method->*meth_act)(_bci); + ++ // DCEVM: TODO: Check how we can implement this differently here! ++ + // add/remove breakpoint to/from versions of the method that + // are EMCP. Directly or transitively obsolete methods are + // not saved in the PreviousVersionInfo. +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp +index eb52388..640e7da 100644 +--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -30,498 +30,637 @@ + #include "interpreter/rewriter.hpp" + #include "memory/gcLocker.hpp" + #include "memory/universe.inline.hpp" +-#include "oops/fieldStreams.hpp" ++#include "memory/cardTableRS.hpp" + #include "oops/klassVtable.hpp" ++#include "oops/fieldStreams.hpp" + #include "prims/jvmtiImpl.hpp" + #include "prims/jvmtiRedefineClasses.hpp" ++#include "prims/jvmtiClassFileReconstituter.hpp" + #include "prims/methodComparator.hpp" + #include "runtime/deoptimization.hpp" + #include "runtime/relocator.hpp" + #include "utilities/bitMap.inline.hpp" ++#include "compiler/compileBroker.hpp" + + + objArrayOop VM_RedefineClasses::_old_methods = NULL; + objArrayOop VM_RedefineClasses::_new_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_old_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_new_methods = NULL; +-methodOop* VM_RedefineClasses::_deleted_methods = NULL; +-methodOop* VM_RedefineClasses::_added_methods = NULL; ++int* VM_RedefineClasses::_matching_old_methods = NULL; ++int* VM_RedefineClasses::_matching_new_methods = NULL; ++int* VM_RedefineClasses::_deleted_methods = NULL; ++int* VM_RedefineClasses::_added_methods = NULL; + int VM_RedefineClasses::_matching_methods_length = 0; + int VM_RedefineClasses::_deleted_methods_length = 0; + int VM_RedefineClasses::_added_methods_length = 0; + klassOop VM_RedefineClasses::_the_class_oop = NULL; + ++// Holds the revision number of the current class redefinition ++int VM_RedefineClasses::_revision_number = -1; + +-VM_RedefineClasses::VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind) { ++VM_RedefineClasses::VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind) ++ : VM_GC_Operation(Universe::heap()->total_full_collections(), GCCause::_jvmti_force_gc) { ++ RC_TIMER_START(_timer_total); + _class_count = class_count; 
+ _class_defs = class_defs; + _class_load_kind = class_load_kind; +- _res = JVMTI_ERROR_NONE; ++ _updated_oops = NULL; ++ _result = JVMTI_ERROR_NONE; + } + +-bool VM_RedefineClasses::doit_prologue() { +- if (_class_count == 0) { +- _res = JVMTI_ERROR_NONE; +- return false; ++VM_RedefineClasses::~VM_RedefineClasses() { ++ { ++ MonitorLockerEx ml(RedefinitionSync_lock); ++ Threads::set_wait_at_instrumentation_entry(false); ++ ml.notify_all(); ++ } ++ ++ unlock_threads(); ++ RC_TIMER_STOP(_timer_total); ++ ++ if (TimeRedefineClasses) { ++ tty->print_cr("Timing Prologue: %d", _timer_prologue.milliseconds()); ++ tty->print_cr("Timing Class Loading: %d", _timer_class_loading.milliseconds()); ++ tty->print_cr("Timing Waiting for Lock: %d", _timer_wait_for_locks.milliseconds()); ++ tty->print_cr("Timing Class Linking: %d", _timer_class_linking.milliseconds()); ++ tty->print_cr("Timing Check Type: %d", _timer_check_type.milliseconds()); ++ tty->print_cr("Timing Prepare Redefinition: %d", _timer_prepare_redefinition.milliseconds()); ++ tty->print_cr("Timing Redefinition GC: %d", _timer_redefinition.milliseconds()); ++ tty->print_cr("Timing Epilogue: %d", _timer_vm_op_epilogue.milliseconds()); ++ tty->print_cr("------------------------------------------------------------------"); ++ tty->print_cr("Total Time: %d", _timer_total.milliseconds()); + } +- if (_class_defs == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++} ++ ++// Searches for all affected classes and sorts them so that a supertype always comes before its subtypes. ++jvmtiError VM_RedefineClasses::find_sorted_affected_classes(GrowableArray<instanceKlassHandle> *all_affected_klasses) { ++ ++ // Create array with all classes for which the redefine command was given ++ GrowableArray<instanceKlassHandle> klasses_to_redefine; ++ for (int i=0; i<_class_count; i++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); ++ instanceKlassHandle klass_handle(Thread::current(), java_lang_Class::as_klassOop(mirror)); ++ klasses_to_redefine.append(klass_handle); ++ assert(klass_handle->new_version() == NULL, "Must be new class"); + } +- for (int i = 0; i < _class_count; i++) { +- if (_class_defs[i].klass == NULL) { +- _res = JVMTI_ERROR_INVALID_CLASS; +- return false; +- } +- if (_class_defs[i].class_byte_count == 0) { +- _res = JVMTI_ERROR_INVALID_CLASS_FORMAT; +- return false; +- } +- if (_class_defs[i].class_bytes == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++ ++ // Find classes not directly redefined, but affected by a redefinition (because one of their supertypes is redefined) ++ GrowableArray<instanceKlassHandle> affected_classes; ++ FindAffectedKlassesClosure closure(&klasses_to_redefine, &affected_classes); ++ ++ // Trace affected classes ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ RC_TRACE(0x00000001, ("Klasses affected: %d", ++ affected_classes.length())); ++ for (int i=0; i<affected_classes.length(); i++) { ++ RC_TRACE(0x00000001, ("%s", ++ affected_classes.at(i)->name()->as_C_string())); + } + } + +- // Start timer after all the sanity checks; not quite accurate, but +- // better than adding a bunch of stop() calls. 
+- RC_TIMER_START(_timer_vm_op_prologue); ++ // Add the array of affected classes and the array of redefined classes to get a list of all classes that need a redefinition ++ all_affected_klasses->appendAll(&klasses_to_redefine); ++ all_affected_klasses->appendAll(&affected_classes); + +- // We first load new class versions in the prologue, because somewhere down the +- // call chain it is required that the current thread is a Java thread. +- _res = load_new_class_versions(Thread::current()); +- if (_res != JVMTI_ERROR_NONE) { +- // Free os::malloc allocated memory in load_new_class_version. +- os::free(_scratch_classes); +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return false; ++ // Sort the affected klasses such that a supertype is always on a smaller array index than its subtype. ++ jvmtiError result = do_topological_class_sorting(_class_defs, _class_count, &affected_classes, all_affected_klasses, Thread::current()); ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ RC_TRACE(0x00000001, ("Redefine order: ")); ++ for (int i=0; i<all_affected_klasses->length(); i++) { ++ RC_TRACE(0x00000001, ("%s", ++ all_affected_klasses->at(i)->name()->as_C_string())); ++ } + } + +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return true; ++ return result; + } + +-void VM_RedefineClasses::doit() { +- Thread *thread = Thread::current(); ++// Searches for the class bytes of the given class and returns them as a byte array. ++jvmtiError VM_RedefineClasses::find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed) { + +- if (UseSharedSpaces) { +- // Sharing is enabled so we remap the shared readonly space to +- // shared readwrite, private just in case we need to redefine +- // a shared class. We do the remap during the doit() phase of +- // the safepoint to be safer. +- if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { +- RC_TRACE_WITH_THREAD(0x00000001, thread, +- ("failed to remap shared readonly space to readwrite, private")); +- _res = JVMTI_ERROR_INTERNAL; +- return; ++ *not_changed = false; ++ ++ // Search for the index in the redefinition array that corresponds to the current class ++ int j; ++ for (j=0; j<_class_count; j++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[j].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ if (the_class_oop == the_class()) { ++ break; + } + } + +- for (int i = 0; i < _class_count; i++) { +- redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread); +- } +- // Disable any dependent concurrent compilations +- SystemDictionary::notice_modification(); ++ if (j == _class_count) { + +- // Set flag indicating that some invariants are no longer true. +- // See jvmtiExport.hpp for detailed explanation. +- JvmtiExport::set_has_redefined_a_class(); ++ *not_changed = true; + +-// check_class() is optionally called for product bits, but is +-// always called for non-product bits. +-#ifdef PRODUCT +- if (RC_TRACE_ENABLED(0x00004000)) { +-#endif +- RC_TRACE_WITH_THREAD(0x00004000, thread, ("calling check_class")); +- SystemDictionary::classes_do(check_class, thread); +-#ifdef PRODUCT +- } +-#endif +-} ++ // Redefine with same bytecodes. This is a class that is only indirectly affected by redefinition, ++ // so the user did not specify a different bytecode for that class. + +-void VM_RedefineClasses::doit_epilogue() { +- // Free os::malloc allocated memory. +- // The memory allocated in redefine will be free'ed in next VM operation. 
+- os::free(_scratch_classes); +- +- if (RC_TRACE_ENABLED(0x00000004)) { +- // Used to have separate timers for "doit" and "all", but the timer +- // overhead skewed the measurements. +- jlong doit_time = _timer_rsc_phase1.milliseconds() + +- _timer_rsc_phase2.milliseconds(); +- jlong all_time = _timer_vm_op_prologue.milliseconds() + doit_time; +- +- RC_TRACE(0x00000004, ("vm_op: all=" UINT64_FORMAT +- " prologue=" UINT64_FORMAT " doit=" UINT64_FORMAT, all_time, +- _timer_vm_op_prologue.milliseconds(), doit_time)); +- RC_TRACE(0x00000004, +- ("redefine_single_class: phase1=" UINT64_FORMAT " phase2=" UINT64_FORMAT, +- _timer_rsc_phase1.milliseconds(), _timer_rsc_phase2.milliseconds())); ++ if (the_class->get_cached_class_file_bytes() == NULL) { ++ // not cached, we need to reconstitute the class file from VM representation ++ constantPoolHandle constants(Thread::current(), the_class->constants()); ++ ObjectLocker ol(constants, Thread::current()); // lock constant pool while we query it ++ ++ JvmtiClassFileReconstituter reconstituter(the_class); ++ if (reconstituter.get_error() != JVMTI_ERROR_NONE) { ++ return reconstituter.get_error(); ++ } ++ ++ *class_byte_count = (jint)reconstituter.class_file_size(); ++ *class_bytes = (unsigned char*)reconstituter.class_file_bytes(); ++ ++ } else { ++ ++ // it is cached, get it from the cache ++ *class_byte_count = the_class->get_cached_class_file_len(); ++ *class_bytes = the_class->get_cached_class_file_bytes(); ++ } ++ ++ } else { ++ ++ // Redefine with bytecodes at index j ++ *class_bytes = _class_defs[j].class_bytes; ++ *class_byte_count = _class_defs[j].class_byte_count; + } ++ ++ return JVMTI_ERROR_NONE; + } + +-bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { +- // classes for primitives cannot be redefined +- if (java_lang_Class::is_primitive(klass_mirror)) { ++// Prologue of the VM operation, called on the Java thread in parallel to normal program execution ++bool VM_RedefineClasses::doit_prologue() { ++ ++ _revision_number++; ++ RC_TRACE(0x00000001, ("Redefinition with revision number %d started!", _revision_number)); ++ ++ assert(Thread::current()->is_Java_thread(), "must be Java thread"); ++ RC_TIMER_START(_timer_prologue); ++ ++ if (!check_arguments()) { ++ RC_TIMER_STOP(_timer_prologue); + return false; + } +- klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror); +- // classes for arrays cannot be redefined +- if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) { ++ ++ // We first load new class versions in the prologue, because somewhere down the ++ // call chain it is required that the current thread is a Java thread. ++ _new_classes = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceKlassHandle>(5, true); ++ _result = load_new_class_versions(Thread::current()); ++ ++ RC_TRACE(0x00000001, ("Loaded new class versions!")); ++ if (_result != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("error occurred: %d!", _result)); ++ delete _new_classes; ++ _new_classes = NULL; ++ RC_TIMER_STOP(_timer_prologue); + return false; + } ++ ++ RC_TRACE(0x00000001, ("nearly finished")); ++ VM_GC_Operation::doit_prologue(); ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TRACE(0x00000001, ("doit_prologue finished!")); + return true; + } + +-// Append the current entry at scratch_i in scratch_cp to *merge_cp_p +-// where the end of *merge_cp_p is specified by *merge_cp_length_p. For +-// direct CP entries, there is just the current entry to append. 
For +-// indirect and double-indirect CP entries, there are zero or more +-// referenced CP entries along with the current entry to append. +-// Indirect and double-indirect CP entries are handled by recursive +-// calls to append_entry() as needed. The referenced CP entries are +-// always appended to *merge_cp_p before the referee CP entry. These +-// referenced CP entries may already exist in *merge_cp_p in which case +-// there is nothing extra to append and only the current entry is +-// appended. +-void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp, +- int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, +- TRAPS) { +- +- // append is different depending on entry tag type +- switch (scratch_cp->tag_at(scratch_i).value()) { +- +- // The old verifier is implemented outside the VM. It loads classes, +- // but does not resolve constant pool entries directly so we never +- // see Class entries here with the old verifier. Similarly the old +- // verifier does not like Class entries in the input constant pool. +- // The split-verifier is implemented in the VM so it can optionally +- // and directly resolve constant pool entries to load classes. The +- // split-verifier can accept either Class entries or UnresolvedClass +- // entries in the input constant pool. We revert the appended copy +- // back to UnresolvedClass so that either verifier will be happy +- // with the constant pool entry. +- case JVM_CONSTANT_Class: +- { +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- (*merge_cp_p)->unresolved_klass_at_put(*merge_cp_length_p, +- scratch_cp->klass_name_at(scratch_i)); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // these are direct CP entries so they can be directly appended, +- // but double and long take two constant pool entries +- case JVM_CONSTANT_Double: // fall through +- case JVM_CONSTANT_Long: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p) += 2; +- } break; +- +- // these are direct CP entries so they can be directly appended +- case JVM_CONSTANT_Float: // fall through +- case JVM_CONSTANT_Integer: // fall through +- case JVM_CONSTANT_Utf8: // fall through +- +- // This was an indirect CP entry, but it has been changed into +- // an interned string so this entry can be directly appended. +- case JVM_CONSTANT_String: // fall through +- +- // These were indirect CP entries, but they have been changed into +- // Symbol*s so these entries can be directly appended. +- case JVM_CONSTANT_UnresolvedClass: // fall through +- case JVM_CONSTANT_UnresolvedString: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); ++// Checks basic properties of the arguments of the redefinition command. ++bool VM_RedefineClasses::check_arguments() { + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. 
+- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++ if (_class_count == 0) RC_ABORT(JVMTI_ERROR_NONE); ++ if (_class_defs == NULL) RC_ABORT(JVMTI_ERROR_NULL_POINTER); ++ for (int i = 0; i < _class_count; i++) { ++ if (_class_defs[i].klass == NULL) RC_ABORT(JVMTI_ERROR_INVALID_CLASS); ++ if (_class_defs[i].class_byte_count == 0) RC_ABORT(JVMTI_ERROR_INVALID_CLASS_FORMAT); ++ if (_class_defs[i].class_bytes == NULL) RC_ABORT(JVMTI_ERROR_NULL_POINTER); ++ } + +- // this is an indirect CP entry so it needs special handling +- case JVM_CONSTANT_NameAndType: +- { +- int name_ref_i = scratch_cp->name_ref_index_at(scratch_i); +- int new_name_ref_i = 0; +- bool match = (name_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_ref_i, *merge_cp_p, name_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match ++ return true; ++} + +- int found_i = scratch_cp->find_matching_entry(name_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_ref_i = found_i; +- map_index(scratch_cp, name_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_name_ref_i = *merge_cp_length_p - 1; ++jvmtiError VM_RedefineClasses::check_exception() const { ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ RC_TRACE(0x00000001, ("parse_stream exception: '%s'", ++ ex_name->as_C_string())); ++ if (TraceRedefineClasses >= 1) { ++ java_lang_Throwable::print(PENDING_EXCEPTION, tty); ++ tty->print_cr(""); ++ } ++ CLEAR_PENDING_EXCEPTION; ++ ++ if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { ++ return JVMTI_ERROR_UNSUPPORTED_VERSION; ++ } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { ++ return JVMTI_ERROR_INVALID_CLASS_FORMAT; ++ } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; ++ } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { ++ // The message will be "XXX (wrong name: YYY)" ++ return JVMTI_ERROR_NAMES_DONT_MATCH; ++ } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { ++ return JVMTI_ERROR_OUT_OF_MEMORY; ++ } else { ++ // Just in case more exceptions can be thrown.. ++ return JVMTI_ERROR_FAILS_VERIFICATION; ++ } ++ } ++ ++ return JVMTI_ERROR_NONE; ++} ++ ++// Loads all new class versions and stores the instanceKlass handles in an array. 
++jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { ++ ++ ResourceMark rm(THREAD); ++ ++ RC_TRACE(0x00000001, ("===================================================================")); ++ RC_TRACE(0x00000001, ("load new class versions (%d)", ++ _class_count)); ++ ++ // Retrieve an array of all classes that need to be redefined ++ GrowableArray<instanceKlassHandle> all_affected_klasses; ++ jvmtiError err = find_sorted_affected_classes(&all_affected_klasses); ++ if (err != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error finding sorted affected classes: %d", ++ (int)err)); ++ return err; ++ } ++ ++ ++ JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); ++ ++ _max_redefinition_flags = Klass::NoRedefinition; ++ jvmtiError result = JVMTI_ERROR_NONE; ++ ++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ RC_TRACE(0x00000002, ("Processing affected class %d of %d", ++ i+1, all_affected_klasses.length())); ++ ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ RC_TRACE(0x00000002, ("name=%s", ++ the_class->name()->as_C_string())); ++ ++ the_class->link_class(THREAD); ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ // Find new class bytes ++ const unsigned char* class_bytes; ++ jint class_byte_count; ++ jvmtiError error; ++ jboolean not_changed; ++ if ((error = find_class_bytes(the_class, &class_bytes, &class_byte_count, ¬_changed)) != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error finding class bytes: %d", ++ (int)error)); ++ result = error; ++ break; ++ } ++ assert(class_bytes != NULL && class_byte_count != 0, "Class bytes defined at this point!"); ++ ++ ++ // Set redefined class handle in JvmtiThreadState class. ++ // This redefined class is sent to agent event handler for class file ++ // load hook event. ++ state->set_class_being_redefined(&the_class, _class_load_kind); ++ ++ RC_TRACE(0x00000002, ("Before resolving from stream")); ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_loading); ++ ++ ++ // Parse the stream. ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ Symbol* the_class_sym = the_class->name(); ++ ClassFileStream st((u1*) class_bytes, class_byte_count, (char *)"__VM_RedefineClasses__"); ++ instanceKlassHandle new_class(THREAD, SystemDictionary::resolve_from_stream(the_class_sym, ++ the_class_loader, ++ protection_domain, ++ &st, ++ true, ++ the_class, ++ THREAD)); ++ ++ not_changed = false; ++ ++ RC_TIMER_STOP(_timer_class_loading); ++ RC_TIMER_START(_timer_prologue); ++ ++ RC_TRACE(0x00000002, ("After resolving class from stream!")); ++ // Clear class_being_redefined just to be sure. 
++ state->clear_class_being_redefined(); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++#ifdef ASSERT ++ ++ assert(new_class() != NULL, "Class could not be loaded!"); ++ assert(new_class() != the_class(), "must be different"); ++ assert(new_class->new_version() == NULL && new_class->old_version() != NULL, ""); ++ ++ ++ objArrayOop k_interfaces = new_class->local_interfaces(); ++ for (int j=0; j<k_interfaces->length(); j++) { ++ assert(((klassOop)k_interfaces->obj_at(j))->klass_part()->is_newest_version(), "just checking"); ++ } ++ ++ if (!THREAD->is_Compiler_thread()) { ++ ++ RC_TRACE(0x00000002, ("name=%s loader="INTPTR_FORMAT" protection_domain="INTPTR_FORMAT" ", ++ the_class->name()->as_C_string(), ++ (address)(the_class->class_loader()), ++ (address)(the_class->protection_domain()))); ++ // If we are on the compiler thread, we must not try to resolve a class. ++ klassOop systemLookup = SystemDictionary::resolve_or_null(the_class->name(), the_class->class_loader(), the_class->protection_domain(), THREAD); ++ ++ if (systemLookup != NULL) { ++ assert(systemLookup == new_class->old_version(), "Old class must be in system dictionary!"); ++ ++ ++ Klass *subklass = new_class()->klass_part()->subklass(); ++ while (subklass != NULL) { ++ assert(subklass->new_version() == NULL, "Most recent version of class!"); ++ subklass = subklass->next_sibling(); + } ++ } else { ++ // This can presumably happen for reflection-generated classes. ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } + +- int signature_ref_i = scratch_cp->signature_ref_index_at(scratch_i); +- int new_signature_ref_i = 0; +- match = (signature_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(signature_ref_i, *merge_cp_p, +- signature_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(signature_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != signature_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_signature_ref_i = found_i; +- map_index(scratch_cp, signature_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, signature_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_signature_ref_i = *merge_cp_length_p - 1; +- } ++#endif ++ ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ if (new_class->layout_helper() != the_class->layout_helper()) { ++ RC_TRACE(0x00000001, ("Instance size change for class %s: new=%d old=%d", ++ new_class->name()->as_C_string(), ++ new_class->layout_helper(), ++ the_class->layout_helper())); + } ++ } + +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_name_ref_i and new_signature_ref_i will both be 0. +- // In that case, all we are appending is the current entry. 
+- if (new_name_ref_i == 0) { +- new_name_ref_i = name_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d name_ref_index change: %d to %d", +- *merge_cp_length_p, name_ref_i, new_name_ref_i)); ++ // Set the new version of the class ++ new_class->set_revision_number(_revision_number); ++ new_class->set_redefinition_index(i); ++ the_class->set_new_version(new_class()); ++ _new_classes->append(new_class); ++ ++ assert(new_class->new_version() == NULL, ""); ++ ++ int redefinition_flags = Klass::NoRedefinition; ++ ++ if (not_changed) { ++ redefinition_flags = Klass::NoRedefinition; ++ } else if (AllowAdvancedClassRedefinition) { ++ redefinition_flags = calculate_redefinition_flags(new_class); ++ } else { ++ jvmtiError allowed = check_redefinition_allowed(new_class); ++ if (allowed != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error redefinition not allowed!")); ++ result = allowed; ++ break; + } +- if (new_signature_ref_i == 0) { +- new_signature_ref_i = signature_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d signature_ref_index change: %d to %d", +- *merge_cp_length_p, signature_ref_i, new_signature_ref_i)); ++ redefinition_flags = Klass::ModifyClass; ++ } ++ ++ if (new_class->super() != NULL) { ++ redefinition_flags = redefinition_flags | new_class->super()->klass_part()->redefinition_flags(); ++ } ++ ++ for (int j=0; j<new_class->local_interfaces()->length(); j++) { ++ redefinition_flags = redefinition_flags | ((klassOop)new_class->local_interfaces()->obj_at(j))->klass_part()->redefinition_flags(); ++ } ++ ++ new_class->set_redefinition_flags(redefinition_flags); ++ ++ _max_redefinition_flags = _max_redefinition_flags | redefinition_flags; ++ ++ if ((redefinition_flags & Klass::ModifyInstances) != 0) { ++ // TODO: Check if watch access flags of static fields are updated correctly. ++ calculate_instance_update_information(_new_classes->at(i)()); ++ } else { ++ assert(new_class->layout_helper() >> 1 == new_class->old_version()->klass_part()->layout_helper() >> 1, "must be equal"); ++ assert(new_class->fields()->length() == ((instanceKlass*)new_class->old_version()->klass_part())->fields()->length(), "must be equal"); ++ ++ fieldDescriptor fd_new; ++ fieldDescriptor fd_old; ++ for (JavaFieldStream fs(new_class); !fs.done(); fs.next()) { ++ fd_new.initialize(new_class(), fs.index()); ++ fd_old.initialize(new_class->old_version(), fs.index()); ++ transfer_special_access_flags(&fd_old, &fd_new); + } ++ } + +- (*merge_cp_p)->name_and_type_at_put(*merge_cp_length_p, +- new_name_ref_i, new_signature_ref_i); +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. 
+- map_index(scratch_cp, scratch_i, *merge_cp_length_p); ++ if (RC_TRACE_ENABLED(0x00000008)) { ++ if (new_class->super() != NULL) { ++ RC_TRACE(0x00000008, ("Super class is %s", ++ new_class->super()->klass_part()->name()->as_C_string())); + } +- (*merge_cp_length_p)++; +- } break; ++ } + +- // this is a double-indirect CP entry so it needs special handling +- case JVM_CONSTANT_Fieldref: // fall through +- case JVM_CONSTANT_InterfaceMethodref: // fall through +- case JVM_CONSTANT_Methodref: +- { +- int klass_ref_i = scratch_cp->uncached_klass_ref_index_at(scratch_i); +- int new_klass_ref_i = 0; +- bool match = (klass_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(klass_ref_i, *merge_cp_p, klass_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match ++#ifdef ASSERT ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->new_version() == NULL, "Super klass must be newest version!"); + +- int found_i = scratch_cp->find_matching_entry(klass_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != klass_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_klass_ref_i = found_i; +- map_index(scratch_cp, klass_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, klass_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. Without the optimization where we +- // use JVM_CONSTANT_UnresolvedClass, then up to two entries +- // could be appended. +- new_klass_ref_i = *merge_cp_length_p - 1; +- } +- } ++ the_class->vtable()->verify(tty); ++ new_class->vtable()->verify(tty); ++#endif + +- int name_and_type_ref_i = +- scratch_cp->uncached_name_and_type_ref_index_at(scratch_i); +- int new_name_and_type_ref_i = 0; +- match = (name_and_type_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_and_type_ref_i, *merge_cp_p, +- name_and_type_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_and_type_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_and_type_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_and_type_ref_i = found_i; +- map_index(scratch_cp, name_and_type_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_and_type_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can append more than +- // one entry so the post call query of *merge_cp_length_p +- // is required in order to get the right index for the +- // JVM_CONSTANT_NameAndType entry. 
+- new_name_and_type_ref_i = *merge_cp_length_p - 1; ++ RC_TRACE(0x00000002, ("Verification done!")); ++ ++ if (i == all_affected_klasses.length() - 1) { ++ ++ // This was the last class processed => check if additional classes have been loaded in the meantime ++ ++ RC_TIMER_STOP(_timer_prologue); ++ lock_threads(); ++ RC_TIMER_START(_timer_prologue); ++ ++ for (int j=0; j<all_affected_klasses.length(); j++) { ++ ++ klassOop initial_klass = all_affected_klasses.at(j)(); ++ Klass *initial_subklass = initial_klass->klass_part()->subklass(); ++ Klass *cur_klass = initial_subklass; ++ while(cur_klass != NULL) { ++ ++ if(cur_klass->oop_is_instance() && cur_klass->is_newest_version()) { ++ instanceKlassHandle handle(THREAD, cur_klass->as_klassOop()); ++ if (!all_affected_klasses.contains(handle)) { ++ ++ int k = i + 1; ++ for (; k<all_affected_klasses.length(); k++) { ++ if (all_affected_klasses.at(k)->is_subtype_of(cur_klass->as_klassOop())) { ++ break; ++ } ++ } ++ all_affected_klasses.insert_before(k, handle); ++ RC_TRACE(0x00000002, ("Adding newly loaded class to affected classes: %s", ++ cur_klass->name()->as_C_string())); ++ } ++ } ++ ++ cur_klass = cur_klass->next_sibling(); + } + } + +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_klass_ref_i and new_name_and_type_ref_i will both be +- // 0. In that case, all we are appending is the current entry. +- if (new_klass_ref_i == 0) { +- new_klass_ref_i = klass_ref_i; +- } +- if (new_name_and_type_ref_i == 0) { +- new_name_and_type_ref_i = name_and_type_ref_i; +- } ++ int new_count = all_affected_klasses.length() - 1 - i; ++ if (new_count != 0) { + +- const char *entry_name; +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Fieldref: +- entry_name = "Fieldref"; +- (*merge_cp_p)->field_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_InterfaceMethodref: +- entry_name = "IFMethodref"; +- (*merge_cp_p)->interface_method_at_put(*merge_cp_length_p, +- new_klass_ref_i, new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_Methodref: +- entry_name = "Methodref"; +- (*merge_cp_p)->method_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- default: +- guarantee(false, "bad switch"); +- break; ++ unlock_threads(); ++ RC_TRACE(0x00000001, ("Found new number of affected classes: %d", ++ new_count)); + } ++ } ++ } + +- if (klass_ref_i != new_klass_ref_i) { +- RC_TRACE(0x00080000, ("%s entry@%d class_index changed: %d to %d", +- entry_name, *merge_cp_length_p, klass_ref_i, new_klass_ref_i)); +- } +- if (name_and_type_ref_i != new_name_and_type_ref_i) { +- RC_TRACE(0x00080000, +- ("%s entry@%d name_and_type_index changed: %d to %d", +- entry_name, *merge_cp_length_p, name_and_type_ref_i, +- new_name_and_type_ref_i)); +- } ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_linking); ++ // Link and verify new classes _after_ all classes have been updated in the system dictionary! 
++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ instanceKlassHandle new_class(the_class->new_version()); + +- // At this stage, Class or UnresolvedClass could be here, but not +- // ClassIndex +- case JVM_CONSTANT_ClassIndex: // fall through ++ RC_TRACE(0x00000002, ("Linking class %d/%d %s", ++ i, ++ all_affected_klasses.length(), ++ the_class->name()->as_C_string())); ++ new_class->link_class(THREAD); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ } ++ RC_TIMER_STOP(_timer_class_linking); ++ RC_TIMER_START(_timer_prologue); + +- // Invalid is used as the tag for the second constant pool entry +- // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should +- // not be seen by itself. +- case JVM_CONSTANT_Invalid: // fall through ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } + +- // At this stage, String or UnresolvedString could be here, but not +- // StringIndex +- case JVM_CONSTANT_StringIndex: // fall through ++ RC_TRACE(0x00000002, ("All classes loaded!")); + +- // At this stage JVM_CONSTANT_UnresolvedClassInError should not be +- // here +- case JVM_CONSTANT_UnresolvedClassInError: // fall through ++#ifdef ASSERT ++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ assert(the_class->new_version() != NULL, "Must have been redefined"); ++ instanceKlassHandle new_version = instanceKlassHandle(THREAD, the_class->new_version()); ++ assert(new_version->new_version() == NULL, "Must be newest version"); + +- default: +- { +- // leave a breadcrumb +- jbyte bad_value = scratch_cp->tag_at(scratch_i).value(); +- ShouldNotReachHere(); +- } break; +- } // end switch tag value +-} // end append_entry() ++ if (!(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL)) { ++ new_version()->print(); ++ new_version->super()->print(); ++ } ++ assert(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL, "Super class must be newest version"); ++ } ++ ++ SystemDictionary::classes_do(check_class, THREAD); ++ ++#endif ++ ++ RC_TRACE(0x00000001, ("Finished verification!")); ++ return JVMTI_ERROR_NONE; ++} ++ ++void VM_RedefineClasses::lock_threads() { ++ ++ RC_TIMER_START(_timer_wait_for_locks); ++ ++ ++ JavaThread *javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->set_should_bailout(true); ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ int cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->compilation_mutex()->lock(); ++ cnt++; ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ RC_TRACE(0x00000002, ("Locked %d compiler threads", cnt)); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ javaThread->redefine_classes_mutex()->lock(); ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ ++ RC_TRACE(0x00000002, ("Locked %d threads", cnt)); ++ ++ RC_TIMER_STOP(_timer_wait_for_locks); ++} + ++void VM_RedefineClasses::unlock_threads() { + +-void VM_RedefineClasses::swap_all_method_annotations(int i, int 
j, instanceKlassHandle scratch_class) { +- typeArrayOop save; ++ int cnt = 0; ++ JavaThread *javaThread = Threads::first(); ++ Thread *thread = Thread::current(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ if (compilerThread->compilation_mutex()->owned_by_self()) { ++ compilerThread->compilation_mutex()->unlock(); ++ cnt++; ++ } ++ } ++ javaThread = javaThread->next(); ++ } + +- save = scratch_class->get_method_annotations_of(i); +- scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); +- scratch_class->set_method_annotations_of(j, save); ++ RC_TRACE(0x00000002, ("Unlocked %d compiler threads", cnt)); + +- save = scratch_class->get_method_parameter_annotations_of(i); +- scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); +- scratch_class->set_method_parameter_annotations_of(j, save); ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ if (javaThread->redefine_classes_mutex()->owned_by_self()) { ++ javaThread->redefine_classes_mutex()->unlock(); ++ } ++ } ++ javaThread = javaThread->next(); ++ } + +- save = scratch_class->get_method_default_annotations_of(i); +- scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); +- scratch_class->set_method_default_annotations_of(j, save); ++ RC_TRACE(0x00000002, ("Unlocked %d threads", cnt)); + } + ++jvmtiError VM_RedefineClasses::check_redefinition_allowed(instanceKlassHandle scratch_class) { ++ ++ ++ ++ // Compatibility mode => check for unsupported modification ++ ++ ++ assert(scratch_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(scratch_class->old_version()); + +-jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( +- instanceKlassHandle the_class, +- instanceKlassHandle scratch_class) { + int i; + + // Check superclasses, or rather their names, since superclasses themselves can be + // requested to replace. + // Check for NULL superclass first since this might be java.lang.Object + if (the_class->super() != scratch_class->super() && +- (the_class->super() == NULL || scratch_class->super() == NULL || +- Klass::cast(the_class->super())->name() != +- Klass::cast(scratch_class->super())->name())) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ (the_class->super() == NULL || scratch_class->super() == NULL || ++ Klass::cast(the_class->super())->name() != ++ Klass::cast(scratch_class->super())->name())) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + + // Check if the number, names and order of directly implemented interfaces are the same. 
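The two-step bci mapping implemented by methodOopDesc::calculate_forward_bci() in the methodOop.cpp hunk earlier in this patch (map the bci back to the original method via this method's code section table, then forward into the target version via its table) can be illustrated with a standalone sketch. The CodeSection struct and forward_bci() below are hypothetical simplifications for illustration only; they are not part of this patch or of the HotSpot sources:

    #include <vector>

    // Each entry records that `length` bytecodes starting at `original_index`
    // in the original method now start at `new_index` in the rewritten method.
    struct CodeSection { int new_index; int original_index; int length; };

    int forward_bci(int bci,
                    const std::vector<CodeSection>& from,  // table of the source method
                    const std::vector<CodeSection>& to) {  // table of the target method
      // Step 1: map the bci in the source method back to the original bci.
      // An empty table means the method is an unmodified original.
      int original_bci = bci;
      for (const CodeSection& s : from) {
        if (bci >= s.new_index && bci < s.new_index + s.length) {
          original_bci = bci - s.new_index + s.original_index;
          break;
        }
      }
      // Step 2: map the original bci forward into the target method version.
      int new_bci = original_bci;
      for (const CodeSection& s : to) {
        if (original_bci >= s.original_index &&
            original_bci < s.original_index + s.length) {
          new_bci = original_bci - s.original_index + s.new_index;
          break;
        }
      }
      return new_bci;
    }

For example, with from = {{12, 4, 6}} (six bytecodes moved from original index 4 to index 12 by rewriting) and an empty to table, forward_bci(14, from, to) maps 14 back to original bci 6 and returns 6.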
+@@ -539,8 +678,8 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + } + for (i = 0; i < n_intfs; i++) { + if (Klass::cast((klassOop) k_interfaces->obj_at(i))->name() != +- Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + } + +@@ -689,12 +828,8 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + idnum_owner->set_method_idnum(new_num); + } + k_new_method->set_method_idnum(old_num); +- swap_all_method_annotations(old_num, new_num, scratch_class); + } + } +- RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni, +- k_old_method->name_and_sig_as_C_string(), oi)); + // advance to next pair of methods + ++oi; + ++ni; +@@ -703,11 +838,11 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + // method added, see if it is OK + new_flags = (jushort) k_new_method->access_flags().get_flags(); + if ((new_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // new methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; + } + { + u2 num = the_class->next_method_idnum(); +@@ -722,24 +857,19 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + idnum_owner->set_method_idnum(new_num); + } + k_new_method->set_method_idnum(num); +- swap_all_method_annotations(new_num, num, scratch_class); + } +- RC_TRACE(0x00008000, ("Method added: new: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni)); + ++ni; // advance to next new method + break; + case deleted: + // method deleted, see if it is OK + old_flags = (jushort) k_old_method->access_flags().get_flags(); + if ((old_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // deleted methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; + } +- RC_TRACE(0x00008000, ("Method deleted: old: %s [%d]", +- k_old_method->name_and_sig_as_C_string(), oi)); + ++oi; // advance to next old method + break; + default: +@@ -750,2200 +880,1783 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + return JVMTI_ERROR_NONE; + } + ++int VM_RedefineClasses::calculate_redefinition_flags(instanceKlassHandle new_class) { + +-// Find new constant pool index value for old constant pool index value +-// by seaching the index map. Returns zero (0) if there is no mapped +-// value for the old constant pool index. +-int VM_RedefineClasses::find_new_index(int old_index) { +- if (_index_map_count == 0) { +- // map is empty so nothing can be found +- return 0; +- } +- +- if (old_index < 1 || old_index >= _index_map_p->length()) { +- // The old_index is out of range so it is not mapped. 
This should +- // not happen in regular constant pool merging use, but it can +- // happen if a corrupt annotation is processed. +- return 0; +- } +- +- int value = _index_map_p->at(old_index); +- if (value == -1) { +- // the old_index is not mapped +- return 0; +- } +- +- return value; +-} // end find_new_index() +- ++ int result = Klass::NoRedefinition; + +-// Returns true if the current mismatch is due to a resolved/unresolved +-// class pair. Otherwise, returns false. +-bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { + +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_Class && t1 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } + +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_Class && t2 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } ++ RC_TRACE(0x00000002, ("Comparing different class versions of class %s", ++ new_class->name()->as_C_string())); + +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } ++ assert(new_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(new_class->old_version()); + +- char *s1 = cp1->klass_name_at(index1)->as_C_string(); +- char *s2 = cp2->klass_name_at(index2)->as_C_string(); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case ++ // Check whether class is in the error init state. ++ if (the_class->is_in_error_state()) { ++ // TBD #5057930: special error code is needed in 1.6 ++ //result = Klass::union_redefinition_level(result, Klass::Invalid); + } + +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_class_mismatch() ++ int i; + ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check superclasses ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->is_newest_version(), ""); ++ if (the_class->super() != new_class->super()) { ++ // Super class changed ++ ++ klassOop cur_klass = the_class->super(); ++ while (cur_klass != NULL) { ++ if (!new_class->is_subclass_of(cur_klass->klass_part()->newest_version())) { ++ RC_TRACE(0x00000002, ("Removed super class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ result = result | Klass::RemoveSuperType | Klass::ModifyInstances | Klass::ModifyClass; ++ ++ if (!cur_klass->klass_part()->has_subtype_changed()) { ++ RC_TRACE(0x00000002, ("Subtype changed of class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ cur_klass->klass_part()->set_subtype_changed(true); ++ } ++ } + +-// Returns true if the current mismatch is due to a resolved/unresolved +-// string pair. Otherwise, returns false. 
+-bool VM_RedefineClasses::is_unresolved_string_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { ++ cur_klass = cur_klass->klass_part()->super(); ++ } + +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_String && t1 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case ++ cur_klass = new_class->super(); ++ while (cur_klass != NULL) { ++ if (!the_class->is_subclass_of(cur_klass->klass_part()->old_version())) { ++ RC_TRACE(0x00000002, ("Added super class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ result = result | Klass::ModifyClass | Klass::ModifyInstances; ++ } ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ } ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check interfaces ++ ++ // Interfaces removed? ++ objArrayOop old_interfaces = the_class->transitive_interfaces(); ++ for (i = 0; i<old_interfaces->length(); i++) { ++ instanceKlassHandle old_interface((klassOop)old_interfaces->obj_at(i)); ++ if (!new_class->implements_interface_any_version(old_interface())) { ++ result = result | Klass::RemoveSuperType | Klass::ModifyClass; ++ RC_TRACE(0x00000002, ("Removed interface %s", ++ old_interface->name()->as_C_string())); ++ ++ if (!old_interface->has_subtype_changed()) { ++ RC_TRACE(0x00000002, ("Subtype changed of interface %s", ++ old_interface->name()->as_C_string())); ++ old_interface->set_subtype_changed(true); ++ } ++ } + } + +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_String && t2 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case ++ // Interfaces added? ++ objArrayOop new_interfaces = new_class->transitive_interfaces(); ++ for (i = 0; i<new_interfaces->length(); i++) { ++ if (!the_class->implements_interface_any_version((klassOop)new_interfaces->obj_at(i))) { ++ result = result | Klass::ModifyClass; ++ RC_TRACE(0x00000002, ("Added interface %s", ++ ((klassOop)new_interfaces->obj_at(i))->klass_part()->name()->as_C_string())); ++ } + } + +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } + +- char *s1 = cp1->string_at_noresolve(index1); +- char *s2 = cp2->string_at_noresolve(index2); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_string_mismatch() +- +- +-jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { +- // For consistency allocate memory using os::malloc wrapper. +- _scratch_classes = (instanceKlassHandle *) +- os::malloc(sizeof(instanceKlassHandle) * _class_count, mtInternal); +- if (_scratch_classes == NULL) { +- return JVMTI_ERROR_OUT_OF_MEMORY; ++ // Check whether class modifiers are the same. ++ jushort old_flags = (jushort) the_class->access_flags().get_flags(); ++ jushort new_flags = (jushort) new_class->access_flags().get_flags(); ++ if (old_flags != new_flags) { ++ // TODO (tw): Can this have any effects? 
+ } + +- ResourceMark rm(THREAD); +- +- JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); +- // state can only be NULL if the current thread is exiting which +- // should not happen since we're trying to do a RedefineClasses +- guarantee(state != NULL, "exiting thread calling load_new_class_versions"); +- for (int i = 0; i < _class_count; i++) { +- oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); +- // classes for primitives cannot be redefined +- if (!is_modifiable_class(mirror)) { +- return JVMTI_ERROR_UNMODIFIABLE_CLASS; ++ // Check if the number, names, types and order of fields declared in these classes ++ // are the same. ++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ // access ++ old_flags = old_fs.access_flags().as_short(); ++ new_flags = new_fs.access_flags().as_short(); ++ if ((old_flags ^ new_flags) & JVM_RECOGNIZED_FIELD_MODIFIERS) { ++ // (tw) Can this have any effects? + } +- klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- Symbol* the_class_sym = the_class->name(); +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loading name=%s kind=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), _class_load_kind, +- os::available_memory() >> 10)); ++ // offset ++ if (old_fs.offset() != new_fs.offset()) { ++ result = result | Klass::ModifyInstances; ++ } ++ // name and signature ++ Symbol* name_sym1 = the_class->constants()->symbol_at(old_fs.name_index()); ++ Symbol* sig_sym1 = the_class->constants()->symbol_at(old_fs.signature_index()); ++ Symbol* name_sym2 = new_class->constants()->symbol_at(new_fs.name_index()); ++ Symbol* sig_sym2 = new_class->constants()->symbol_at(new_fs.signature_index()); ++ if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { ++ result = result | Klass::ModifyInstances; ++ } ++ } + +- ClassFileStream st((u1*) _class_defs[i].class_bytes, +- _class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); ++ if (!old_fs.done() || !new_fs.done()) { ++ result = result | Klass::ModifyInstances; ++ } + +- // Parse the stream. +- Handle the_class_loader(THREAD, the_class->class_loader()); +- Handle protection_domain(THREAD, the_class->protection_domain()); +- // Set redefined class handle in JvmtiThreadState class. +- // This redefined class is sent to agent event handler for class file +- // load hook event. +- state->set_class_being_redefined(&the_class, _class_load_kind); ++ // Do a parallel walk through the old and new methods. Detect ++ // cases where they match (exist in both), have been added in ++ // the new methods, or have been deleted (exist only in the ++ // old methods). The class file parser places methods in order ++ // by method name, but does not order overloaded methods by ++ // signature. In order to determine what fate befell the methods, ++ // this code places the overloaded new methods that have matching ++ // old methods in the same order as the old methods and places ++ // new overloaded methods at the end of overloaded methods of ++ // that name. The code for this order normalization is adapted ++ // from the algorithm used in instanceKlass::find_method(). ++ // Since we are swapping out of order entries as we find them, ++ // we only have to search forward through the overloaded methods. 
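++ // For example (hypothetical): if the old methods are {foo()V, foo(I)V}
++ // and the new methods arrive as {foo(I)V, foo()V}, the first comparison
++ // sees a name match with a signature mismatch, searches forward, and
++ // swaps foo()V into position 0, so both overloads then match pairwise
++ // in old-method order.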
++ // Methods which are added and have the same name as an existing ++ // method (but different signature) will be put at the end of ++ // the methods with that name, and the name mismatch code will ++ // handle them. ++ objArrayHandle k_old_methods(the_class->methods()); ++ objArrayHandle k_new_methods(new_class->methods()); ++ int n_old_methods = k_old_methods->length(); ++ int n_new_methods = k_new_methods->length(); + +- klassOop k = SystemDictionary::parse_stream(the_class_sym, +- the_class_loader, +- protection_domain, +- &st, +- THREAD); +- // Clear class_being_redefined just to be sure. +- state->clear_class_being_redefined(); ++ int ni = 0; ++ int oi = 0; ++ while (true) { ++ methodOop k_old_method; ++ methodOop k_new_method; ++ enum { matched, added, deleted, undetermined } method_was = undetermined; + +- // TODO: if this is retransform, and nothing changed we can skip it +- +- instanceKlassHandle scratch_class (THREAD, k); +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("parse_stream exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- +- if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { +- return JVMTI_ERROR_UNSUPPORTED_VERSION; +- } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { +- return JVMTI_ERROR_INVALID_CLASS_FORMAT; +- } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { +- return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; +- } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { +- // The message will be "XXX (wrong name: YYY)" +- return JVMTI_ERROR_NAMES_DONT_MATCH; +- } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { // Just in case more exceptions can be thrown.. 
+- return JVMTI_ERROR_FAILS_VERIFICATION; +- } +- } +- +- // Ensure class is linked before redefine +- if (!the_class->is_linked()) { +- the_class->link_class(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("link_class exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; ++ if (oi >= n_old_methods) { ++ if (ni >= n_new_methods) { ++ break; // we've looked at everything, done ++ } ++ // New method at the end ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ method_was = added; ++ } else if (ni >= n_new_methods) { ++ // Old method, at the end, is deleted ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ method_was = deleted; ++ } else { ++ // There are more methods in both the old and new lists ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ if (k_old_method->name() != k_new_method->name()) { ++ // Methods are sorted by method name, so a mismatch means added ++ // or deleted ++ if (k_old_method->name()->fast_compare(k_new_method->name()) > 0) { ++ method_was = added; + } else { +- return JVMTI_ERROR_INTERNAL; ++ method_was = deleted; ++ } ++ } else if (k_old_method->signature() == k_new_method->signature()) { ++ // Both the name and signature match ++ method_was = matched; ++ } else { ++ // The name matches, but the signature doesn't, which means we have to ++ // search forward through the new overloaded methods. ++ int nj; // outside the loop for post-loop check ++ for (nj = ni + 1; nj < n_new_methods; nj++) { ++ methodOop m = (methodOop)k_new_methods->obj_at(nj); ++ if (k_old_method->name() != m->name()) { ++ // reached another method name so no more overloaded methods ++ method_was = deleted; ++ break; ++ } ++ if (k_old_method->signature() == m->signature()) { ++ // found a match so swap the methods ++ k_new_methods->obj_at_put(ni, m); ++ k_new_methods->obj_at_put(nj, k_new_method); ++ k_new_method = m; ++ method_was = matched; ++ break; ++ } ++ } ++ ++ if (nj >= n_new_methods) { ++ // reached the end without a match; so method was deleted ++ method_was = deleted; + } + } + } + +- // Do the validity checks in compare_and_normalize_class_versions() +- // before verifying the byte codes. By doing these checks first, we +- // limit the number of functions that require redirection from +- // the_class to scratch_class. In particular, we don't have to +- // modify JNI GetSuperclass() and thus won't change its performance. +- jvmtiError res = compare_and_normalize_class_versions(the_class, +- scratch_class); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ switch (method_was) { ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ // (tw) Can this have any effects? Probably yes on vtables? ++ result = result | Klass::ModifyClass; + } +- +- // verify what the caller passed us + { +- // The bug 6214132 caused the verification to fail. +- // Information about the_class and scratch_class is temporarily +- // recorded into jvmtiThreadState. 
This data is used to redirect +- // the_class to scratch_class in the JVM_* functions called by the +- // verifier. Please, refer to jvmtiThreadState.hpp for the detailed +- // description. +- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify( +- scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes exception: '%s'", ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller the bytecodes are bad +- return JVMTI_ERROR_FAILS_VERIFICATION; ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = new_class->method_with_idnum(old_num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(old_num); ++ RC_TRACE(0x00000002, ("swapping idnum of new and old method %d / %d!", ++ new_num, ++ old_num)); ++ // swap_all_method_annotations(old_num, new_num, new_class); + } + } ++ RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni, ++ k_old_method->name_and_sig_as_C_string(), oi)); ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ result = result | Klass::ModifyClass; ++ } ++ { ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ result = result | Klass::ModifyClass; ++ } ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = new_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ //swap_all_method_annotations(new_num, num, new_class); ++ } ++ RC_TRACE(0x00000001, ("Method added: new: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni)); ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ result = result | Klass::ModifyClass; ++ } ++ RC_TRACE(0x00000001, ("Method deleted: old: %s [%d]", ++ k_old_method->name_and_sig_as_C_string(), oi)); ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ if (new_class()->size() != new_class->old_version()->size()) { ++ result |= Klass::ModifyClassSize; ++ } ++ ++ if (new_class->size_helper() != ((instanceKlass*)(new_class->old_version()->klass_part()))->size_helper()) { ++ result |= Klass::ModifyInstanceSize; ++ } ++ ++ methodHandle 
instanceTransformerMethod(new_class->find_method(vmSymbols::transformer_name(), vmSymbols::void_method_signature())); ++ if (!instanceTransformerMethod.is_null() && !instanceTransformerMethod->is_static()) { ++ result |= Klass::HasInstanceTransformer; ++ } ++ ++ // (tw) Check method bodies to be able to return NoChange? ++ return result; ++} ++ ++void VM_RedefineClasses::calculate_instance_update_information(klassOop new_version) { ++ ++ class UpdateFieldsEvolutionClosure : public FieldEvolutionClosure { ++ ++ private: ++ ++ GrowableArray<int> info; ++ int curPosition; ++ bool copy_backwards; ++ ++ public: + +- res = merge_cp_and_rewrite(the_class, scratch_class, THREAD); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ bool does_copy_backwards() { ++ return copy_backwards; + } + +- if (VerifyMergedCPBytecodes) { +- // verify what we have done during constant pool merging +- { +- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify(scratch_class, Verifier::ThrowException, true, THREAD); +- } ++ UpdateFieldsEvolutionClosure(klassOop klass) { + +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes post merge-CP exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller that constant pool merging screwed up +- return JVMTI_ERROR_INTERNAL; +- } ++ int base_offset = instanceOopDesc::base_offset_in_bytes(); ++ ++ if (klass->klass_part()->newest_version() == SystemDictionary::Reference_klass()->klass_part()->newest_version()) { ++ base_offset += java_lang_ref_Reference::number_of_fake_oop_fields*size_of_type(T_OBJECT); + } ++ ++ info.append(base_offset); ++ info.append(0); ++ curPosition = base_offset; ++ copy_backwards = false; ++ } ++ ++ GrowableArray<int> &finish() { ++ info.append(0); ++ return info; + } + +- Rewriter::rewrite(scratch_class, THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- Rewriter::relocate_and_link(scratch_class, THREAD); ++ virtual void do_new_field(fieldDescriptor* fd){ ++ int alignment = fd->offset() - curPosition; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } ++ ++ int size = size_of_type(fd->field_type()); ++ fill(size); + } +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; ++ ++ private: ++ ++ void fill(int size) { ++ if (info.length() > 0 && info.at(info.length() - 1) < 0) { ++ (*info.adr_at(info.length() - 1)) -= size; + } else { +- return JVMTI_ERROR_INTERNAL; ++ info.append(-size); + } ++ ++ curPosition += size; + } + +- _scratch_classes[i] = scratch_class; ++ int size_of_type(BasicType type) { ++ int size = 0; ++ switch(type) { ++ case T_BOOLEAN: ++ size = sizeof(jboolean); ++ break; + +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loaded name=%s (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), os::available_memory() >> 10)); +- } ++ case T_CHAR: ++ size = (sizeof(jchar)); ++ break; + +- return JVMTI_ERROR_NONE; +-} ++ case T_FLOAT: ++ size = (sizeof(jfloat)); ++ break; + ++ case T_DOUBLE: 
++ size = (sizeof(jdouble)); ++ break; + +-// Map old_index to new_index as needed. scratch_cp is only needed +-// for RC_TRACE() calls. +-void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp, +- int old_index, int new_index) { +- if (find_new_index(old_index) != 0) { +- // old_index is already mapped +- return; +- } ++ case T_BYTE: ++ size = (sizeof(jbyte)); ++ break; + +- if (old_index == new_index) { +- // no mapping is needed +- return; +- } ++ case T_SHORT: ++ size = (sizeof(jshort)); ++ break; + +- _index_map_p->at_put(old_index, new_index); +- _index_map_count++; ++ case T_INT: ++ size = (sizeof(jint)); ++ break; + +- RC_TRACE(0x00040000, ("mapped tag %d at index %d to %d", +- scratch_cp->tag_at(old_index).value(), old_index, new_index)); +-} // end map_index() ++ case T_LONG: ++ size = (sizeof(jlong)); ++ break; + ++ case T_OBJECT: ++ case T_ARRAY: ++ if (UseCompressedOops) { ++ size = sizeof(narrowOop); ++ } else { ++ size = (sizeof(oop)); ++ } ++ break; + +-// Merge old_cp and scratch_cp and return the results of the merge via +-// merge_cp_p. The number of entries in *merge_cp_p is returned via +-// merge_cp_length_p. The entries in old_cp occupy the same locations +-// in *merge_cp_p. Also creates a map of indices from entries in +-// scratch_cp to the corresponding entry in *merge_cp_p. Index map +-// entries are only created for entries in scratch_cp that occupy a +-// different location in *merged_cp_p. +-bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS) { ++ default: ++ ShouldNotReachHere(); ++ } + +- if (merge_cp_p == NULL) { +- assert(false, "caller must provide scatch constantPool"); +- return false; // robustness +- } +- if (merge_cp_length_p == NULL) { +- assert(false, "caller must provide scatch CP length"); +- return false; // robustness +- } +- // Worst case we need old_cp->length() + scratch_cp()->length(), +- // but the caller might be smart so make sure we have at least +- // the minimum. +- if ((*merge_cp_p)->length() < old_cp->length()) { +- assert(false, "merge area too small"); +- return false; // robustness +- } ++ assert(size > 0, ""); ++ return size; + +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("old_cp_len=%d, scratch_cp_len=%d", old_cp->length(), +- scratch_cp->length())); ++ } ++ ++ public: + +- { +- // Pass 0: +- // The old_cp is copied to *merge_cp_p; this means that any code +- // using old_cp does not have to change. This work looks like a +- // perfect fit for constantPoolOop::copy_cp_to(), but we need to +- // handle one special case: +- // - revert JVM_CONSTANT_Class to JVM_CONSTANT_UnresolvedClass +- // This will make verification happy. 
+- +- int old_i; // index into old_cp +- +- // index zero (0) is not used in constantPools +- for (old_i = 1; old_i < old_cp->length(); old_i++) { +- // leave debugging crumb +- jbyte old_tag = old_cp->tag_at(old_i).value(); +- switch (old_tag) { +- case JVM_CONSTANT_Class: +- case JVM_CONSTANT_UnresolvedClass: +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- // May be resolving while calling this so do the same for +- // JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition) +- (*merge_cp_p)->unresolved_klass_at_put(old_i, +- old_cp->klass_name_at(old_i)); +- break; ++ virtual void do_old_field(fieldDescriptor* fd){} + +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // just copy the entry to *merge_cp_p, but double and long take +- // two constant pool entries +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- old_i++; +- break; ++ virtual void do_changed_field(fieldDescriptor* old_fd, fieldDescriptor *new_fd){ + +- default: +- // just copy the entry to *merge_cp_p +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- break; ++ int alignment = new_fd->offset() - curPosition; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); + } +- } // end for each old_cp entry +- +- // We don't need to sanity check that *merge_cp_length_p is within +- // *merge_cp_p bounds since we have the minimum on-entry check above. +- (*merge_cp_length_p) = old_i; +- } + +- // merge_cp_len should be the same as old_cp->length() at this point +- // so this trace message is really a "warm-and-breathing" message. +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 0: merge_cp_len=%d", *merge_cp_length_p)); ++ assert(old_fd->field_type() == new_fd->field_type(), ""); ++ assert(curPosition == new_fd->offset(), "must be correct offset!"); + +- int scratch_i; // index into scratch_cp +- { +- // Pass 1a: +- // Compare scratch_cp entries to the old_cp entries that we have +- // already copied to *merge_cp_p. In this pass, we are eliminating +- // exact duplicates (matching entry at same index) so we only +- // compare entries in the common indice range. +- int increment = 1; +- int pass1a_length = MIN2(old_cp->length(), scratch_cp->length()); +- for (scratch_i = 1; scratch_i < pass1a_length; scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; ++ int offset = old_fd->offset(); ++ int size = size_of_type(old_fd->field_type()); + +- default: +- increment = 1; +- break; ++ int prevEnd = -1; ++ if (info.length() > 0 && info.at(info.length() - 1) > 0) { ++ prevEnd = info.at(info.length() - 2) + info.at(info.length() - 1); + } + +- bool match = scratch_cp->compare_entry_to(scratch_i, *merge_cp_p, +- scratch_i, CHECK_0); +- if (match) { +- // found a match at the same index so nothing more to do +- continue; +- } else if (is_unresolved_class_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved class entry at the same index +- // with the same string value. Since Pass 0 reverted any +- // class entries to unresolved class entries in *merge_cp_p, +- // we go with the unresolved class entry. 
+- continue; +- } else if (is_unresolved_string_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved string entry at the same index +- // with the same string value. We can live with whichever +- // happens to be at scratch_i in *merge_cp_p. +- continue; +- } +- +- int found_i = scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, +- CHECK_0); +- if (found_i != 0) { +- guarantee(found_i != scratch_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // The find_matching_entry() call above could fail to find a match +- // due to a resolved versus unresolved class or string entry situation +- // like we solved above with the is_unresolved_*_mismatch() calls. +- // However, we would have to call is_unresolved_*_mismatch() over +- // all of *merge_cp_p (potentially) and that doesn't seem to be +- // worth the time. +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1a: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- +- if (scratch_i < scratch_cp->length()) { +- // Pass 1b: +- // old_cp is smaller than scratch_cp so there are entries in +- // scratch_cp that we have not yet processed. We take care of +- // those now. +- int increment = 1; +- for (; scratch_i < scratch_cp->length(); scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; ++ if (prevEnd == offset) { ++ info.at_put(info.length() - 2, info.at(info.length() - 2) + size); ++ } else { ++ info.append(size); ++ info.append(offset); + } + +- int found_i = +- scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, CHECK_0); +- if (found_i != 0) { +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; ++ if (old_fd->offset() < new_fd->offset()) { ++ copy_backwards = true; + } + +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. 
+- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); ++ transfer_special_access_flags(old_fd, new_fd); ++ ++ curPosition += size; + } ++ }; ++ ++ UpdateFieldsEvolutionClosure cl(new_version); ++ ((instanceKlass*)new_version->klass_part())->do_fields_evolution(&cl); + +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); ++ GrowableArray<int> result = cl.finish(); ++ ((instanceKlass*)new_version->klass_part())->store_update_information(result); ++ ((instanceKlass*)new_version->klass_part())->set_copying_backwards(cl.does_copy_backwards()); ++ ++ if (RC_TRACE_ENABLED(0x00000002)) { ++ RC_TRACE(0x00000002, ("Instance update information for %s:", ++ new_version->klass_part()->name()->as_C_string())); ++ if (cl.does_copy_backwards()) { ++ RC_TRACE(0x00000002, ("\tDoes copy backwards!")); ++ } ++ for (int i=0; i<result.length(); i++) { ++ int curNum = result.at(i); ++ if (curNum < 0) { ++ RC_TRACE(0x00000002, ("\t%d CLEAN", curNum)); ++ } else if (curNum > 0) { ++ RC_TRACE(0x00000002, ("\t%d COPY from %d", curNum, result.at(i + 1))); ++ i++; ++ } else { ++ RC_TRACE(0x00000002, ("\tEND")); ++ } ++ } + } ++} + +- return true; +-} // end merge_constant_pools() +- +- +-// Merge constant pools between the_class and scratch_class and +-// potentially rewrite bytecodes in scratch_class to use the merged +-// constant pool. +-jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class, +- TRAPS) { +- // worst case merged constant pool length is old and new combined +- int merge_cp_length = the_class->constants()->length() +- + scratch_class->constants()->length(); +- +- constantPoolHandle old_cp(THREAD, the_class->constants()); +- constantPoolHandle scratch_cp(THREAD, scratch_class->constants()); +- +- // Constant pools are not easily reused so we allocate a new one +- // each time. +- // merge_cp is created unsafe for concurrent GC processing. It +- // should be marked safe before discarding it. Even though +- // garbage, if it crosses a card boundary, it may be scanned +- // in order to find the start of the first complete object on the card. +- constantPoolHandle merge_cp(THREAD, +- oopFactory::new_constantPool(merge_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- int orig_length = old_cp->orig_length(); +- if (orig_length == 0) { +- // This old_cp is an actual original constant pool. We save +- // the original length in the merged constant pool so that +- // merge_constant_pools() can be more efficient. If a constant +- // pool has a non-zero orig_length() value, then that constant +- // pool was created by a merge operation in RedefineClasses. +- merge_cp->set_orig_length(old_cp->length()); +- } else { +- // This old_cp is a merged constant pool from a previous +- // RedefineClasses() calls so just copy the orig_length() +- // value. 
+- merge_cp->set_orig_length(old_cp->orig_length()); ++Symbol* VM_RedefineClasses::signature_to_class_name(Symbol* signature) { ++ assert(FieldType::is_obj(signature), ""); ++ return SymbolTable::new_symbol(signature->as_C_string() + 1, signature->utf8_length() - 2, Thread::current()); ++} ++ ++void VM_RedefineClasses::calculate_type_check_information(klassOop klass) { ++ if (klass->klass_part()->is_redefining()) { ++ klass = klass->klass_part()->old_version(); + } + +- ResourceMark rm(THREAD); +- _index_map_count = 0; +- _index_map_p = new intArray(scratch_cp->length(), -1); ++ // We found an instance klass! ++ instanceKlass *cur_instance_klass = instanceKlass::cast(klass); ++ GrowableArray< Pair<int, klassOop> > type_check_information; + +- bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp, +- &merge_cp_length, THREAD); +- if (!result) { +- // The merge can fail due to memory allocation failure or due +- // to robustness checks. +- return JVMTI_ERROR_INTERNAL; +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("merge_cp_len=%d, index_map_len=%d", merge_cp_length, _index_map_count)); +- +- if (_index_map_count == 0) { +- // there is nothing to map between the new and merged constant pools +- +- if (old_cp->length() == scratch_cp->length()) { +- // The old and new constant pools are the same length and the +- // index map is empty. This means that the three constant pools +- // are equivalent (but not the same). Unfortunately, the new +- // constant pool has not gone through link resolution nor have +- // the new class bytecodes gone through constant pool cache +- // rewriting so we can't use the old constant pool with the new +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else if (old_cp->length() < scratch_cp->length()) { +- // The old constant pool has fewer entries than the new constant +- // pool and the index map is empty. This means the new constant +- // pool is a superset of the old constant pool. However, the old +- // class bytecodes have already gone through constant pool cache +- // rewriting so we can't use the new constant pool with the old +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else { +- // The old constant pool has more entries than the new constant +- // pool and the index map is empty. This means that both the old +- // and merged constant pools are supersets of the new constant +- // pool. +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool; the previous new constant pool will +- // get GCed. 
+- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- // drop local ref to the merged constant pool +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); ++ class MyFieldClosure : public FieldClosure { ++ ++ public: ++ ++ GrowableArray< Pair<int, klassOop> > *_arr; ++ ++ MyFieldClosure(GrowableArray< Pair<int, klassOop> > *arr) { ++ _arr = arr; + } +- } else { +- if (RC_TRACE_ENABLED(0x00040000)) { +- // don't want to loop unless we are tracing +- int count = 0; +- for (int i = 1; i < _index_map_p->length(); i++) { +- int value = _index_map_p->at(i); +- +- if (value != -1) { +- RC_TRACE_WITH_THREAD(0x00040000, THREAD, +- ("index_map[%d]: old=%d new=%d", count, i, value)); +- count++; ++ ++ virtual void do_field(fieldDescriptor* fd) { ++ if (fd->field_type() == T_OBJECT) { ++ Symbol* signature = fd->signature(); ++ if (FieldType::is_obj(signature)) { ++ Symbol* name = signature_to_class_name(signature); ++ klassOop field_klass; ++ if (is_field_dangerous(name, fd, field_klass)) { ++ RC_TRACE(0x00000002, ("Found dangerous field %s in klass %s of type %s", ++ fd->name()->as_C_string(), ++ fd->field_holder()->klass_part()->name()->as_C_string(), ++ name->as_C_string())); ++ _arr->append(Pair<int, klassOop>(fd->offset(), field_klass->klass_part()->newest_version())); ++ } + } ++ ++ // Array fields can never be a problem! + } + } + +- // We have entries mapped between the new and merged constant pools +- // so we have to rewrite some constant pool references. +- if (!rewrite_cp_refs(scratch_class, THREAD)) { +- return JVMTI_ERROR_INTERNAL; ++ bool is_field_dangerous(Symbol* klass_name, fieldDescriptor *fd, klassOop &field_klass) { ++ field_klass = SystemDictionary::find(klass_name, fd->field_holder()->klass_part()->class_loader(), ++ fd->field_holder()->klass_part()->protection_domain(), Thread::current()); ++ if(field_klass != NULL) { ++ if (field_klass->klass_part()->is_redefining()) { ++ field_klass = field_klass->klass_part()->old_version(); ++ } ++ if (field_klass->klass_part()->has_subtype_changed()) { ++ return true; ++ } ++ } ++ return false; + } ++ }; + +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool so now the rewritten bytecodes have +- // valid references; the previous new constant pool will get +- // GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- merge_cp()->set_is_conc_safe(true); ++ MyFieldClosure fieldClosure(&type_check_information); ++ cur_instance_klass->do_nonstatic_fields(&fieldClosure); ++ ++ if (type_check_information.length() > 0) { ++ type_check_information.append(Pair<int, klassOop>(-1, NULL)); ++ cur_instance_klass->store_type_check_information(type_check_information); + } +- assert(old_cp()->is_conc_safe(), "Just checking"); +- assert(scratch_cp()->is_conc_safe(), "Just checking"); ++} + +- return JVMTI_ERROR_NONE; +-} // end merge_cp_and_rewrite() ++bool VM_RedefineClasses::check_field_value_types() { + ++ Thread *THREAD = Thread::current(); ++ class CheckFieldTypesClosure : public ObjectClosure { + +-// Rewrite constant pool references in klass scratch_class. 
+-bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class, +- TRAPS) { ++ private: + +- // rewrite constant pool references in the methods: +- if (!rewrite_cp_refs_in_methods(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } ++ bool _result; + +- // rewrite constant pool references in the class_annotations: +- if (!rewrite_cp_refs_in_class_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } ++ public: + +- // rewrite constant pool references in the fields_annotations: +- if (!rewrite_cp_refs_in_fields_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } ++ CheckFieldTypesClosure() { ++ _result = true; ++ } + +- // rewrite constant pool references in the methods_annotations: +- if (!rewrite_cp_refs_in_methods_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } ++ bool result() { return _result; } + +- // rewrite constant pool references in the methods_parameter_annotations: +- if (!rewrite_cp_refs_in_methods_parameter_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; ++ virtual void do_object(oop obj) { ++ ++ if (!_result) { ++ return; ++ } ++ ++ if (obj->is_objArray()) { ++ ++ objArrayOop array = objArrayOop(obj); ++ ++ klassOop element_klass = objArrayKlass::cast(array->klass())->element_klass(); ++ ++ if (element_klass->klass_part()->has_subtype_changed()) { ++ int length = array->length(); ++ for (int i=0; i<length; i++) { ++ oop element = array->obj_at(i); ++ if (element != NULL && element->blueprint()->newest_version()->klass_part()->is_redefining()) { ++ // Check subtype relationship to static type of array ++ if (!element->blueprint()->newest_version()->klass_part()->is_subtype_of(element_klass->klass_part()->newest_version())) { ++ RC_TRACE(0x00000001, ("Array value is INVALID - abort redefinition (static_type=%s, index=%d, dynamic_type=%s)", ++ element_klass->klass_part()->name()->as_C_string(), ++ i, ++ element->blueprint()->name()->as_C_string())); ++ _result = false; ++ break; ++ } ++ } ++ } ++ } ++ ++ } else { ++ Pair<int, klassOop> *cur = obj->klass()->klass_part()->type_check_information(); ++ if (cur != NULL) { ++ // Type check information exists for this oop ++ while ((*cur).left() != -1) { ++ check_field(obj, (*cur).left(), (*cur).right()); ++ cur++; ++ } ++ } ++ } ++ } ++ ++ void check_field(oop obj, int offset, klassOop static_type) { ++ oop field_value = obj->obj_field(offset); ++ if (field_value != NULL) { ++ // Field is not null ++ if (field_value->klass()->klass_part()->newest_version()->klass_part()->is_subtype_of(static_type)) { ++ // We are OK ++ RC_TRACE(0x00008000, ("Field value is OK (klass=%s, static_type=%s, offset=%d, dynamic_type=%s)", ++ obj->klass()->klass_part()->name()->as_C_string(), ++ static_type->klass_part()->name()->as_C_string(), ++ offset, ++ field_value->klass()->klass_part()->name()->as_C_string())); ++ } else { ++ // Failure! 
++ RC_TRACE(0x00000001, ("Field value is INVALID - abort redefinition (klass=%s, static_type=%s, offset=%d, dynamic_type=%s)", ++ obj->klass()->klass_part()->name()->as_C_string(), ++ static_type->klass_part()->name()->as_C_string(), ++ offset, ++ field_value->klass()->klass_part()->name()->as_C_string())); ++ _result = false; ++ } ++ } ++ } ++ }; ++ ++ CheckFieldTypesClosure myObjectClosure; ++ ++ // make sure that heap is parsable (fills TLABs with filler objects) ++ Universe::heap()->ensure_parsability(false); // no need to retire TLABs ++ ++ // do the iteration ++ // If this operation encounters a bad object when using CMS, ++ // consider using safe_object_iterate() which avoids perm gen ++ // objects that may contain bad references. ++ Universe::heap()->object_iterate(&myObjectClosure); ++ ++ // when sharing is enabled we must iterate over the shared spaces ++ if (UseSharedSpaces) { ++ GenCollectedHeap* gch = GenCollectedHeap::heap(); ++ CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen(); ++ gen->ro_space()->object_iterate(&myObjectClosure); ++ gen->rw_space()->object_iterate(&myObjectClosure); + } + +- // rewrite constant pool references in the methods_default_annotations: +- if (!rewrite_cp_refs_in_methods_default_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; ++ return myObjectClosure.result(); ++} ++ ++void VM_RedefineClasses::clear_type_check_information(klassOop k) { ++ if (k->klass_part()->is_redefining()) { ++ k = k->klass_part()->old_version(); + } + +- return true; +-} // end rewrite_cp_refs() ++ // We found an instance klass! ++ instanceKlass *cur_instance_klass = instanceKlass::cast(k); ++ cur_instance_klass->clear_type_check_information(); ++} + ++void VM_RedefineClasses::update_active_methods() { ++ ++ RC_TRACE(0x00000002, ("Updating active methods")); ++ JavaThread *java_thread = Threads::first(); ++ while (java_thread != NULL) { ++ ++ int stack_depth = 0; ++ if (java_thread->has_last_Java_frame()) { ++ ++ RC_TRACE(0x0000000400, ("checking stack of Java thread %s", java_thread->name())); ++ ++ // vframes are resource allocated ++ Thread* current_thread = Thread::current(); ++ ResourceMark rm(current_thread); ++ HandleMark hm(current_thread); ++ ++ RegisterMap reg_map(java_thread); ++ frame f = java_thread->last_frame(); ++ vframe* vf = vframe::new_vframe(&f, ®_map, java_thread); ++ frame* last_entry_frame = NULL; ++ ++ while (vf != NULL) { ++ if (vf->is_java_frame()) { ++ // java frame (interpreted, compiled, ...) 
++ javaVFrame *jvf = javaVFrame::cast(vf); ++ ++ if (!(jvf->method()->is_native())) { ++ int bci = jvf->bci(); ++ RC_TRACE(0x00000400, ("found method: %s / bci=%d", jvf->method()->name()->as_C_string(), bci)); ++ ResourceMark rm(Thread::current()); ++ HandleMark hm; ++ instanceKlassHandle klass(jvf->method()->method_holder()); ++ ++ if (jvf->method()->new_version() != NULL && jvf->is_interpreted_frame()) { ++ ++ ++ RC_TRACE(0x00000002, ("Found method that should just be updated to the newest version %s", ++ jvf->method()->name_and_sig_as_C_string())); ++ ++ if (RC_TRACE_ENABLED(0x01000000)) { ++ int code_size = jvf->method()->code_size(); ++ char *code_base_old = (char*)jvf->method()->code_base(); ++ char *code_base_new = (char*)jvf->method()->new_version()->code_base(); ++ for (int i=0; i<code_size; i++) { ++ tty->print_cr("old=%d new=%d", *code_base_old++, *code_base_new++); ++ } ++ jvf->method()->print_codes_on(tty); ++ jvf->method()->new_version()->print_codes_on(tty); ++ } ++ ++ assert(jvf->is_interpreted_frame(), "Every frame must be interpreted!"); ++ interpretedVFrame *iframe = (interpretedVFrame *)jvf; ++ ++ ++ if (RC_TRACE_ENABLED(0x01000000)) { ++ constantPoolCacheOop cp_old = jvf->method()->constants()->cache(); ++ tty->print_cr("old cp"); ++ for (int i=0; i<cp_old->length(); i++) { ++ cp_old->entry_at(i)->print(tty, i); ++ } ++ constantPoolCacheOop cp_new = jvf->method()->new_version()->constants()->cache(); ++ tty->print_cr("new cp"); ++ for (int i=0; i<cp_new->length(); i++) { ++ cp_new->entry_at(i)->print(tty, i); ++ } ++ } + +-// Rewrite constant pool references in the methods. +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods( +- instanceKlassHandle scratch_class, TRAPS) { ++ iframe->set_method(jvf->method()->new_version(), bci); ++ RC_TRACE(0x00000002, ("Updated method to newer version")); ++ assert(jvf->method()->new_version() == NULL, "must be latest version"); + +- objArrayHandle methods(THREAD, scratch_class->methods()); ++ } ++ } ++ } ++ vf = vf->sender(); ++ } ++ } ++ ++ // Advance to next thread ++ java_thread = java_thread->next(); ++ } ++} ++ ++void VM_RedefineClasses::method_forwarding() { ++ ++ int forwarding_count = 0; ++ JavaThread *java_thread = Threads::first(); ++ while (java_thread != NULL) { ++ ++ int stack_depth = 0; ++ if (java_thread->has_last_Java_frame()) { ++ ++ RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name())); ++ ++ // vframes are resource allocated ++ Thread* current_thread = Thread::current(); ++ ResourceMark rm(current_thread); ++ HandleMark hm(current_thread); ++ ++ RegisterMap reg_map(java_thread); ++ frame f = java_thread->last_frame(); ++ vframe* vf = vframe::new_vframe(&f, ®_map, java_thread); ++ frame* last_entry_frame = NULL; ++ ++ while (vf != NULL) { ++ if (vf->is_java_frame()) { ++ // java frame (interpreted, compiled, ...) 
++          javaVFrame *jvf = javaVFrame::cast(vf);
++
++          if (!(jvf->method()->is_native())) {
++            RC_TRACE(0x00008000, ("found method: %s",
++              jvf->method()->name()->as_C_string()));
++            ResourceMark rm(Thread::current());
++            HandleMark hm;
++            instanceKlassHandle klass(jvf->method()->method_holder());
++            methodOop m = jvf->method();
++            int bci = jvf->bci();
++            RC_TRACE(0x00008000, ("klass redef %d",
++              klass->is_redefining()));
++
++            if (klass->new_version() != NULL && m->new_version() == NULL) {
++              RC_TRACE(0x00008000, ("found potential forwarding method: %s",
++                m->name()->as_C_string()));
++
++              klassOop new_klass = klass->newest_version();
++              methodOop new_method = new_klass->klass_part()->lookup_method(m->name(), m->signature());
++              if (new_method != NULL) {
++                // lookup_method may return NULL, so only trace a candidate that was found.
++                RC_TRACE(0x00000002, ("found new method %s (has_code_section_table=%d)",
++                  new_method->name()->as_C_string(),
++                  new_method->constMethod()->has_code_section_table()));
++              }
++
++              if (new_method != NULL && new_method->constMethod()->has_code_section_table()) {
++                RC_TRACE(0x00008000, ("found code section table for method: %s",
++                  new_method->name()->as_C_string()));
++                m->set_forward_method(new_method);
++                forwarding_count++;
++                if (new_method->max_locals() != m->max_locals()) {
++                  tty->print_cr("new_m max locals: %d old_m max locals: %d", new_method->max_locals(), m->max_locals());
++                }
++                assert(new_method->max_locals() == m->max_locals(), "number of locals must match");
++                assert(new_method->max_stack() == m->max_stack(), "number of stack values must match");
++                if (jvf->is_interpreted_frame()) {
++                  if (m->is_in_code_section(bci)) {
++                    // We must transfer now and cannot delay until next NOP.
++                    int new_bci = m->calculate_forward_bci(bci, new_method);
++                    interpretedVFrame* iframe = interpretedVFrame::cast(jvf);
++                    RC_TRACE(0x00000002, ("Transferring execution of %s to new method old_bci=%d new_bci=%d",
++                      new_method->name()->as_C_string(),
++                      bci,
++                      new_bci));
++                    iframe->set_method(new_method, new_bci);
++                  } else {
++                    RC_TRACE(0x00000002, ("Delaying method forwarding of %s because %d is not in a code section",
++                      new_method->name()->as_C_string(),
++                      bci));
++                  }
++                } else {
++                  RC_TRACE(0x00000002, ("Delaying method forwarding of %s because method is compiled",
++                    new_method->name()->as_C_string()));
++                }
++              }
++            }
++          }
++        }
++        vf = vf->sender();
++      }
++    }
+
+-  if (methods.is_null() || methods->length() == 0) {
+-    // no methods so nothing to do
+-    return true;
++    // Advance to next thread
++    java_thread = java_thread->next();
+   }
+
+-  // rewrite constant pool references in the methods:
+-  for (int i = methods->length() - 1; i >= 0; i--) {
+-    methodHandle method(THREAD, (methodOop)methods->obj_at(i));
+-    methodHandle new_method;
+-    rewrite_cp_refs_in_method(method, &new_method, CHECK_false);
+-    if (!new_method.is_null()) {
+-      // the method has been replaced so save the new method version
+-      methods->obj_at_put(i, new_method());
++  RC_TRACE(0x00000001, ("Method forwarding applied to %d methods",
++    forwarding_count));
++}
++
++bool VM_RedefineClasses::check_method_stacks() {
++
++  JavaThread *java_thread = Threads::first();
++  while (java_thread != NULL) {
++
++    int stack_depth = 0;
++    if (java_thread->has_last_Java_frame()) {
++
++      RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name()));
++
++      // vframes are resource allocated
++      Thread* current_thread = Thread::current();
++      ResourceMark rm(current_thread);
++      HandleMark hm(current_thread);
++
++      RegisterMap reg_map(java_thread);
++      frame f = java_thread->last_frame();
++      vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++      frame* last_entry_frame = NULL;
++
++      while (vf != NULL) {
++        if (vf->is_java_frame()) {
++          // java frame (interpreted, compiled, ...)
++          javaVFrame *jvf = javaVFrame::cast(vf);
++
++          if (!(jvf->method()->is_native())) {
++            RC_TRACE(0x00000400, ("found method: %s", jvf->method()->name()->as_C_string()));
++            ResourceMark rm(Thread::current());
++            HandleMark hm;
++            instanceKlassHandle klass(jvf->method()->method_holder());
++
++            StackValueCollection *locals = jvf->locals();
++
++            for (int i=0; i<locals->size(); i++) {
++              StackValue *stack_value = locals->at(i);
++              if (stack_value->type() == T_OBJECT) {
++                Handle obj = stack_value->get_obj();
++                if (!obj.is_null() && obj->klass()->klass_part()->newest_version()->klass_part()->check_redefinition_flag(Klass::RemoveSuperType)) {
++
++                  // Possible failure => check the local variable table to see whether the value can be proven compatible.
++                  bool result = false;
++                  methodOop method = jvf->method();
++                  if (method->has_localvariable_table()) {
++                    LocalVariableTableElement *elem = jvf->method()->localvariable_table_start();
++                    for (int j=0; j<method->localvariable_table_length(); j++) {
++
++                      if (elem->slot == i) {
++
++                        // Matching index found
++
++                        if (elem->start_bci <= jvf->bci() && elem->start_bci + elem->length > jvf->bci()) {
++
++                          // Also in range
++                          Symbol* signature = jvf->method()->constants()->symbol_at(elem->descriptor_cp_index);
++                          Symbol* klass_name = signature_to_class_name(signature);
++
++                          klassOop local_klass = SystemDictionary::find(klass_name, jvf->method()->method_holder()->klass_part()->class_loader(), jvf->method()->method_holder()->klass_part()->protection_domain(), Thread::current())->klass_part()->newest_version();
++                          klassOop cur = obj->klass()->klass_part()->newest_version();
++
++                          // Check the value's dynamic type against the declared type of the local.
++                          if (cur->klass_part()->newest_version()->klass_part()->is_subtype_of(local_klass)) {
++                            // We are OK
++                            RC_TRACE(0x00008000, ("Local variable value is OK (local_klass=%s, cur_klass=%s)",
++                              local_klass->klass_part()->name()->as_C_string(), cur->klass_part()->name()->as_C_string()));
++                            result = true;
++                          } else {
++                            // Failure!
++                            RC_TRACE(0x00000001, ("Local variable value is INVALID - abort redefinition (local_klass=%s, cur_klass=%s)",
++                              local_klass->klass_part()->name()->as_C_string(),
++                              cur->klass_part()->name()->as_C_string()));
++                            return false;
++                          }
++                        }
++                      }
++
++                      elem++;
++                    }
++                  } else {
++                    RC_TRACE(0x00000002, ("Method %s does not have a local variable table => abort",
++                      method->name_and_sig_as_C_string()));
++                  }
++
++                  if (!result) {
++                    return false;
++                  }
++
++                  RC_TRACE(0x00008000, ("Verifying class %s",
++                    jvf->method()->method_holder()->klass_part()->name()->as_C_string()));
++
++                  Symbol* exception_name;
++                  const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
++                  char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
++                  // The verifier below never writes into this buffer; clear it so the
++                  // failure trace cannot print uninitialized memory.
++                  message_buffer[0] = '\0';
++
++                  Thread::current()->set_pretend_new_universe(true);
++                  ClassVerifier split_verifier(klass, Thread::current());
++                  split_verifier.verify_method(jvf->method(), Thread::current());
++                  exception_name = split_verifier.result();
++                  Thread::current()->set_pretend_new_universe(false);
++
++                  if (exception_name != NULL) {
++
++                    RC_TRACE(0x00000001, ("Verification of class %s failed",
++                      jvf->method()->method_holder()->klass_part()->name()->as_C_string()));
++                    RC_TRACE(0x00000001, ("Exception: %s",
++                      exception_name->as_C_string()));
++                    RC_TRACE(0x00000001, ("Message: %s",
++                      message_buffer));
++                    Thread::current()->clear_pending_exception();
++                    return false;
++                  }
++
++                }
++              }
++            }
++          }
++        }
++        vf = vf->sender();
++      }
+     }
++
++    // Advance to next thread
++    java_thread = java_thread->next();
+   }
+
+   return true;
+ }
+
++// Placeholder for a per-method consistency check; the only call site (in
++// check_loaded_methods) is currently commented out, so this always succeeds.
++bool VM_RedefineClasses::check_method(methodOop method) {
++  return true;
++}
+
+-// Rewrite constant pool references in the specific method. This code
+-// was adapted from Rewriter::rewrite_method().
+-void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method,
+-       methodHandle *new_method_p, TRAPS) {
++// Warning: destroys redefinition level values of klasses.
++bool VM_RedefineClasses::check_loaded_methods() {
+
+-  *new_method_p = methodHandle();  // default is no new method
++  class CheckLoadedMethodsClosure : public ObjectClosure {
+
+-  // We cache a pointer to the bytecodes here in code_base. If GC
+-  // moves the methodOop, then the bytecodes will also move which
+-  // will likely cause a crash. We create a No_Safepoint_Verifier
+-  // object to detect whether we pass a possible safepoint in this
+-  // code block.
+-  No_Safepoint_Verifier nsv;
++    private:
++
++    bool _result;
++    GrowableArray<klassOop> *_dangerous_klasses;
+
+-  // Bytecodes and their length
+-  address code_base = method->code_base();
+-  int code_length = method->code_size();
++    public:
++    CheckLoadedMethodsClosure(GrowableArray<klassOop> *dangerous_klasses) {
++      _result = true;
++      _dangerous_klasses = dangerous_klasses;
++    }
+
+-  int bc_length;
+-  for (int bci = 0; bci < code_length; bci += bc_length) {
+-    address bcp = code_base + bci;
+-    Bytecodes::Code c = (Bytecodes::Code)(*bcp);
++    bool result() {
++      return _result;
++    }
+
+-    bc_length = Bytecodes::length_for(c);
+-    if (bc_length == 0) {
+-      // More complicated bytecodes report a length of zero so
+-      // we have to try again a slightly different way.
+- bc_length = Bytecodes::length_at(method(), bcp); ++ bool is_class_dangerous(klassOop k) { ++ return k->klass_part()->newest_version()->klass_part()->check_redefinition_flag(Klass::RemoveSuperType); + } + +- assert(bc_length != 0, "impossible bytecode length"); ++ bool can_be_affected(instanceKlass *klass) { + +- switch (c) { +- case Bytecodes::_ldc: +- { +- int cp_index = *(bcp + 1); +- int new_index = find_new_index(cp_index); ++ constantPoolOop cp = klass->constants(); + +- if (StressLdcRewrite && new_index == 0) { +- // If we are stressing ldc -> ldc_w rewriting, then we +- // always need a new_index value. +- new_index = cp_index; +- } +- if (new_index != 0) { +- // the original index is mapped so we have more work to do +- if (!StressLdcRewrite && new_index <= max_jubyte) { +- // The new value can still use ldc instead of ldc_w +- // unless we are trying to stress ldc -> ldc_w rewriting +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- *(bcp + 1) = new_index; +- } else { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s->ldc_w@" INTPTR_FORMAT " old=%d, new=%d", +- Bytecodes::name(c), bcp, cp_index, new_index)); +- // the new value needs ldc_w instead of ldc +- u_char inst_buffer[4]; // max instruction size is 4 bytes +- bcp = (address)inst_buffer; +- // construct new instruction sequence +- *bcp = Bytecodes::_ldc_w; +- bcp++; +- // Rewriter::rewrite_method() does not rewrite ldc -> ldc_w. +- // See comment below for difference between put_Java_u2() +- // and put_native_u2(). +- Bytes::put_Java_u2(bcp, new_index); +- +- Relocator rc(method, NULL /* no RelocatorListener needed */); +- methodHandle m; +- { +- Pause_No_Safepoint_Verifier pnsv(&nsv); +- +- // ldc is 2 bytes and ldc_w is 3 bytes +- m = rc.insert_space_at(bci, 3, inst_buffer, THREAD); +- if (m.is_null() || HAS_PENDING_EXCEPTION) { +- guarantee(false, "insert_space_at() failed"); ++ Thread *THREAD = Thread::current(); ++ klassOop k; ++ Symbol* symbol; ++ ++ for (int i=1; i<cp->length(); i++) { ++ jbyte tag = cp->tag_at(i).value(); ++ switch(tag) { ++ case JVM_CONSTANT_Long: ++ case JVM_CONSTANT_Double: ++ i++; ++ break; ++ ++ case JVM_CONSTANT_Utf8: ++ case JVM_CONSTANT_Unicode: ++ case JVM_CONSTANT_Integer: ++ case JVM_CONSTANT_Float: ++ case JVM_CONSTANT_String: ++ case JVM_CONSTANT_Fieldref: ++ case JVM_CONSTANT_Methodref: ++ case JVM_CONSTANT_InterfaceMethodref: ++ case JVM_CONSTANT_ClassIndex: ++ case JVM_CONSTANT_UnresolvedString: ++ case JVM_CONSTANT_StringIndex: ++ case JVM_CONSTANT_UnresolvedClassInError: ++ case JVM_CONSTANT_Object: ++ // do nothing ++ break; ++ ++ case JVM_CONSTANT_Class: ++ k = cp->klass_at(i, CHECK_(true)); ++ if (is_class_dangerous(k)) { ++ RC_TRACE(0x00000002, ("Class %s is potentially affected, because at cp[%d] references class %s", ++ klass->name()->as_C_string(), ++ i, ++ k->klass_part()->name()->as_C_string())); ++ return true; ++ } ++ break; ++ ++ case JVM_CONSTANT_NameAndType: ++ symbol = cp->symbol_at(cp->signature_ref_index_at(i)); ++ if (symbol->byte_at(0) == '(') { ++ // This must be a method ++ SignatureStream signatureStream(symbol); ++ while (true) { ++ ++ if (signatureStream.is_array()) { ++ Symbol* cur_signature = signatureStream.as_symbol(Thread::current()); ++ if (is_type_signature_dangerous(cur_signature)) { ++ return true; ++ } ++ } else if (signatureStream.is_object()) { ++ if (is_symbol_dangerous(signatureStream.as_symbol(Thread::current()))) { ++ return true; ++ } ++ } ++ ++ if 
(signatureStream.at_return_type()) { ++ break; ++ } ++ ++ signatureStream.next(); + } ++ ++ } else if (is_type_signature_dangerous(symbol)) { ++ return true; + } ++ break; + +- // return the new method so that the caller can update +- // the containing class +- *new_method_p = method = m; +- // switch our bytecode processing loop from the old method +- // to the new method +- code_base = method->code_base(); +- code_length = method->code_size(); +- bcp = code_base + bci; +- c = (Bytecodes::Code)(*bcp); +- bc_length = Bytecodes::length_for(c); +- assert(bc_length != 0, "sanity check"); +- } // end we need ldc_w instead of ldc +- } // end if there is a mapped index +- } break; +- +- // these bytecodes have a two-byte constant pool index +- case Bytecodes::_anewarray : // fall through +- case Bytecodes::_checkcast : // fall through +- case Bytecodes::_getfield : // fall through +- case Bytecodes::_getstatic : // fall through +- case Bytecodes::_instanceof : // fall through +- case Bytecodes::_invokeinterface: // fall through +- case Bytecodes::_invokespecial : // fall through +- case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokevirtual : // fall through +- case Bytecodes::_ldc_w : // fall through +- case Bytecodes::_ldc2_w : // fall through +- case Bytecodes::_multianewarray : // fall through +- case Bytecodes::_new : // fall through +- case Bytecodes::_putfield : // fall through +- case Bytecodes::_putstatic : +- { +- address p = bcp + 1; +- int cp_index = Bytes::get_Java_u2(p); +- int new_index = find_new_index(cp_index); +- if (new_index != 0) { +- // the original index is mapped so update w/ new value +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- // Rewriter::rewrite_method() uses put_native_u2() in this +- // situation because it is reusing the constant pool index +- // location for a native index into the constantPoolCache. +- // Since we are updating the constant pool index prior to +- // verification and constantPoolCache initialization, we +- // need to keep the new index in Java byte order. +- Bytes::put_Java_u2(p, new_index); ++ case JVM_CONSTANT_UnresolvedClass: ++ symbol = cp->unresolved_klass_at(i); ++ if (is_symbol_dangerous(symbol)) { ++ return true; ++ } ++ break; ++ ++ default: ++ ShouldNotReachHere(); + } +- } break; ++ } ++ ++ return false; + } +- } // end for each bytecode +-} // end rewrite_cp_refs_in_method() + ++ bool is_type_signature_dangerous(Symbol* signature) { ++ // This must be a field type ++ if (FieldType::is_obj(signature)) { ++ Symbol* name = signature_to_class_name(signature); ++ if (is_symbol_dangerous(name)) { ++ return true; ++ } ++ } else if (FieldType::is_array(signature)) { ++ //jint dimension; ++ //Symbol* object_key; ++ FieldArrayInfo fd; ++ FieldType::get_array_info(signature, fd, Thread::current()); ++ if (is_symbol_dangerous(fd.object_key())) { ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ bool is_symbol_dangerous(Symbol* symbol) { ++ for (int i=0; i<_dangerous_klasses->length(); i++) { ++ if(_dangerous_klasses->at(i)->klass_part()->name() == symbol) { ++ RC_TRACE(0x00000002, ("Found constant pool index %d references class %s", ++ i, ++ symbol->as_C_string())); ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ virtual void do_object(oop obj) { + +-// Rewrite constant pool references in the class_annotations field. 
+-bool VM_RedefineClasses::rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ if (!_result) return; + +- typeArrayHandle class_annotations(THREAD, +- scratch_class->class_annotations()); +- if (class_annotations.is_null() || class_annotations->length() == 0) { +- // no class_annotations so nothing to do +- return true; ++ klassOop klassObj = (klassOop)obj; ++ Thread *THREAD = Thread::current(); ++ ++ // We found an instance klass! ++ instanceKlass *klass = instanceKlass::cast(klassObj); ++ instanceKlassHandle handle(klassObj); ++ ++ RC_TRACE(0x00000400, ("Check if verification is necessary for class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ ++ if (!can_be_affected(klass)) { ++ RC_TRACE(0x00000400, ("Skipping verification of class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ return; ++ } ++ ++ if (handle->major_version() < Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION) { ++ RC_TRACE(0x00000001, ("Failing because cannot verify class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ _result = false; ++ return; ++ } ++ ++ RC_TRACE(0x00000001, ("Verifying class %s", handle->name()->as_C_string())); ++ ++ if (!Verifier::verify(handle, Verifier::NoException, true, false, Thread::current())) { ++ ++ RC_TRACE(0x00000001, ("Verification of class %s failed", handle->name()->as_C_string())); ++ //Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ //RC_TRACE(0x00000002, ("exception when verifying class: '%s'", ex_name->as_C_string()); ++ //PENDING_EXCEPTION->print(); ++ CLEAR_PENDING_EXCEPTION; ++ _result = false; ++ } ++ ++ /*int method_count = klass->methods()->length(); ++ for (int i=0; i<method_count; i++) { ++ methodOop cur_method = (methodOop)klass->methods()->obj_at(i); ++ if (!check_method(cur_method)) { ++ RC_TRACE(0x00000001, ("Failed to verify consistency of method %s of klass %s", cur_method->name()->as_C_string(), klass->name()->as_C_string()); ++ } ++ }*/ ++ } ++ }; ++ ++ // TODO: Check bytecodes in case of interface => class or class => interface etc.. ++ ++ GrowableArray<klassOop> dangerous_klasses; ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle handle = _new_classes->at(i); ++ if (handle->check_redefinition_flag(Klass::RemoveSuperType)) { ++ dangerous_klasses.append(handle()); ++ } + } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_annotations length=%d", class_annotations->length())); ++ CheckLoadedMethodsClosure checkLoadedMethodsClosure(&dangerous_klasses); ++ Thread::current()->set_pretend_new_universe(true); ++ SystemDictionary::classes_do(&checkLoadedMethodsClosure); ++ Thread::current()->set_pretend_new_universe(false); + +- int byte_i = 0; // byte index into class_annotations +- return rewrite_cp_refs_in_annotations_typeArray(class_annotations, byte_i, +- THREAD); ++ ++ return checkLoadedMethodsClosure.result(); + } + ++bool VM_RedefineClasses::check_type_consistency() { + +-// Rewrite constant pool references in an annotations typeArray. 
This +-// "structure" is adapted from the RuntimeVisibleAnnotations_attribute +-// that is described in section 4.8.15 of the 2nd-edition of the VM spec: +-// +-// annotations_typeArray { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { ++ Universe::set_verify_in_progress(true); + +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for num_annotations field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for num_annotations field")); ++ SystemDictionary::classes_do(calculate_type_check_information); ++ bool result = check_field_value_types(); ++ SystemDictionary::classes_do(clear_type_check_information); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong field or array element value!")); ++ Universe::set_verify_in_progress(false); ++ return false; ++ } ++ ++ result = check_method_stacks(); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong value on the stack")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- u2 num_annotations = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; ++ result = check_loaded_methods(); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong loaded method")); ++ Universe::set_verify_in_progress(false); ++ return false; ++ } ++ ++ RC_TRACE(0x00000001, ("Verification passed => hierarchy change is valid!")); ++ Universe::set_verify_in_progress(false); ++ return true; ++} ++ ++void VM_RedefineClasses::rollback() { ++ RC_TRACE(0x00000001, ("Rolling back redefinition!")); ++ SystemDictionary::rollback_redefinition(); ++ ++ RC_TRACE(0x00000001, ("After rolling back system dictionary!")); ++ for (int i=0; i<_new_classes->length(); i++) { ++ SystemDictionary::remove_from_hierarchy(_new_classes->at(i)); ++ } ++ ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle new_class = _new_classes->at(i); ++ new_class->set_redefining(false); ++ new_class->old_version()->klass_part()->set_new_version(NULL); ++ new_class->set_old_version(NULL); ++ } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_annotations=%d", num_annotations)); ++} + +- int calc_num_annotations = 0; +- for (; calc_num_annotations < num_annotations; calc_num_annotations++) { +- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad annotation_struct at %d", calc_num_annotations)); +- // propagate failure back to caller +- return false; ++template <class T> void VM_RedefineClasses::do_oop_work(T* p) { ++ T heap_oop = oopDesc::load_heap_oop(p); ++ if (!oopDesc::is_null(heap_oop)) { ++ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); ++ if (obj->is_instanceKlass()) { ++ klassOop klass = (klassOop)obj; ++ // DCEVM: note: can overwrite owner of old_klass constants pool with new_klass, so we need to fix it back later ++ if (klass->new_version() != NULL && klass->new_version()->klass_part()->is_redefining()) { ++ obj = klass->klass_part()->new_version(); ++ oopDesc::encode_store_heap_oop_not_null(p, obj); ++ } ++ } else if (obj->blueprint()->newest_version() == SystemDictionary::Class_klass()->klass_part()->newest_version()) { ++ // update references to java.lang.Class to point to newest version. 
Only update references to non-primitive ++ // java.lang.Class instances. ++ klassOop klass_oop = java_lang_Class::as_klassOop(obj); ++ if (klass_oop != NULL) { ++ if (klass_oop->new_version() != NULL && klass_oop->new_version()->klass_part()->is_redefining()) { ++ obj = klass_oop->new_version()->java_mirror(); ++ } else if (klass_oop->klass_part()->is_redefining()) { ++ obj = klass_oop->java_mirror(); ++ } ++ oopDesc::encode_store_heap_oop_not_null(p, obj); ++ ++ ++ // FIXME: DCEVM: better implementation? ++ // Starting from JDK 7 java_mirror can be kept in the regular heap. Therefore, it is possible ++ // that new java_mirror is in the young generation whereas p is in tenured generation. In that ++ // case we need to run write barrier to make sure card table is properly updated. This will ++ // allow JVM to detect reference in tenured generation properly during young generation GC. ++ if (Universe::heap()->is_in_reserved(p)) { ++ if (GenCollectedHeap::heap()->is_in_young(obj)) { ++ GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); ++ assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); ++ CardTableRS* _rs = (CardTableRS*)rs; ++ _rs->inline_write_ref_field_gc(p, obj); ++ } ++ } ++ } + } + } +- assert(num_annotations == calc_num_annotations, "sanity check"); ++} + +- return true; +-} // end rewrite_cp_refs_in_annotations_typeArray() ++void VM_RedefineClasses::swap_marks(oop first, oop second) { ++ markOop first_mark = first->mark(); ++ markOop second_mark = second->mark(); ++ first->set_mark(second_mark); ++ second->set_mark(first_mark); ++} + ++void VM_RedefineClasses::doit() { ++ Thread *thread = Thread::current(); + +-// Rewrite constant pool references in the annotation struct portion of +-// an annotations_typeArray. This "structure" is from section 4.8.15 of +-// the 2nd-edition of the VM spec: +-// +-// struct annotation { +-// u2 type_index; +-// u2 num_element_value_pairs; +-// { +-// u2 element_name_index; +-// element_value value; +-// } element_value_pairs[num_element_value_pairs]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- if ((byte_i_ref + 2 + 2) > annotations_typeArray->length()) { +- // not enough room for smallest annotation_struct +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for annotation_struct")); +- return false; +- } ++ RC_TRACE(0x00000001, ("Entering doit!")); + +- u2 type_index = rewrite_cp_ref_in_annotation_data(annotations_typeArray, +- byte_i_ref, "mapped old type_index=%d", THREAD); + +- u2 num_element_value_pairs = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr( +- byte_i_ref)); +- byte_i_ref += 2; ++ if ((_max_redefinition_flags & Klass::RemoveSuperType) != 0) { + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_index=%d num_element_value_pairs=%d", type_index, +- num_element_value_pairs)); ++ RC_TIMER_START(_timer_check_type); + +- int calc_num_element_value_pairs = 0; +- for (; calc_num_element_value_pairs < num_element_value_pairs; +- calc_num_element_value_pairs++) { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for another element_name_index, let alone +- // the rest of another component +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for element_name_index")); +- return false; ++ if (!check_type_consistency()) { ++ // (tw) TODO: Rollback the class redefinition ++ rollback(); ++ RC_TRACE(0x00000001, ("Detected type 
inconsistency!")); ++ _result = JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ RC_TIMER_STOP(_timer_check_type); ++ return; + } + +- u2 element_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old element_name_index=%d", THREAD); ++ RC_TIMER_STOP(_timer_check_type); ++ ++ } else { ++ RC_TRACE(0x00000001, ("No type narrowing => skipping check for type inconsistency")); ++ } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("element_name_index=%d", element_name_index)); ++ if (UseMethodForwardPoints) { ++ RC_TRACE(0x00000001, ("Check stack for forwarding methods to new version")); ++ method_forwarding(); ++ } + +- if (!rewrite_cp_refs_in_element_value(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad element_value at %d", calc_num_element_value_pairs)); +- // propagate failure back to caller +- return false; ++ if (UseSharedSpaces) { ++ // Sharing is enabled so we remap the shared readonly space to ++ // shared readwrite, private just in case we need to redefine ++ // a shared class. We do the remap during the doit() phase of ++ // the safepoint to be safer. ++ if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { ++ RC_TRACE(0x00000001, ("failed to remap shared readonly space to readwrite, private")); ++ _result = JVMTI_ERROR_INTERNAL; ++ return; + } +- } // end for each component +- assert(num_element_value_pairs == calc_num_element_value_pairs, +- "sanity check"); ++ } + +- return true; +-} // end rewrite_cp_refs_in_annotation_struct() +- +- +-// Rewrite a constant pool reference at the current position in +-// annotations_typeArray if needed. Returns the original constant +-// pool reference if a rewrite was not needed or the new constant +-// pool reference if a rewrite was needed. +-u2 VM_RedefineClasses::rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS) { +- +- address cp_index_addr = (address) +- annotations_typeArray->byte_at_addr(byte_i_ref); +- u2 old_cp_index = Bytes::get_Java_u2(cp_index_addr); +- u2 new_cp_index = find_new_index(old_cp_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, (trace_mesg, old_cp_index)); +- Bytes::put_Java_u2(cp_index_addr, new_cp_index); +- old_cp_index = new_cp_index; +- } +- byte_i_ref += 2; +- return old_cp_index; +-} ++ RC_TIMER_START(_timer_prepare_redefinition); ++ for (int i = 0; i < _new_classes->length(); i++) { ++ redefine_single_class(_new_classes->at(i), thread); ++ } + ++ // Deoptimize all compiled code that depends on this class ++ flush_dependent_code(instanceKlassHandle(Thread::current(), (klassOop)NULL), Thread::current()); + +-// Rewrite constant pool references in the element_value portion of an +-// annotations_typeArray. This "structure" is from section 4.8.15.1 of +-// the 2nd-edition of the VM spec: +-// +-// struct element_value { +-// u1 tag; +-// union { +-// u2 const_value_index; +-// { +-// u2 type_name_index; +-// u2 const_name_index; +-// } enum_const_value; +-// u2 class_info_index; +-// annotation annotation_value; +-// struct { +-// u2 num_values; +-// element_value values[num_values]; +-// } array_value; +-// } value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_element_value( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { ++ // Adjust constantpool caches and vtables for all classes ++ // that reference methods of the evolved class. 
++ SystemDictionary::classes_do(adjust_cpool_cache, Thread::current()); + +- if ((byte_i_ref + 1) > annotations_typeArray->length()) { +- // not enough room for a tag let alone the rest of an element_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a tag")); +- return false; +- } ++ RC_TIMER_STOP(_timer_prepare_redefinition); ++ RC_TIMER_START(_timer_redefinition); + +- u1 tag = annotations_typeArray->byte_at(byte_i_ref); +- byte_i_ref++; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("tag='%c'", tag)); +- +- switch (tag) { +- // These BaseType tag values are from Table 4.2 in VM spec: +- case 'B': // byte +- case 'C': // char +- case 'D': // double +- case 'F': // float +- case 'I': // int +- case 'J': // long +- case 'S': // short +- case 'Z': // boolean +- +- // The remaining tag values are from Table 4.8 in the 2nd-edition of +- // the VM spec: +- case 's': +- { +- // For the above tag values (including the BaseType values), +- // value.const_value_index is right union field. ++ class ChangePointersOopClosure : public OopClosure { ++ virtual void do_oop(oop* o) { ++ do_oop_work(o); ++ } + +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a const_value_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a const_value_index")); +- return false; ++ virtual void do_oop(narrowOop* o) { ++ do_oop_work(o); + } ++ }; + +- u2 const_value_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_value_index=%d", THREAD); ++ class ChangePointersObjectClosure : public ObjectClosure { + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("const_value_index=%d", const_value_index)); +- } break; ++ private: + +- case 'e': +- { +- // for the above tag value, value.enum_const_value is right union field ++ OopClosure *_closure; ++ bool _needs_instance_update; ++ GrowableArray<oop> *_updated_oops; + +- if ((byte_i_ref + 4) > annotations_typeArray->length()) { +- // not enough room for a enum_const_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a enum_const_value")); +- return false; +- } ++ public: ++ ChangePointersObjectClosure(OopClosure *closure) : _closure(closure), _needs_instance_update(false), _updated_oops(NULL) {} + +- u2 type_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old type_name_index=%d", THREAD); ++ bool needs_instance_update() { ++ return _needs_instance_update; ++ } + +- u2 const_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_name_index=%d", THREAD); ++ GrowableArray<oop> *updated_oops() { return _updated_oops; } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_name_index=%d const_name_index=%d", type_name_index, +- const_name_index)); +- } break; ++ virtual void do_object(oop obj) { ++ if (!obj->is_instanceKlass()) { ++ obj->oop_iterate(_closure); ++ ++ if (obj->blueprint()->is_redefining()) { + +- case 'c': +- { +- // for the above tag value, value.class_info_index is right union field ++ if (obj->blueprint()->check_redefinition_flag(Klass::HasInstanceTransformer)) { ++ if (_updated_oops == NULL) { ++ _updated_oops = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(100, true); ++ } ++ _updated_oops->append(obj); ++ } + +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a class_info_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small 
for a class_info_index")); +- return false; +- } ++ if(obj->blueprint()->update_information() != NULL || obj->is_perm()) { + +- u2 class_info_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old class_info_index=%d", THREAD); ++ assert(obj->blueprint()->old_version() != NULL, "must have old version"); ++ obj->set_klass_no_check(obj->blueprint()->old_version()); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_info_index=%d", class_info_index)); +- } break; ++ if (obj->size() != obj->size_given_klass(obj->blueprint()->new_version()->klass_part()) || obj->is_perm()) { ++ // We need an instance update => set back to old klass ++ _needs_instance_update = true; + +- case '@': +- // For the above tag value, value.attr_value is the right union +- // field. This is a nested annotation. +- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- break; ++ } else { ++ MarkSweep::update_fields(obj, obj); ++ assert(obj->blueprint()->is_redefining(), "update fields resets the klass"); ++ } ++ } ++ } + +- case '[': +- { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a num_values field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_values field")); +- return false; +- } +- +- // For the above tag value, value.array_value is the right union +- // field. This is an array of nested element_value. +- u2 num_values = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("num_values=%d", num_values)); +- +- int calc_num_values = 0; +- for (; calc_num_values < num_values; calc_num_values++) { +- if (!rewrite_cp_refs_in_element_value( +- annotations_typeArray, byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad nested element_value at %d", calc_num_values)); +- // propagate failure back to caller +- return false; ++ } else { ++ instanceKlass *klass = instanceKlass::cast((klassOop)obj); ++ if (klass->is_redefining()) { ++ // DCEVM: We need to restore the constant pool owner which was updated by do_oop_work ++ instanceKlass* old_klass = instanceKlass::cast(klass->old_version()); ++ old_klass->constants()->set_pool_holder(klass->old_version()); ++ ++ // Initialize the new class! Special static initialization that does not execute the ++ // static constructor but copies static field values from the old class if name ++ // and signature of a static field match. ++ klass->initialize_redefined_class(); ++ }
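The special static initialization mentioned just above pairs old and new static fields by name and signature rather than by offset, since redefinition may have changed the field layout. Below is a minimal standalone sketch of that matching step under stated assumptions: StaticField, copy_matching_statics, and the flat-struct layout are illustrative inventions for this note, not HotSpot or DCEVM source.

#include <cstring>
#include <cstddef>

// Hypothetical flattened view of one static field (not HotSpot's real layout).
struct StaticField {
  const char* name;        // field name, e.g. "counter"
  const char* signature;   // JVM type signature, e.g. "I"
  void*       value_addr;  // where the static value is stored
  size_t      value_size;  // size of the stored value in bytes
};

// Copy each old static value into the new class's field with the same name
// and signature; new fields without a match keep their default values.
void copy_matching_statics(const StaticField* old_fields, size_t old_count,
                           StaticField* new_fields, size_t new_count) {
  for (size_t n = 0; n < new_count; n++) {
    for (size_t o = 0; o < old_count; o++) {
      if (std::strcmp(new_fields[n].name, old_fields[o].name) == 0 &&
          std::strcmp(new_fields[n].signature, old_fields[o].signature) == 0 &&
          new_fields[n].value_size == old_fields[o].value_size) {
        std::memcpy(new_fields[n].value_addr, old_fields[o].value_addr,
                    new_fields[n].value_size);
        break;
      }
    }
  }
}

Matching on the signature as well as the name is what prevents, for example, an old int value from being copied into a field that was redeclared as long.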
++ // idubrov: FIXME: we probably don't need that since oops will be visited in a regular way... ++ // idubrov: need to check if there is a test to verify that fields referencing the class being updated ++ // idubrov: will get the new version of that class ++ //klass->iterate_static_fields(_closure); + } + } +- assert(num_values == calc_num_values, "sanity check"); +- } break; ++ }; + +- default: +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("bad tag=0x%x", tag)); +- return false; +- } // end decode tag field ++ ChangePointersOopClosure oopClosure; ++ ChangePointersObjectClosure objectClosure(&oopClosure); + +- return true; +-} // end rewrite_cp_refs_in_element_value() ++ { ++ SharedHeap::heap()->gc_prologue(true); ++ Universe::root_oops_do(&oopClosure); ++ Universe::heap()->object_iterate(&objectClosure); ++ SharedHeap::heap()->gc_epilogue(false); ++ } + ++ // Swap marks so that identity hash codes are preserved across the old and new versions ++ for (int i=0; i<_new_classes->length(); i++) { ++ swap_marks(_new_classes->at(i)(), _new_classes->at(i)->old_version()); ++ swap_marks(_new_classes->at(i)->java_mirror(), _new_classes->at(i)->old_version()->java_mirror()); ++ } + +-// Rewrite constant pool references in a fields_annotations field. +-bool VM_RedefineClasses::rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ _updated_oops = objectClosure.updated_oops(); + +- objArrayHandle fields_annotations(THREAD, +- scratch_class->fields_annotations()); ++ if (objectClosure.needs_instance_update()){ + +- if (fields_annotations.is_null() || fields_annotations->length() == 0) { +- // no fields_annotations so nothing to do +- return true; ++ // Do a full garbage collection to update the instance sizes accordingly ++ RC_TRACE(0x00000001, ("Before performing full GC!")); ++ Universe::set_redefining_gc_run(true); ++ JvmtiGCMarker jgcm; ++ notify_gc_begin(true); ++ Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); ++ notify_gc_end(); ++ Universe::set_redefining_gc_run(false); ++ RC_TRACE(0x00000001, ("GC done!")); + } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("fields_annotations length=%d", fields_annotations->length())); +- +- for (int i = 0; i < fields_annotations->length(); i++) { +- typeArrayHandle field_annotations(THREAD, +- (typeArrayOop)fields_annotations->obj_at(i)); +- if (field_annotations.is_null() || field_annotations->length() == 0) { +- // this field does not have any annotations so skip it +- continue; +- } + +- int byte_i = 0; // byte index into field_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(field_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad field_annotations at %d", i)); +- // propagate failure back to caller +- return false; ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ if (_updated_oops != NULL) { ++ RC_TRACE(0x00000001, ("%d object(s) updated!", _updated_oops->length())); ++ } else { ++ RC_TRACE(0x00000001, ("No objects updated!")); + } + } + +- return true; +-} // end rewrite_cp_refs_in_fields_annotations() +- +- +-// Rewrite constant pool references in a methods_annotations field. 
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ // Unmark klassOops as "redefining" ++ for (int i=0; i<_new_classes->length(); i++) { ++ klassOop cur = _new_classes->at(i)(); ++ _new_classes->at(i)->set_redefining(false); ++ _new_classes->at(i)->clear_update_information(); ++ _new_classes->at(i)->update_supers_to_newest_version(); + +- objArrayHandle methods_annotations(THREAD, +- scratch_class->methods_annotations()); ++ if (((instanceKlass *)cur->klass_part()->old_version()->klass_part())->array_klasses() != NULL) { ++ update_array_classes_to_newest_version(((instanceKlass *)cur->klass_part()->old_version()->klass_part())->array_klasses()); + +- if (methods_annotations.is_null() || methods_annotations->length() == 0) { +- // no methods_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_annotations length=%d", methods_annotations->length())); ++ // Transfer the array classes, otherwise we might get cast exceptions when casting array types. ++ ((instanceKlass*)cur->klass_part())->set_array_klasses(((instanceKlass*)cur->klass_part()->old_version()->klass_part())->array_klasses()); + +- for (int i = 0; i < methods_annotations->length(); i++) { +- typeArrayHandle method_annotations(THREAD, +- (typeArrayOop)methods_annotations->obj_at(i)); +- if (method_annotations.is_null() || method_annotations->length() == 0) { +- // this method does not have any annotations so skip it +- continue; ++ oop new_mirror = _new_classes->at(i)->java_mirror(); ++ oop old_mirror = _new_classes->at(i)->old_version()->java_mirror(); ++ java_lang_Class::set_array_klass(new_mirror, java_lang_Class::array_klass(old_mirror)); + } ++ } + +- int byte_i = 0; // byte index into method_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(method_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_annotations at %d", i)); +- // propagate failure back to caller +- return false; +- } ++ for (int i=T_BOOLEAN; i<=T_LONG; i++) { ++ update_array_classes_to_newest_version(Universe::typeArrayKlassObj((BasicType)i)); + } + +- return true; +-} // end rewrite_cp_refs_in_methods_annotations() ++ // Disable any dependent concurrent compilations ++ SystemDictionary::notice_modification(); + ++ // Set flag indicating that some invariants are no longer true. ++ // See jvmtiExport.hpp for detailed explanation. ++ JvmtiExport::set_has_redefined_a_class(); + +-// Rewrite constant pool references in a methods_parameter_annotations +-// field. 
This "structure" is adapted from the +-// RuntimeVisibleParameterAnnotations_attribute described in section +-// 4.8.17 of the 2nd-edition of the VM spec: +-// +-// methods_parameter_annotations_typeArray { +-// u1 num_parameters; +-// { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } parameter_annotations[num_parameters]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ // Clean up caches in the compiler interface and compiler threads ++ CompileBroker::cleanup_after_redefinition(); + +- objArrayHandle methods_parameter_annotations(THREAD, +- scratch_class->methods_parameter_annotations()); ++#ifdef ASSERT + +- if (methods_parameter_annotations.is_null() +- || methods_parameter_annotations->length() == 0) { +- // no methods_parameter_annotations so nothing to do +- return true; +- } ++ // Universe::verify(); ++ // JNIHandles::verify(); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_parameter_annotations length=%d", +- methods_parameter_annotations->length())); ++ SystemDictionary::classes_do(check_class, thread); ++#endif + +- for (int i = 0; i < methods_parameter_annotations->length(); i++) { +- typeArrayHandle method_parameter_annotations(THREAD, +- (typeArrayOop)methods_parameter_annotations->obj_at(i)); +- if (method_parameter_annotations.is_null() +- || method_parameter_annotations->length() == 0) { +- // this method does not have any parameter annotations so skip it +- continue; +- } ++ update_active_methods(); ++ RC_TIMER_STOP(_timer_redefinition); + +- if (method_parameter_annotations->length() < 1) { +- // not enough room for a num_parameters field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_parameters field at %d", i)); +- return false; +- } ++} + +- int byte_i = 0; // byte index into method_parameter_annotations ++void VM_RedefineClasses::update_array_classes_to_newest_version(klassOop smallest_dimension) { + +- u1 num_parameters = method_parameter_annotations->byte_at(byte_i); +- byte_i++; ++ arrayKlass *curArrayKlass = arrayKlass::cast(smallest_dimension); ++ assert(curArrayKlass->lower_dimension() == NULL, "argument must be smallest dimension"); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_parameters=%d", num_parameters)); + +- int calc_num_parameters = 0; +- for (; calc_num_parameters < num_parameters; calc_num_parameters++) { +- if (!rewrite_cp_refs_in_annotations_typeArray( +- method_parameter_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_parameter_annotations at %d", calc_num_parameters)); +- // propagate failure back to caller +- return false; +- } ++ while (curArrayKlass != NULL) { ++ klassOop higher_dimension = curArrayKlass->higher_dimension(); ++ klassOop lower_dimension = curArrayKlass->lower_dimension(); ++ curArrayKlass->update_supers_to_newest_version(); ++ ++ curArrayKlass = NULL; ++ if (higher_dimension != NULL) { ++ curArrayKlass = arrayKlass::cast(higher_dimension); + } +- assert(num_parameters == calc_num_parameters, "sanity check"); + } + +- return true; +-} // end rewrite_cp_refs_in_methods_parameter_annotations() ++} ++ ++void VM_RedefineClasses::doit_epilogue() { + ++ RC_TIMER_START(_timer_vm_op_epilogue); + +-// Rewrite constant pool references in a methods_default_annotations +-// field. 
This "structure" is adapted from the AnnotationDefault_attribute +-// that is described in section 4.8.19 of the 2nd-edition of the VM spec: +-// +-// methods_default_annotations_typeArray { +-// element_value default_value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ unlock_threads(); + +- objArrayHandle methods_default_annotations(THREAD, +- scratch_class->methods_default_annotations()); ++ ResourceMark mark; + +- if (methods_default_annotations.is_null() +- || methods_default_annotations->length() == 0) { +- // no methods_default_annotations so nothing to do +- return true; +- } ++ VM_GC_Operation::doit_epilogue(); ++ RC_TRACE(0x00000001, ("GC Operation epilogue finished! ")); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_default_annotations length=%d", +- methods_default_annotations->length())); ++ GrowableArray<methodHandle> instanceTransformerMethods; + +- for (int i = 0; i < methods_default_annotations->length(); i++) { +- typeArrayHandle method_default_annotations(THREAD, +- (typeArrayOop)methods_default_annotations->obj_at(i)); +- if (method_default_annotations.is_null() +- || method_default_annotations->length() == 0) { +- // this method does not have any default annotations so skip it +- continue; ++ // Call static transformers ++ for (int i=0; i<_new_classes->length(); i++) { ++ ++ instanceKlassHandle klass = _new_classes->at(i); ++ ++ // Transfer init state ++ if (klass->old_version() != NULL) { ++ instanceKlass::ClassState state = instanceKlass::cast(klass->old_version())->init_state(); ++ if (state > instanceKlass::linked) { ++ klass->initialize(Thread::current()); ++ } + } ++ ++ // Find instance transformer method + +- int byte_i = 0; // byte index into method_default_annotations ++ if (klass->check_redefinition_flag(Klass::HasInstanceTransformer)) { + +- if (!rewrite_cp_refs_in_element_value( +- method_default_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad default element_value at %d", i)); +- // propagate failure back to caller +- return false; ++ RC_TRACE(0x00008000, ("Call instance transformer of %s instance", klass->name()->as_C_string())); ++ klassOop cur_klass = klass(); ++ while (cur_klass != NULL) { ++ methodOop method = ((instanceKlass*)cur_klass->klass_part())->find_method(vmSymbols::transformer_name(), vmSymbols::void_method_signature()); ++ if (method != NULL) { ++ methodHandle instanceTransformerMethod(method); ++ instanceTransformerMethods.append(instanceTransformerMethod); ++ break; ++ } else { ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ } ++ assert(cur_klass != NULL, "must have instance transformer method"); ++ } else { ++ instanceTransformerMethods.append(methodHandle(Thread::current(), NULL)); + } + } + +- return true; +-} // end rewrite_cp_refs_in_methods_default_annotations() + ++ // Call instance transformers ++ if (_updated_oops != NULL) { + +-// Rewrite constant pool references in the method's stackmap table. 
+-// These "structures" are adapted from the StackMapTable_attribute that +-// is described in section 4.8.4 of the 6.0 version of the VM spec +-// (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// stack_map { +-// u2 number_of_entries; +-// stack_map_frame entries[number_of_entries]; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table( +- methodHandle method, TRAPS) { +- +- if (!method->has_stackmap_table()) { +- return; +- } +- +- typeArrayOop stackmap_data = method->stackmap_data(); +- address stackmap_p = (address)stackmap_data->byte_at_addr(0); +- address stackmap_end = stackmap_p + stackmap_data->length(); +- +- assert(stackmap_p + 2 <= stackmap_end, "no room for number_of_entries"); +- u2 number_of_entries = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("number_of_entries=%u", number_of_entries)); +- +- // walk through each stack_map_frame +- u2 calc_number_of_entries = 0; +- for (; calc_number_of_entries < number_of_entries; calc_number_of_entries++) { +- // The stack_map_frame structure is a u1 frame_type followed by +- // 0 or more bytes of data: +- // +- // union stack_map_frame { +- // same_frame; +- // same_locals_1_stack_item_frame; +- // same_locals_1_stack_item_frame_extended; +- // chop_frame; +- // same_frame_extended; +- // append_frame; +- // full_frame; +- // } +- +- assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type"); +- // The Linux compiler does not like frame_type to be u1 or u2. It +- // issues the following warning for the first if-statement below: +- // +- // "warning: comparison is always true due to limited range of data type" +- // +- u4 frame_type = *stackmap_p; +- stackmap_p++; +- +- // same_frame { +- // u1 frame_type = SAME; /* 0-63 */ +- // } +- if (frame_type >= 0 && frame_type <= 63) { +- // nothing more to do for same_frame +- } +- +- // same_locals_1_stack_item_frame { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM; /* 64-127 */ +- // verification_type_info stack[1]; +- // } +- else if (frame_type >= 64 && frame_type <= 127) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // reserved for future use +- else if (frame_type >= 128 && frame_type <= 246) { +- // nothing more to do for reserved frame_types +- } +- +- // same_locals_1_stack_item_frame_extended { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */ +- // u2 offset_delta; +- // verification_type_info stack[1]; +- // } +- else if (frame_type == 247) { +- stackmap_p += 2; +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // chop_frame { +- // u1 frame_type = CHOP; /* 248-250 */ +- // u2 offset_delta; +- // } +- else if (frame_type >= 248 && frame_type <= 250) { +- stackmap_p += 2; +- } +- +- // same_frame_extended { +- // u1 frame_type = SAME_FRAME_EXTENDED; /* 251*/ +- // u2 offset_delta; +- // } +- else if (frame_type == 251) { +- stackmap_p += 2; +- } +- +- // append_frame { +- // u1 frame_type = APPEND; /* 252-254 */ +- // u2 offset_delta; +- // verification_type_info locals[frame_type - 251]; +- // } +- else if (frame_type >= 252 && frame_type <= 254) { +- assert(stackmap_p + 2 <= stackmap_end, +- "no room for offset_delta"); +- stackmap_p += 2; +- u1 len = frame_type - 251; +- for (u1 i = 0; i < len; i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, 
+- calc_number_of_entries, frame_type, THREAD); +- } +- } +- +- // full_frame { +- // u1 frame_type = FULL_FRAME; /* 255 */ +- // u2 offset_delta; +- // u2 number_of_locals; +- // verification_type_info locals[number_of_locals]; +- // u2 number_of_stack_items; +- // verification_type_info stack[number_of_stack_items]; +- // } +- else if (frame_type == 255) { +- assert(stackmap_p + 2 + 2 <= stackmap_end, +- "no room for smallest full_frame"); +- stackmap_p += 2; +- +- u2 number_of_locals = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 locals_i = 0; locals_i < number_of_locals; locals_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // Use the largest size for the number_of_stack_items, but only get +- // the right number of bytes. +- u2 number_of_stack_items = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 stack_i = 0; stack_i < number_of_stack_items; stack_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- } // end while there is a stack_map_frame +- assert(number_of_entries == calc_number_of_entries, "sanity check"); +-} // end rewrite_cp_refs_in_stack_map_table() +- +- +-// Rewrite constant pool references in the verification type info +-// portion of the method's stackmap table. These "structures" are +-// adapted from the StackMapTable_attribute that is described in +-// section 4.8.4 of the 6.0 version of the VM spec (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// The verification_type_info structure is a u1 tag followed by 0 or +-// more bytes of data: +-// +-// union verification_type_info { +-// Top_variable_info; +-// Integer_variable_info; +-// Float_variable_info; +-// Long_variable_info; +-// Double_variable_info; +-// Null_variable_info; +-// UninitializedThis_variable_info; +-// Object_variable_info; +-// Uninitialized_variable_info; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_verification_type_info( +- address& stackmap_p_ref, address stackmap_end, u2 frame_i, +- u1 frame_type, TRAPS) { +- +- assert(stackmap_p_ref + 1 <= stackmap_end, "no room for tag"); +- u1 tag = *stackmap_p_ref; +- stackmap_p_ref++; +- +- switch (tag) { +- // Top_variable_info { +- // u1 tag = ITEM_Top; /* 0 */ +- // } +- // verificationType.hpp has zero as ITEM_Bogus instead of ITEM_Top +- case 0: // fall through +- +- // Integer_variable_info { +- // u1 tag = ITEM_Integer; /* 1 */ +- // } +- case ITEM_Integer: // fall through +- +- // Float_variable_info { +- // u1 tag = ITEM_Float; /* 2 */ +- // } +- case ITEM_Float: // fall through +- +- // Double_variable_info { +- // u1 tag = ITEM_Double; /* 3 */ +- // } +- case ITEM_Double: // fall through +- +- // Long_variable_info { +- // u1 tag = ITEM_Long; /* 4 */ +- // } +- case ITEM_Long: // fall through +- +- // Null_variable_info { +- // u1 tag = ITEM_Null; /* 5 */ +- // } +- case ITEM_Null: // fall through +- +- // UninitializedThis_variable_info { +- // u1 tag = ITEM_UninitializedThis; /* 6 */ +- // } +- case ITEM_UninitializedThis: +- // nothing more to do for the above tag types +- break; ++ for (int i=0; i<_updated_oops->length(); i++) { ++ assert(_updated_oops->at(i) != NULL, "must not be null!"); ++ Handle cur(_updated_oops->at(i)); ++ instanceKlassHandle klass(cur->klass()); + +- // Object_variable_info { +- // u1 tag = ITEM_Object; /* 7 */ +- // u2 cpool_index; +- // } +- 
case ITEM_Object: +- { +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for cpool_index"); +- u2 cpool_index = Bytes::get_Java_u2(stackmap_p_ref); +- u2 new_cp_index = find_new_index(cpool_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("mapped old cpool_index=%d", cpool_index)); +- Bytes::put_Java_u2(stackmap_p_ref, new_cp_index); +- cpool_index = new_cp_index; +- } +- stackmap_p_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, cpool_index=%d", frame_i, +- frame_type, cpool_index)); +- } break; +- +- // Uninitialized_variable_info { +- // u1 tag = ITEM_Uninitialized; /* 8 */ +- // u2 offset; +- // } +- case ITEM_Uninitialized: +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for offset"); +- stackmap_p_ref += 2; +- break; ++ if (klass->check_redefinition_flag(Klass::HasInstanceTransformer)) { + +- default: +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, bad tag=0x%x", frame_i, frame_type, tag)); +- ShouldNotReachHere(); +- break; +- } // end switch (tag) +-} // end rewrite_cp_refs_in_verification_type_info() +- +- +-// Change the constant pool associated with klass scratch_class to +-// scratch_cp. If shrink is true, then scratch_cp_length elements +-// are copied from scratch_cp to a smaller constant pool and the +-// smaller constant pool is associated with scratch_class. +-void VM_RedefineClasses::set_new_constant_pool( +- instanceKlassHandle scratch_class, constantPoolHandle scratch_cp, +- int scratch_cp_length, bool shrink, TRAPS) { +- assert(!shrink || scratch_cp->length() >= scratch_cp_length, "sanity check"); +- +- if (shrink) { +- // scratch_cp is a merged constant pool and has enough space for a +- // worst case merge situation. We want to associate the minimum +- // sized constant pool with the klass to save space. 
+- constantPoolHandle smaller_cp(THREAD, +- oopFactory::new_constantPool(scratch_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- // preserve orig_length() value in the smaller copy +- int orig_length = scratch_cp->orig_length(); +- assert(orig_length != 0, "sanity check"); +- smaller_cp->set_orig_length(orig_length); +- scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD); +- scratch_cp = smaller_cp; +- smaller_cp()->set_is_conc_safe(true); +- } +- +- // attach new constant pool to klass +- scratch_cp->set_pool_holder(scratch_class()); +- +- // attach klass to new constant pool +- scratch_class->set_constants(scratch_cp()); +- +- int i; // for portability +- +- // update each field in klass to use new constant pool indices as needed +- for (JavaFieldStream fs(scratch_class); !fs.done(); fs.next()) { +- jshort cur_index = fs.name_index(); +- jshort new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-name_index change: %d to %d", cur_index, new_index)); +- fs.set_name_index(new_index); +- } +- cur_index = fs.signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-signature_index change: %d to %d", cur_index, new_index)); +- fs.set_signature_index(new_index); +- } +- cur_index = fs.initval_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-initval_index change: %d to %d", cur_index, new_index)); +- fs.set_initval_index(new_index); +- } +- cur_index = fs.generic_signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-generic_signature change: %d to %d", cur_index, new_index)); +- fs.set_generic_signature_index(new_index); +- } +- } // end for each field +- +- // Update constant pool indices in the inner classes info to use +- // new constant indices as needed. The inner classes info is a +- // quadruple: +- // (inner_class_info, outer_class_info, inner_name, inner_access_flags) +- InnerClassesIterator iter(scratch_class); +- for (; !iter.done(); iter.next()) { +- int cur_index = iter.inner_class_info_index(); +- if (cur_index == 0) { +- continue; // JVM spec. 
allows null inner class refs so skip it +- } +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_class_info change: %d to %d", cur_index, new_index)); +- iter.set_inner_class_info_index(new_index); +- } +- cur_index = iter.outer_class_info_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("outer_class_info change: %d to %d", cur_index, new_index)); +- iter.set_outer_class_info_index(new_index); +- } +- cur_index = iter.inner_name_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_name change: %d to %d", cur_index, new_index)); +- iter.set_inner_name_index(new_index); +- } +- } // end for each inner class +- +- // Attach each method in klass to the new constant pool and update +- // to use new constant pool indices as needed: +- objArrayHandle methods(THREAD, scratch_class->methods()); +- for (i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- method->set_constants(scratch_cp()); +- +- int new_index = find_new_index(method->name_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-name_index change: %d to %d", method->name_index(), +- new_index)); +- method->set_name_index(new_index); +- } +- new_index = find_new_index(method->signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-signature_index change: %d to %d", +- method->signature_index(), new_index)); +- method->set_signature_index(new_index); +- } +- new_index = find_new_index(method->generic_signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-generic_signature_index change: %d to %d", +- method->generic_signature_index(), new_index)); +- method->set_generic_signature_index(new_index); +- } +- +- // Update constant pool indices in the method's checked exception +- // table to use new constant indices as needed. +- int cext_length = method->checked_exceptions_length(); +- if (cext_length > 0) { +- CheckedExceptionElement * cext_table = +- method->checked_exceptions_start(); +- for (int j = 0; j < cext_length; j++) { +- int cur_index = cext_table[j].class_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("cext-class_cp_index change: %d to %d", cur_index, new_index)); +- cext_table[j].class_cp_index = (u2)new_index; +- } +- } // end for each checked exception table entry +- } // end if there are checked exception table entries +- +- // Update each catch type index in the method's exception table +- // to use new constant pool indices as needed. The exception table +- // holds quadruple entries of the form: +- // (beg_bci, end_bci, handler_bci, klass_index) +- +- ExceptionTable ex_table(method()); +- int ext_length = ex_table.length(); +- +- for (int j = 0; j < ext_length; j ++) { +- int cur_index = ex_table.catch_type_index(j); +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("ext-klass_index change: %d to %d", cur_index, new_index)); +- ex_table.set_catch_type_index(j, new_index); +- } +- } // end for each exception table entry +- +- // Update constant pool indices in the method's local variable +- // table to use new constant indices as needed. 
The local variable +- // table hold sextuple entries of the form: +- // (start_pc, length, name_index, descriptor_index, signature_index, slot) +- int lvt_length = method->localvariable_table_length(); +- if (lvt_length > 0) { +- LocalVariableTableElement * lv_table = +- method->localvariable_table_start(); +- for (int j = 0; j < lvt_length; j++) { +- int cur_index = lv_table[j].name_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-name_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].name_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].descriptor_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-descriptor_cp_index change: %d to %d", cur_index, +- new_index)); +- lv_table[j].descriptor_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].signature_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-signature_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].signature_cp_index = (u2)new_index; +- } +- } // end for each local variable table entry +- } // end if there are local variable table entries ++ methodHandle method = instanceTransformerMethods.at(klass->redefinition_index()); + +- rewrite_cp_refs_in_stack_map_table(method, THREAD); +- } // end for each method +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +-} // end set_new_constant_pool() ++ RC_TRACE(0x00008000, ("executing transformer method")); ++ ++ Thread *__the_thread__ = Thread::current(); ++ JavaValue result(T_VOID); ++ JavaCallArguments args(cur); ++ JavaCalls::call(&result, ++ method, ++ &args, ++ THREAD); + ++ // TODO: What to do with an exception here? ++ if (HAS_PENDING_EXCEPTION) { ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ RC_TRACE(0x00000002, ("exception when executing transformer: '%s'", ++ ex_name->as_C_string())); ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } ++ } + +-// Unevolving classes may point to methods of the_class directly +-// from their constant pool caches, itables, and/or vtables. We +-// use the SystemDictionary::classes_do() facility and this helper +-// to fix up these pointers. +-// +-// Note: We currently don't support updating the vtable in +-// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. +-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(klassOop k_oop, +- oop initiating_loader, TRAPS) { +- Klass *k = k_oop->klass_part(); +- if (k->oop_is_instance()) { +- HandleMark hm(THREAD); +- instanceKlass *ik = (instanceKlass *) k; ++ delete _updated_oops; ++ _updated_oops = NULL; ++ } + +- // HotSpot specific optimization! HotSpot does not currently +- // support delegation from the bootstrap class loader to a +- // user-defined class loader. This means that if the bootstrap +- // class loader is the initiating class loader, then it will also +- // be the defining class loader. This also means that classes +- // loaded by the bootstrap class loader cannot refer to classes +- // loaded by a user-defined class loader. Note: a user-defined +- // class loader can delegate to the bootstrap class loader. +- // +- // If the current class being redefined has a user-defined class +- // loader as its defining class loader, then we can skip all +- // classes loaded by the bootstrap class loader. 
+- bool is_user_defined = +- instanceKlass::cast(_the_class_oop)->class_loader() != NULL; +- if (is_user_defined && ik->class_loader() == NULL) { +- return; +- } ++ // Free the array of scratch classes ++ delete _new_classes; ++ _new_classes = NULL; ++ RC_TRACE(0x00000001, ("Redefinition finished!")); + +- // This is a very busy routine. We don't want too much tracing +- // printed out. +- bool trace_name_printed = false; +- +- // Very noisy: only enable this call if you are trying to determine +- // that a specific class gets found by this routine. +- // RC_TRACE macro has an embedded ResourceMark +- // RC_TRACE_WITH_THREAD(0x00100000, THREAD, +- // ("adjust check: name=%s", ik->external_name())); +- // trace_name_printed = true; +- +- // Fix the vtable embedded in the_class and subclasses of the_class, +- // if one exists. We discard scratch_class and we don't keep an +- // instanceKlass around to hold obsolete methods so we don't have +- // any other instanceKlass embedded vtables to update. The vtable +- // holds the methodOops for virtual (but not final) methods. +- if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { +- // ik->vtable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->vtable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // If the current class has an itable and we are either redefining an +- // interface or if the current class is a subclass of the_class, then +- // we potentially have to fix the itable. If we are redefining an +- // interface, then we have to call adjust_method_entries() for +- // every instanceKlass that has an itable since there isn't a +- // subclass relationship between an interface and an instanceKlass. +- if (ik->itable_length() > 0 && (Klass::cast(_the_class_oop)->is_interface() +- || ik->is_subclass_of(_the_class_oop))) { +- // ik->itable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->itable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // The constant pools in other classes (other_cp) can refer to +- // methods in the_class. We have to update method information in +- // other_cp's cache. If other_cp has a previous version, then we +- // have to repeat the process for each previous version. The +- // constant pool cache holds the methodOops for non-virtual +- // methods and for virtual, final methods. +- // +- // Special case: if the current class is the_class, then new_cp +- // has already been attached to the_class and old_cp has already +- // been added as a previous version. The new_cp doesn't have any +- // cached references to old methods so it doesn't need to be +- // updated. We can simply start with the previous version(s) in +- // that case. +- constantPoolHandle other_cp; +- constantPoolCacheOop cp_cache; +- +- if (k_oop != _the_class_oop) { +- // this klass' constant pool cache may need adjustment +- other_cp = constantPoolHandle(ik->constants()); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- { +- ResourceMark rm(THREAD); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. 
We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. +- { +- // the previous versions' constant pool caches may need adjustment +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- other_cp = pv_info->prev_constant_pool_handle(); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up ++ RC_TIMER_STOP(_timer_vm_op_epilogue); ++} ++ ++bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { ++ // classes for primitives cannot be redefined ++ if (java_lang_Class::is_primitive(klass_mirror)) { ++ return false; ++ } ++ klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror); ++ // classes for arrays cannot be redefined ++ if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) { ++ return false; + } ++ return true; + } + +-void VM_RedefineClasses::update_jmethod_ids() { +- for (int j = 0; j < _matching_methods_length; ++j) { +- methodOop old_method = _matching_old_methods[j]; +- jmethodID jmid = old_method->find_jmethod_id_or_null(); +- if (jmid != NULL) { +- // There is a jmethodID, change it to point to the new method +- methodHandle new_method_h(_matching_new_methods[j]); +- JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); +- assert(JNIHandles::resolve_jmethod_id(jmid) == _matching_new_methods[j], +- "should be replaced"); +- } ++#ifdef ASSERT ++ ++void VM_RedefineClasses::verify_classes(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ ++ instanceKlassHandle k_handle(THREAD, k_oop); ++ Verifier::verify(k_handle, Verifier::ThrowException, true, true, THREAD); ++ k_oop = k_oop->klass_part()->old_version(); + } + } + +-void VM_RedefineClasses::check_methods_and_mark_as_obsolete( +- BitMap *emcp_methods, int * emcp_method_count_p) { +- *emcp_method_count_p = 0; +- int obsolete_count = 0; +- int old_index = 0; +- for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { +- methodOop old_method = _matching_old_methods[j]; +- methodOop new_method = _matching_new_methods[j]; +- methodOop old_array_method; +- +- // Maintain an old_index into the _old_methods array by skipping +- // deleted methods +- while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) +- != old_method) { +- ++old_index; +- } +- +- if (MethodComparator::methods_EMCP(old_method, new_method)) { +- // The EMCP definition from JSR-163 requires the bytecodes to be +- // the same with the exception of constant pool indices which may +- // differ. However, the constants referred to by those indices +- // must be the same. +- // +- // We use methods_EMCP() for comparison since constant pool +- // merging can remove duplicate constant pool entries that were +- // present in the old method and removed from the rewritten new +- // method. A faster binary comparison function would consider the +- // old and new methods to be different when they are actually +- // EMCP. +- // +- // The old and new methods are EMCP and you would think that we +- // could get rid of one of them here and now and save some space. 
+- // However, the concept of EMCP only considers the bytecodes and +- // the constant pool entries in the comparison. Other things, +- // e.g., the line number table (LNT) or the local variable table +- // (LVT) don't count in the comparison. So the new (and EMCP) +- // method can have a new LNT that we need so we can't just +- // overwrite the new method with the old method. +- // +- // When this routine is called, we have already attached the new +- // methods to the_class so the old methods are effectively +- // overwritten. However, if an old method is still executing, +- // then the old method cannot be collected until sometime after +- // the old method call has returned. So the overwriting of old +- // methods by new methods will save us space except for those +- // (hopefully few) old methods that are still executing. +- // +- // A method refers to a constMethodOop and this presents another +- // possible avenue to space savings. The constMethodOop in the +- // new method contains possibly new attributes (LNT, LVT, etc). +- // At first glance, it seems possible to save space by replacing +- // the constMethodOop in the old method with the constMethodOop +- // from the new method. The old and new methods would share the +- // same constMethodOop and we would save the space occupied by +- // the old constMethodOop. However, the constMethodOop contains +- // a back reference to the containing method. Sharing the +- // constMethodOop between two methods could lead to confusion in +- // the code that uses the back reference. This would lead to +- // brittle code that could be broken in non-obvious ways now or +- // in the future. +- // +- // Another possibility is to copy the constMethodOop from the new +- // method to the old method and then overwrite the new method with +- // the old method. Since the constMethodOop contains the bytecodes +- // for the method embedded in the oop, this option would change +- // the bytecodes out from under any threads executing the old +- // method and make the thread's bcp invalid. Since EMCP requires +- // that the bytecodes be the same modulo constant pool indices, it +- // is straight forward to compute the correct new bcp in the new +- // constMethodOop from the old bcp in the old constMethodOop. The +- // time consuming part would be searching all the frames in all +- // of the threads to find all of the calls to the old method. +- // +- // It looks like we will have to live with the limited savings +- // that we get from effectively overwriting the old methods +- // when the new methods are attached to the_class. +- +- // track which methods are EMCP for add_previous_version() call +- emcp_methods->set_bit(old_index); +- (*emcp_method_count_p)++; +- +- // An EMCP method is _not_ obsolete. An obsolete method has a +- // different jmethodID than the current method. An EMCP method +- // has the same jmethodID as the current method. Having the +- // same jmethodID for all EMCP versions of a method allows for +- // a consistent view of the EMCP methods regardless of which +- // EMCP method you happen to have in hand. For example, a +- // breakpoint set in one EMCP method will work for all EMCP +- // versions of the method including the current one. 
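The EMCP ("equivalent modulo constant pool") rule referenced above is easy to state in isolation. A minimal standalone sketch, with illustrative names only (this is not HotSpot's MethodComparator): two bytecode streams are EMCP when the opcodes match one-for-one and every constant pool operand resolves to the same constant, even if the numeric indices diverged during constant pool merging.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Illustrative instruction: an opcode plus an optional constant pool index.
    struct Insn {
      uint8_t opcode;
      int     cp_index;  // -1 when the opcode takes no constant pool operand
    };

    // EMCP check: identical opcodes, and constant pool operands that resolve
    // to identical constants, regardless of the numeric index values.
    bool methods_emcp(const std::vector<Insn>& old_code,
                      const std::vector<std::string>& old_pool,
                      const std::vector<Insn>& new_code,
                      const std::vector<std::string>& new_pool) {
      if (old_code.size() != new_code.size()) return false;
      for (size_t i = 0; i < old_code.size(); i++) {
        if (old_code[i].opcode != new_code[i].opcode) return false;
        int oi = old_code[i].cp_index;
        int ni = new_code[i].cp_index;
        if ((oi < 0) != (ni < 0)) return false;
        if (oi >= 0 && old_pool[oi] != new_pool[ni]) return false;
      }
      return true;
    }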
+- } else { +- // mark obsolete methods as such +- old_method->set_is_obsolete(); +- obsolete_count++; ++#endif + +- // obsolete methods need a unique idnum +- u2 num = instanceKlass::cast(_the_class_oop)->next_method_idnum(); +- if (num != constMethodOopDesc::UNSET_IDNUM) { +-// u2 old_num = old_method->method_idnum(); +- old_method->set_method_idnum(num); +-// TO DO: attach obsolete annotations to obsolete method's new idnum ++// Rewrite fast bytecodes back to their slower generic equivalents. This undoes the rewriting done in templateTable_xxx.cpp. ++// The reason is that once we zero the constant pool caches, we need to re-resolve all entries again; fast bytecodes do not ++// do that, because they assume the cache entry is resolved already. ++static void unpatch_bytecode(methodOop method) { ++ RawBytecodeStream bcs(method); ++ Bytecodes::Code code; ++ Bytecodes::Code java_code; ++ while (!bcs.is_last_bytecode()) { ++ code = bcs.raw_next(); ++ address bcp = bcs.bcp(); ++ ++ if (code == Bytecodes::_breakpoint) { ++ int bci = method->bci_from(bcp); ++ code = method->orig_bytecode_at(bci); ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ // Let breakpoint table handling unpatch bytecode ++ method->set_orig_bytecode_at(bci, java_code); + } +- // With tracing we try not to "yack" too much. The position of +- // this trace assumes there are fewer obsolete methods than +- // EMCP methods. +- RC_TRACE(0x00000100, ("mark %s(%s) as obsolete", +- old_method->name()->as_C_string(), +- old_method->signature()->as_C_string())); ++ } else { ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ *bcp = java_code; ++ } ++ } ++ ++ // Additionally, we need to unpatch the bytecode at bcp+1 for fast_xaccess (which is fast field access) ++ if (code == Bytecodes::_fast_iaccess_0 || code == Bytecodes::_fast_aaccess_0 || code == Bytecodes::_fast_faccess_0) { ++ Bytecodes::Code code2 = Bytecodes::code_or_bp_at(bcp + 1); ++ assert(code2 == Bytecodes::_fast_igetfield || ++ code2 == Bytecodes::_fast_agetfield || ++ code2 == Bytecodes::_fast_fgetfield, ""); ++ *(bcp + 1) = Bytecodes::java_code(code2); + } +- old_method->set_is_old(); +- } +- for (int i = 0; i < _deleted_methods_length; ++i) { +- methodOop old_method = _deleted_methods[i]; +- +- assert(old_method->vtable_index() < 0, +- "cannot delete methods with vtable entries");; +- +- // Mark all deleted methods as old and obsolete +- old_method->set_is_old(); +- old_method->set_is_obsolete(); +- ++obsolete_count; +- // With tracing we try not to "yack" too much. The position of +- // this trace assumes there are fewer obsolete methods than +- // EMCP methods. +- RC_TRACE(0x00000100, ("mark deleted %s(%s) as obsolete", +- old_method->name()->as_C_string(), +- old_method->signature()->as_C_string())); +- } +- assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), +- "sanity check"); +- RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p, +- obsolete_count)); ++ } + } + +-// This internal class transfers the native function registration from old methods +-// to new methods. It is designed to handle both the simple case of unchanged +-// native methods and the complex cases of native method prefixes being added and/or +-// removed.
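The unpatch_bytecode() routine added above reverses HotSpot's bytecode quickening. A rough standalone sketch of the idea, under stated assumptions: the opcode values and the ToyInsn stream are invented for illustration, and unlike this toy, real code must advance instruction-by-instruction (operand bytes can alias opcode values), which is exactly what RawBytecodeStream does above.

    #include <cstdint>
    #include <vector>

    // Invented opcode values; the real mapping lives in HotSpot's Bytecodes table.
    enum : uint8_t { OP_GETFIELD = 0xb4, OP_PUTFIELD = 0xb5,
                     OP_FAST_IGETFIELD = 0xcc, OP_FAST_BPUTFIELD = 0xcd };

    // Map a quickened opcode back to the generic opcode it was rewritten from.
    static uint8_t java_code(uint8_t op) {
      switch (op) {
        case OP_FAST_IGETFIELD: return OP_GETFIELD;
        case OP_FAST_BPUTFIELD: return OP_PUTFIELD;
        default:                return op;
      }
    }

    // Pre-decoded instruction stream, so the walk is trivially safe here.
    struct ToyInsn { uint8_t opcode; uint16_t operand; };

    // De-quicken every opcode so the next execution re-resolves its
    // constant pool cache entry instead of trusting a cleared one.
    static void unpatch(std::vector<ToyInsn>& code) {
      for (ToyInsn& insn : code) {
        insn.opcode = java_code(insn.opcode);
      }
    }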
+-// It expects only to be used during the VM_RedefineClasses op (a safepoint). +-// +-// This class is used after the new methods have been installed in "the_class". +-// +-// So, for example, the following must be handled. Where 'm' is a method and +-// a number followed by an underscore is a prefix. +-// +-// Old Name New Name +-// Simple transfer to new method m -> m +-// Add prefix m -> 1_m +-// Remove prefix 1_m -> m +-// Simultaneous add of prefixes m -> 3_2_1_m +-// Simultaneous removal of prefixes 3_2_1_m -> m +-// Simultaneous add and remove 1_m -> 2_m +-// Same, caused by prefix removal only 3_2_1_m -> 3_2_m ++// Unevolving classes may point to old methods directly ++// from their constant pool caches, itables, and/or vtables. We ++// use the SystemDictionary::classes_do() facility and this helper ++// to fix up these pointers. Additional field offsets and vtable indices ++// in the constant pool cache entries are fixed. + // +-class TransferNativeFunctionRegistration { +- private: +- instanceKlassHandle the_class; +- int prefix_count; +- char** prefixes; ++// Note: We currently don't support updating the vtable in ++// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. ++void VM_RedefineClasses::adjust_cpool_cache(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ //tty->print_cr("name=%s", k_oop->klass_part()->name()->as_C_string()); ++/* ++ methodOop *matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); ++ methodOop *matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); + +- // Recursively search the binary tree of possibly prefixed method names. +- // Iteration could be used if all agents were well behaved. Full tree walk is +- // more resilent to agents not cleaning up intermediate methods. +- // Branch at each depth in the binary tree is: +- // (1) without the prefix. +- // (2) with the prefix. +- // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) +- methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, +- Symbol* signature) { +- TempNewSymbol name_symbol = SymbolTable::probe(name_str, (int)name_len); +- if (name_symbol != NULL) { +- methodOop method = Klass::cast(the_class())->lookup_method(name_symbol, signature); +- if (method != NULL) { +- // Even if prefixed, intermediate methods must exist. +- if (method->is_native()) { +- // Wahoo, we found a (possibly prefixed) version of the method, return it. +- return method; +- } +- if (depth < prefix_count) { +- // Try applying further prefixes (other than this one). +- method = search_prefix_name_space(depth+1, name_str, name_len, signature); +- if (method != NULL) { +- return method; // found +- } ++ for (int i=0; i<_matching_methods_length; i++) { ++ matching_old_methods[i] = (methodOop)_old_methods->obj_at(_matching_old_methods[i]); ++ matching_new_methods[i] = (methodOop)_new_methods->obj_at(_matching_new_methods[i]); ++ }*/ + +- // Try adding this prefix to the method name and see if it matches +- // another method name. 
+- char* prefix = prefixes[depth]; +- size_t prefix_len = strlen(prefix); +- size_t trial_len = name_len + prefix_len; +- char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); +- strcpy(trial_name_str, prefix); +- strcat(trial_name_str, name_str); +- method = search_prefix_name_space(depth+1, trial_name_str, trial_len, +- signature); +- if (method != NULL) { +- // If found along this branch, it was prefixed, mark as such +- method->set_is_prefixed_native(); +- return method; // found +- } +- } +- } +- } +- return NULL; // This whole branch bore nothing +- } ++ Klass *k = k_oop->klass_part(); ++ if (k->oop_is_instance()) { ++ HandleMark hm(THREAD); ++ instanceKlass *ik = (instanceKlass *) k; + +- // Return the method name with old prefixes stripped away. +- char* method_name_without_prefixes(methodOop method) { +- Symbol* name = method->name(); +- char* name_str = name->as_utf8(); ++ constantPoolHandle other_cp; ++ constantPoolCacheOop cp_cache; + +- // Old prefixing may be defunct, strip prefixes, if any. +- for (int i = prefix_count-1; i >= 0; i--) { +- char* prefix = prefixes[i]; +- size_t prefix_len = strlen(prefix); +- if (strncmp(prefix, name_str, prefix_len) == 0) { +- name_str += prefix_len; +- } +- } +- return name_str; +- } ++ other_cp = constantPoolHandle(ik->constants()); + +- // Strip any prefixes off the old native method, then try to find a +- // (possibly prefixed) new native that matches it. +- methodOop strip_and_search_for_new_native(methodOop method) { +- ResourceMark rm; +- char* name_str = method_name_without_prefixes(method); +- return search_prefix_name_space(0, name_str, strlen(name_str), +- method->signature()); +- } ++ for (int i=0; i<other_cp->length(); i++) { ++ if (other_cp->tag_at(i).is_klass()) { ++ klassOop klass = other_cp->klass_at(i, THREAD); ++ if (klass->klass_part()->new_version() != NULL) { + +- public: ++ // (tw) TODO: check why/if this is necessary ++ other_cp->klass_at_put(i, klass->klass_part()->new_version()); ++ } ++ klass = other_cp->klass_at(i, THREAD); ++ assert(klass->klass_part()->new_version() == NULL, "Must be new klass!"); ++ } ++ } + +- // Construct a native method transfer processor for this class. 
+- TransferNativeFunctionRegistration(instanceKlassHandle _the_class) { +- assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); ++ cp_cache = other_cp->cache(); + +- the_class = _the_class; +- prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count); ++ if (cp_cache != NULL) { ++ cp_cache->adjust_entries(NULL, ++ NULL, ++ 0); ++ } ++ ++ // If bytecode rewriting is enabled, we also need to unpatch the bytecode to force re-resolution of the zeroed entries ++ if (RewriteBytecodes) { ++ ik->methods_do(unpatch_bytecode); ++ } ++ } ++ k_oop = k_oop->klass_part()->old_version(); + } ++} + +- // Attempt to transfer any of the old or deleted methods that are native +- void transfer_registrations(methodOop* old_methods, int methods_length) { +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; ++void VM_RedefineClasses::update_jmethod_ids() { ++ for (int j = 0; j < _matching_methods_length; ++j) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ RC_TRACE(0x00008000, ("matching method %s", old_method->name_and_sig_as_C_string())); ++ ++ jmethodID jmid = old_method->find_jmethod_id_or_null(); ++ if (old_method->new_version() != NULL && jmid == NULL) { ++ // (tw) Have to create a jmethodID in this case ++ jmid = old_method->jmethod_id(); ++ } ++ ++ if (jmid != NULL) { ++ // There is a jmethodID, change it to point to the new method ++ methodHandle new_method_h((methodOop)_new_methods->obj_at(_matching_new_methods[j])); ++ if (old_method->new_version() == NULL) { ++ methodHandle old_method_h((methodOop)_old_methods->obj_at(_matching_old_methods[j])); ++ jmethodID new_jmethod_id = JNIHandles::make_jmethod_id(old_method_h); ++ bool result = instanceKlass::cast(old_method_h->method_holder())->update_jmethod_id(old_method_h(), new_jmethod_id); ++ //RC_TRACE(0x00008000, ("Changed jmethodID for old method assigned to %d / result=%d", new_jmethod_id, result); ++ //RC_TRACE(0x00008000, ("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); ++ } else { ++ jmethodID mid = new_method_h->jmethod_id(); ++ bool result = instanceKlass::cast(new_method_h->method_holder())->update_jmethod_id(new_method_h(), jmid); ++ //RC_TRACE(0x00008000, ("Changed jmethodID for new method assigned to %d / result=%d", jmid, result);
+- new_method->set_native_function(old_method->native_function(), +- !methodOopDesc::native_bind_event_is_interesting); +- } + } ++ JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); ++ //RC_TRACE(0x00008000, ("changing method associated with jmethod id %d to %s", (int)jmid, new_method_h->name()->as_C_string()); ++ assert(JNIHandles::resolve_jmethod_id(jmid) == (methodOop)_new_methods->obj_at(_matching_new_methods[j]), "should be replaced"); ++ jmethodID mid = ((methodOop)_new_methods->obj_at(_matching_new_methods[j]))->jmethod_id(); ++ assert(JNIHandles::resolve_non_null((jobject)mid) == new_method_h(), "must match!"); ++ ++ //RC_TRACE(0x00008000, ("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); + } + } +-}; +- +-// Don't lose the association between a native method and its JNI function. +-void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle the_class) { +- TransferNativeFunctionRegistration transfer(the_class); +- transfer.transfer_registrations(_deleted_methods, _deleted_methods_length); +- transfer.transfer_registrations(_matching_old_methods, _matching_methods_length); + } + ++ + // Deoptimize all compiled code that depends on this class. + // + // If the can_redefine_classes capability is obtained in the onload +@@ -2964,7 +2677,10 @@ void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) { + + // All dependencies have been recorded from startup or this is a second or + // subsequent use of RedefineClasses +- if (JvmtiExport::all_dependencies_are_recorded()) { ++ ++ // For now deopt all ++ // (tw) TODO: Improve the dependency system such that we can safely deopt only a subset of the methods ++ if (0 && JvmtiExport::all_dependencies_are_recorded()) { + Universe::flush_evol_dependents_on(k_h); + } else { + CodeCache::mark_all_nmethods_for_deoptimization(); +@@ -2987,10 +2703,10 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + methodOop old_method; + methodOop new_method; + +- _matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _added_methods = NEW_RESOURCE_ARRAY(methodOop, _new_methods->length()); +- _deleted_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); ++ _matching_old_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _matching_new_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _added_methods = NEW_RESOURCE_ARRAY(int, _new_methods->length()); ++ _deleted_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); + + _matching_methods_length = 0; + _deleted_methods_length = 0; +@@ -3005,36 +2721,36 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + } + // New method at the end + new_method = (methodOop) _new_methods->obj_at(nj); +- _added_methods[_added_methods_length++] = new_method; ++ _added_methods[_added_methods_length++] = nj; + ++nj; + } else if (nj >= _new_methods->length()) { + // Old method, at the end, is deleted + old_method = (methodOop) _old_methods->obj_at(oj); +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj; + ++oj; + } else { + old_method = (methodOop) _old_methods->obj_at(oj); + new_method = (methodOop) _new_methods->obj_at(nj); + if (old_method->name() == new_method->name()) { + if (old_method->signature() == new_method->signature()) { +- 
_matching_old_methods[_matching_methods_length ] = old_method; +- _matching_new_methods[_matching_methods_length++] = new_method; ++ _matching_old_methods[_matching_methods_length ] = oj;//old_method; ++ _matching_new_methods[_matching_methods_length++] = nj;//new_method; + ++nj; + ++oj; + } else { + // added overloaded have already been moved to the end, + // so this is a deleted overloaded method +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; + ++oj; + } + } else { // names don't match + if (old_method->name()->fast_compare(new_method->name()) > 0) { + // new method +- _added_methods[_added_methods_length++] = new_method; ++ _added_methods[_added_methods_length++] = nj;//new_method; + ++nj; + } else { + // deleted method +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; + ++oj; + } + } +@@ -3042,6 +2758,8 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + } + assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity"); + assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity"); ++ RC_TRACE(0x00008000, ("Matching methods = %d / deleted methods = %d / added methods = %d", ++ _matching_methods_length, _deleted_methods_length, _added_methods_length)); + } + + +@@ -3049,287 +2767,184 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + // Install the redefinition of a class: + // - house keeping (flushing breakpoints and caches, deoptimizing + // dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding a weak reference to track the obsolete but interesting +-// parts of the_class + // - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-void VM_RedefineClasses::redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS) { ++void VM_RedefineClasses::redefine_single_class(instanceKlassHandle the_new_class, TRAPS) { ++ ++ ResourceMark rm(THREAD); + +- RC_TIMER_START(_timer_rsc_phase1); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); + +- oop the_class_mirror = JNIHandles::resolve_non_null(the_jclass); +- klassOop the_class_oop = java_lang_Class::as_klassOop(the_class_mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); ++ instanceKlassHandle the_old_class = instanceKlassHandle(THREAD, the_new_class->old_version()); + ++#ifndef JVMTI_KERNEL + // Remove all breakpoints in methods of this class + JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints(); +- jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop); ++ jvmti_breakpoints.clearall_in_class_at_safepoint(the_old_class()); ++#endif // !JVMTI_KERNEL + +- if (the_class_oop == Universe::reflect_invoke_cache()->klass()) { ++ if (the_old_class() == Universe::reflect_invoke_cache()->klass()) { + // We are redefining java.lang.reflect.Method. 
Method.invoke() is + // cached and users of the cache care about each active version of + // the method so we have to track this previous version. + // Do this before methods get switched + Universe::reflect_invoke_cache()->add_previous_version( +- the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); ++ the_old_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); + } + +- // Deoptimize all compiled code that depends on this class +- flush_dependent_code(the_class, THREAD); +- +- _old_methods = the_class->methods(); +- _new_methods = scratch_class->methods(); +- _the_class_oop = the_class_oop; ++ _old_methods = the_old_class->methods(); ++ _new_methods = the_new_class->methods(); ++ _the_class_oop = the_old_class(); + compute_added_deleted_matching_methods(); +- update_jmethod_ids(); +- +- // Attach new constant pool to the original klass. The original +- // klass still refers to the old constant pool (for now). +- scratch_class->constants()->set_pool_holder(the_class()); +- +-#if 0 +- // In theory, with constant pool merging in place we should be able +- // to save space by using the new, merged constant pool in place of +- // the old constant pool(s). By "pool(s)" I mean the constant pool in +- // the klass version we are replacing now and any constant pool(s) in +- // previous versions of klass. Nice theory, doesn't work in practice. +- // When this code is enabled, even simple programs throw NullPointer +- // exceptions. I'm guessing that this is caused by some constant pool +- // cache difference between the new, merged constant pool and the +- // constant pool that was just being used by the klass. I'm keeping +- // this code around to archive the idea, but the code has to remain +- // disabled for now. +- +- // Attach each old method to the new constant pool. This can be +- // done here since we are past the bytecode verification and +- // constant pool optimization phases. +- for (int i = _old_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)_old_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- +- { +- // walk all previous versions of the klass +- instanceKlass *ik = (instanceKlass *)the_class()->klass_part(); +- PreviousVersionWalker pvw(ik); +- instanceKlassHandle ikh; +- do { +- ikh = pvw.next_previous_version(); +- if (!ikh.is_null()) { +- ik = ikh(); +- +- // attach previous version of klass to the new constant pool +- ik->set_constants(scratch_class->constants()); +- +- // Attach each method in the previous version of klass to the +- // new constant pool +- objArrayOop prev_methods = ik->methods(); +- for (int i = prev_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)prev_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- } +- } while (!ikh.is_null()); +- } +-#endif +- +- // Replace methods and constantpool +- the_class->set_methods(_new_methods); +- scratch_class->set_methods(_old_methods); // To prevent potential GCing of the old methods, +- // and to be able to undo operation easily. +- +- constantPoolOop old_constants = the_class->constants(); +- the_class->set_constants(scratch_class->constants()); +- scratch_class->set_constants(old_constants); // See the previous comment. +-#if 0 +- // We are swapping the guts of "the new class" with the guts of "the +- // class". 
Since the old constant pool has just been attached to "the +- // new class", it seems logical to set the pool holder in the old +- // constant pool also. However, doing this will change the observable +- // class hierarchy for any old methods that are still executing. A +- // method can query the identity of its "holder" and this query uses +- // the method's constant pool link to find the holder. The change in +- // holding class from "the class" to "the new class" can confuse +- // things. +- // +- // Setting the old constant pool's holder will also cause +- // verification done during vtable initialization below to fail. +- // During vtable initialization, the vtable's class is verified to be +- // a subtype of the method's holder. The vtable's class is "the +- // class" and the method's holder is gotten from the constant pool +- // link in the method itself. For "the class"'s directly implemented +- // methods, the method holder is "the class" itself (as gotten from +- // the new constant pool). The check works fine in this case. The +- // check also works fine for methods inherited from super classes. +- // +- // Miranda methods are a little more complicated. A miranda method is +- // provided by an interface when the class implementing the interface +- // does not provide its own method. These interfaces are implemented +- // internally as an instanceKlass. These special instanceKlasses +- // share the constant pool of the class that "implements" the +- // interface. By sharing the constant pool, the method holder of a +- // miranda method is the class that "implements" the interface. In a +- // non-redefine situation, the subtype check works fine. However, if +- // the old constant pool's pool holder is modified, then the check +- // fails because there is no class hierarchy relationship between the +- // vtable's class and "the new class". +- +- old_constants->set_pool_holder(scratch_class()); +-#endif + + // track which methods are EMCP for add_previous_version() call below +- BitMap emcp_methods(_old_methods->length()); ++ ++ // (tw) TODO: Check if we need the concept of EMCP? ++ BitMap emcp_methods(_old_methods->length()); + int emcp_method_count = 0; + emcp_methods.clear(); // clears 0..(length() - 1) ++ ++ // We need to mark methods as old!! + check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); +- transfer_old_native_function_registrations(the_class); +- +- // The class file bytes from before any retransformable agents mucked +- // with them was cached on the scratch class, move to the_class. +- // Note: we still want to do this if nothing needed caching since it +- // should get cleared in the_class too. 
+- if (the_class->get_cached_class_file_bytes() == 0) { +- // the_class doesn't have a cache yet so copy it +- the_class->set_cached_class_file( +- scratch_class->get_cached_class_file_bytes(), +- scratch_class->get_cached_class_file_len()); +- } +-#ifndef PRODUCT +- else { +- assert(the_class->get_cached_class_file_bytes() == +- scratch_class->get_cached_class_file_bytes(), "cache ptrs must match"); +- assert(the_class->get_cached_class_file_len() == +- scratch_class->get_cached_class_file_len(), "cache lens must match"); +- } +-#endif +- +- // Replace inner_classes +- typeArrayOop old_inner_classes = the_class->inner_classes(); +- the_class->set_inner_classes(scratch_class->inner_classes()); +- scratch_class->set_inner_classes(old_inner_classes); +- +- // Initialize the vtable and interface table after +- // methods have been rewritten +- { +- ResourceMark rm(THREAD); +- // no exception should happen here since we explicitly +- // do not check loader constraints. +- // compare_and_normalize_class_versions has already checked: +- // - classloaders unchanged, signatures unchanged +- // - all instanceKlasses for redefined classes reused & contents updated +- the_class->vtable()->initialize_vtable(false, THREAD); +- the_class->itable()->initialize_itable(false, THREAD); +- assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception"); +- } +- +- // Leave arrays of jmethodIDs and itable index cache unchanged +- +- // Copy the "source file name" attribute from new class version +- the_class->set_source_file_name(scratch_class->source_file_name()); +- +- // Copy the "source debug extension" attribute from new class version +- the_class->set_source_debug_extension( +- scratch_class->source_debug_extension(), +- scratch_class->source_debug_extension() == NULL ? 
0 : +- (int)strlen(scratch_class->source_debug_extension())); +- +- // Use of javac -g could be different in the old and the new +- if (scratch_class->access_flags().has_localvariable_table() != +- the_class->access_flags().has_localvariable_table()) { +- +- AccessFlags flags = the_class->access_flags(); +- if (scratch_class->access_flags().has_localvariable_table()) { +- flags.set_has_localvariable_table(); +- } else { +- flags.clear_has_localvariable_table(); +- } +- the_class->set_access_flags(flags); +- } +- +- // Replace class annotation fields values +- typeArrayOop old_class_annotations = the_class->class_annotations(); +- the_class->set_class_annotations(scratch_class->class_annotations()); +- scratch_class->set_class_annotations(old_class_annotations); +- +- // Replace fields annotation fields values +- objArrayOop old_fields_annotations = the_class->fields_annotations(); +- the_class->set_fields_annotations(scratch_class->fields_annotations()); +- scratch_class->set_fields_annotations(old_fields_annotations); +- +- // Replace methods annotation fields values +- objArrayOop old_methods_annotations = the_class->methods_annotations(); +- the_class->set_methods_annotations(scratch_class->methods_annotations()); +- scratch_class->set_methods_annotations(old_methods_annotations); +- +- // Replace methods parameter annotation fields values +- objArrayOop old_methods_parameter_annotations = +- the_class->methods_parameter_annotations(); +- the_class->set_methods_parameter_annotations( +- scratch_class->methods_parameter_annotations()); +- scratch_class->set_methods_parameter_annotations(old_methods_parameter_annotations); +- +- // Replace methods default annotation fields values +- objArrayOop old_methods_default_annotations = +- the_class->methods_default_annotations(); +- the_class->set_methods_default_annotations( +- scratch_class->methods_default_annotations()); +- scratch_class->set_methods_default_annotations(old_methods_default_annotations); +- +- // Replace minor version number of class file +- u2 old_minor_version = the_class->minor_version(); +- the_class->set_minor_version(scratch_class->minor_version()); +- scratch_class->set_minor_version(old_minor_version); +- +- // Replace major version number of class file +- u2 old_major_version = the_class->major_version(); +- the_class->set_major_version(scratch_class->major_version()); +- scratch_class->set_major_version(old_major_version); +- +- // Replace CP indexes for class and name+type of enclosing method +- u2 old_class_idx = the_class->enclosing_method_class_index(); +- u2 old_method_idx = the_class->enclosing_method_method_index(); +- the_class->set_enclosing_method_indices( +- scratch_class->enclosing_method_class_index(), +- scratch_class->enclosing_method_method_index()); +- scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx); ++ update_jmethod_ids(); + + // keep track of previous versions of this class +- the_class->add_previous_version(scratch_class, &emcp_methods, ++ the_new_class->add_previous_version(the_old_class, &emcp_methods, + emcp_method_count); + +- RC_TIMER_STOP(_timer_rsc_phase1); +- RC_TIMER_START(_timer_rsc_phase2); ++ // TODO: ++ transfer_old_native_function_registrations(the_old_class); + +- // Adjust constantpool caches and vtables for all classes +- // that reference methods of the evolved class. 
+- SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); + +- if (the_class->oop_map_cache() != NULL) { +- // Flush references to any obsolete methods from the oop map cache +- // so that obsolete methods are not pinned. +- the_class->oop_map_cache()->flush_obsolete_entries(); ++#ifdef ASSERT ++ ++// klassOop systemLookup1 = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++// assert(systemLookup1 == the_new_class(), "New class must be in system dictionary!"); ++ ++ //JNIHandles::verify(); ++ ++// klassOop systemLookup = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++ ++// assert(systemLookup == the_new_class(), "New class must be in system dictionary!"); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); ++ ++ for (int i=0; i<the_new_class->methods()->length(); i++) { ++ assert(((methodOop)the_new_class->methods()->obj_at(i))->method_holder() == the_new_class(), "method holder must match!"); + } + ++ _old_methods->verify(); ++ _new_methods->verify(); ++ ++ the_new_class->vtable()->verify(tty); ++ the_old_class->vtable()->verify(tty); ++ ++#endif ++ + // increment the classRedefinedCount field in the_class and in any + // direct and indirect subclasses of the_class +- increment_class_counter((instanceKlass *)the_class()->klass_part(), THREAD); ++ increment_class_counter((instanceKlass *)the_old_class()->klass_part(), THREAD); ++ ++} ++ ++ ++void VM_RedefineClasses::check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p) { ++ RC_TRACE(0x00008000, ("Checking matching methods for EMCP")); ++ *emcp_method_count_p = 0; ++ int obsolete_count = 0; ++ int old_index = 0; ++ for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ methodOop new_method = (methodOop)_new_methods->obj_at(_matching_new_methods[j]); ++ methodOop old_array_method; ++ ++ // Maintain an old_index into the _old_methods array by skipping ++ // deleted methods ++ while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) ++ != old_method) { ++ ++old_index; ++ } ++ ++ if (MethodComparator::methods_EMCP(old_method, new_method)) { ++ // The EMCP definition from JSR-163 requires the bytecodes to be ++ // the same with the exception of constant pool indices which may ++ // differ. However, the constants referred to by those indices ++ // must be the same. ++ // ++ // We use methods_EMCP() for comparison since constant pool ++ // merging can remove duplicate constant pool entries that were ++ // present in the old method and removed from the rewritten new ++ // method. A faster binary comparison function would consider the ++ // old and new methods to be different when they are actually ++ // EMCP. ++ ++ // track which methods are EMCP for add_previous_version() call ++ emcp_methods->set_bit(old_index); ++ (*emcp_method_count_p)++; ++ ++ // An EMCP method is _not_ obsolete. An obsolete method has a ++ // different jmethodID than the current method. An EMCP method ++ // has the same jmethodID as the current method. 
Having the ++ // same jmethodID for all EMCP versions of a method allows for ++ // a consistent view of the EMCP methods regardless of which ++ // EMCP method you happen to have in hand. For example, a ++ // breakpoint set in one EMCP method will work for all EMCP ++ // versions of the method including the current one. ++ ++ old_method->set_new_version(new_method); ++ new_method->set_old_version(old_method); ++ ++ RC_TRACE(0x00008000, ("Found EMCP method %s", old_method->name_and_sig_as_C_string())); ++ ++ // Transfer breakpoints ++ instanceKlass *ik = instanceKlass::cast(old_method->method_holder()); ++ for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = bp->next()) { ++ RC_TRACE(0x00000002, ("Checking breakpoint")); ++ RC_TRACE(0x00000002, ("%d / %d", ++ bp->match(old_method), bp->match(new_method))); ++ if (bp->match(old_method)) { ++ assert(bp->match(new_method), "if the old method matches, then the new method must match too"); ++ RC_TRACE(0x00000002, ("Found a breakpoint in an old EMCP method")); ++ new_method->set_breakpoint(bp->bci()); ++ } ++ } ++ ++ + +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("redefined name=%s, count=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), +- java_lang_Class::classRedefinedCount(the_class_mirror), +- os::available_memory() >> 10)); ++ } else { ++ // mark obsolete methods as such ++ old_method->set_is_obsolete(); ++ obsolete_count++; ++ ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ RC_TRACE(0x00008000, ("mark %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string())); ++ } ++ old_method->set_is_old(); ++ } ++ for (int i = 0; i < _deleted_methods_length; ++i) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_deleted_methods[i]); + +- //assert(old_method->vtable_index() < 0, +- // "cannot delete methods with vtable entries");; + ++ // Mark all deleted methods as old and obsolete ++ old_method->set_is_old(); ++ old_method->set_is_obsolete(); ++ ++obsolete_count; ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ RC_TRACE(0x00008000, ("mark deleted %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string())); ++ } ++ //assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), "sanity check"); ++ RC_TRACE(0x00008000, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p, obsolete_count)); ++} + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses.
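Note the design visible in set_new_version()/set_old_version() above (and in the old_version() loops elsewhere in this patch): rather than swapping the guts of one klass structure, every redefinition installs a complete new version and keeps the versions linked both ways. A minimal sketch of that shape, with illustrative names:

    #include <cstddef>

    // A class (or, for EMCP pairs, a method) version chain: doubly linked,
    // with the newest version reachable through new_version links.
    struct KlassVersion {
      KlassVersion* old_version;  // predecessor, NULL for the original
      KlassVersion* new_version;  // successor, NULL for the newest
    };

    // Visit the newest version and every predecessor, the same walk that the
    // while (k_oop != NULL) ... old_version() loops in this patch perform.
    template <typename F>
    void for_each_version(KlassVersion* newest, F visit) {
      for (KlassVersion* k = newest; k != NULL; k = k->old_version) {
        visit(k);
      }
    }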
+@@ -3338,134 +2953,324 @@ void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) { + klassOop class_oop = java_lang_Class::as_klassOop(class_mirror); + int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1; + java_lang_Class::set_classRedefinedCount(class_mirror, new_count); ++ RC_TRACE(0x00008000, ("updated count for class=%s to %d", ik->external_name(), new_count)); ++} ++ ++#ifndef PRODUCT ++void VM_RedefineClasses::check_class(klassOop k_oop, TRAPS) { ++ Klass *k = k_oop->klass_part(); ++ if (k->oop_is_instance()) { ++ HandleMark hm(THREAD); ++ instanceKlass *ik = (instanceKlass *) k; ++ assert(ik->is_newest_version(), "must be latest version in system dictionary"); ++ ++ if (ik->vtable_length() > 0) { ++ ResourceMark rm(THREAD); ++ if (!ik->vtable()->check_no_old_entries()) { ++ RC_TRACE(0x00000001, ("size of class: %d\n", ++ k_oop->size())); ++ RC_TRACE(0x00000001, ("klassVtable::check_no_old_entries failure -- OLD method found -- class: %s", ++ ik->signature_name())); ++ assert(false, "OLD method found"); ++ } + +- if (class_oop != _the_class_oop) { +- // _the_class_oop count is printed at end of redefine_single_class() +- RC_TRACE_WITH_THREAD(0x00000008, THREAD, +- ("updated count in subclass=%s to %d", ik->external_name(), new_count)); ++ ik->vtable()->verify(tty, true); ++ } + } ++} ++ ++#endif ++ ++VM_RedefineClasses::FindAffectedKlassesClosure::FindAffectedKlassesClosure( GrowableArray<instanceKlassHandle> *original_klasses, GrowableArray<instanceKlassHandle> *result ) ++{ ++ assert(original_klasses != NULL && result != NULL, ""); ++ this->_original_klasses = original_klasses; ++ this->_result = result; ++ SystemDictionary::classes_do(this); ++} + +- for (Klass *subk = ik->subklass(); subk != NULL; +- subk = subk->next_sibling()) { +- if (subk->oop_is_instance()) { +- // Only update instanceKlasses +- instanceKlass *subik = (instanceKlass*)subk; +- // recursively do subclasses of the current subclass +- increment_class_counter(subik, THREAD); ++void VM_RedefineClasses::FindAffectedKlassesClosure::do_object( oop obj ) ++{ ++ klassOop klass = (klassOop)obj; ++ assert(!_result->contains(klass), "must not occur more than once!"); ++ assert(klass->klass_part()->new_version() == NULL, "Only last version is valid entry in system dictionary"); ++ ++ for(int i=0; i<_original_klasses->length(); i++) { ++ instanceKlassHandle cur = _original_klasses->at(i); ++ if (cur() != klass && klass->klass_part()->is_subtype_of(cur()) && !_original_klasses->contains(klass)) { ++ RC_TRACE(0x00008000, ("Found affected class: %s", klass->klass_part()->name()->as_C_string())); ++ _result->append(klass); ++ break; + } + } + } + +-void VM_RedefineClasses::check_class(klassOop k_oop, +- oop initiating_loader, TRAPS) { +- Klass *k = k_oop->klass_part(); +- if (k->oop_is_instance()) { +- HandleMark hm(THREAD); +- instanceKlass *ik = (instanceKlass *) k; +- bool no_old_methods = true; // be optimistic +- ResourceMark rm(THREAD); ++jvmtiError VM_RedefineClasses::do_topological_class_sorting( const jvmtiClassDefinition *class_defs, int class_count, GrowableArray<instanceKlassHandle> *affected, GrowableArray<instanceKlassHandle> *arr, TRAPS) ++{ ++ GrowableArray< Pair<klassOop, klassOop> > *links = new GrowableArray< Pair<klassOop, klassOop> >(); + +- // a vtable should never contain old or obsolete methods +- if (ik->vtable_length() > 0 && +- !ik->vtable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, 
+- ("klassVtable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->vtable()->dump_vtable(); +- } +- no_old_methods = false; +- } +- +- // an itable should never contain old or obsolete methods +- if (ik->itable_length() > 0 && +- !ik->itable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassItable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->itable()->dump_itable(); +- } +- no_old_methods = false; +- } +- +- // the constant pool cache should never contain old or obsolete methods +- if (ik->constants() != NULL && +- ik->constants()->cache() != NULL && +- !ik->constants()->cache()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("cp-cache::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->constants()->cache()->dump_cache(); +- } +- no_old_methods = false; +- } +- +- if (!no_old_methods) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- dump_methods(); +- } else { +- tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option " +- "to see more info about the following guarantee() failure."); ++ for (int i=0; i<class_count; i++) { ++ ++ oop mirror = JNIHandles::resolve_non_null(class_defs[i].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ instanceKlassHandle the_class(THREAD, the_class_oop); ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ++ ClassFileStream st((u1*) class_defs[i].class_bytes, ++ class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); ++ ClassFileParser cfp(&st); ++ ++ GrowableArray<Symbol*> symbolArr; ++ RC_TRACE(0x00000002, ("Before find super symbols of class %s", ++ the_class->name()->as_C_string())); ++ cfp.findSuperSymbols(the_class->name(), the_class_loader, protection_domain, the_class, symbolArr, THREAD); ++ ++ for (int j=0; j<symbolArr.length(); j++) { ++ Symbol* sym = symbolArr.at(j); ++ ++ RC_TRACE(0x00008000, ("Before adding link to super class %s", sym->as_C_string())); ++ ++ for (int k=0; k<arr->length(); k++) { ++ klassOop curOop = arr->at(k)(); ++ // (tw) TODO: Check if we get aliasing problems with different class loaders? ++ if (curOop->klass_part()->name() == sym /*&& curOop->klass_part()->class_loader() == the_class_loader()*/) { ++ RC_TRACE(0x00000002, ("Found class to link")); ++ links->append(Pair<klassOop, klassOop>(curOop, the_class())); ++ break; ++ } ++ } ++ } ++ } ++ ++ ++ RC_TRACE(0x00000001, ("Identified links between classes! 
")); ++ ++ for (int i=0; i<affected->length(); i++) { ++ ++ instanceKlassHandle klass = affected->at(i); ++ ++ klassOop superKlass = klass->super(); ++ if (affected->contains(superKlass)) { ++ links->append(Pair<klassOop, klassOop>(superKlass, klass())); ++ } ++ ++ objArrayOop superInterfaces = klass->local_interfaces(); ++ for (int j=0; j<superInterfaces->length(); j++) { ++ klassOop interfaceKlass = (klassOop)superInterfaces->obj_at(j); ++ if (arr->contains(interfaceKlass)) { ++ links->append(Pair<klassOop, klassOop>(interfaceKlass, klass())); ++ } ++ } ++ } ++ ++ if (RC_TRACE_ENABLED(0x00000002)) { ++ RC_TRACE(0x00000002, ("Identified links: ")); ++ for (int i=0; i<links->length(); i++) { ++ RC_TRACE(0x00000002, ("%s to %s", ++ links->at(i).left()->klass_part()->name()->as_C_string(), ++ links->at(i).right()->klass_part()->name()->as_C_string())); ++ } ++ } ++ ++ for (int i=0; i<arr->length(); i++) { ++ ++ int j; ++ for (j=i; j<arr->length(); j++) { ++ ++ int k; ++ for (k=0; k<links->length(); k++) { ++ ++ klassOop k1 = links->adr_at(k)->right(); ++ klassOop k2 = arr->at(j)(); ++ if (k1 == k2) { ++ break; ++ } ++ } ++ ++ if (k == links->length()) { ++ break; + } +- guarantee(false, "OLD and/or OBSOLETE method(s) found"); ++ } ++ ++ if (j == arr->length()) { ++ // circle detected ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; ++ } ++ ++ for (int k=0; k<links->length(); k++) { ++ if (links->adr_at(k)->left() == arr->at(j)()) { ++ links->at_put(k, links->at(links->length() - 1)); ++ links->remove_at(links->length() - 1); ++ k--; ++ } ++ } ++ ++ instanceKlassHandle tmp = arr->at(j); ++ arr->at_put(j, arr->at(i)); ++ arr->at_put(i, tmp); ++ } ++ ++ return JVMTI_ERROR_NONE; ++} ++ ++void VM_RedefineClasses::oops_do(OopClosure *closure) { ++ ++ if (_updated_oops != NULL) { ++ for (int i=0; i<_updated_oops->length(); i++) { ++ closure->do_oop(_updated_oops->adr_at(i)); + } + } + } + +-void VM_RedefineClasses::dump_methods() { +- int j; +- RC_TRACE(0x00004000, ("_old_methods --")); +- for (j = 0; j < _old_methods->length(); ++j) { +- methodOop m = (methodOop) _old_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_new_methods --")); +- for (j = 0; j < _new_methods->length(); ++j) { +- methodOop m = (methodOop) _new_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_matching_(old/new)_methods --")); +- for (j = 0; j < _matching_methods_length; ++j) { +- methodOop m = _matching_old_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- m = _matching_new_methods[j]; +- RC_TRACE_NO_CR(0x00004000, (" (%5d) ", m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_deleted_methods --")); +- for (j = 0; j < _deleted_methods_length; ++j) { +- methodOop m = _deleted_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_added_methods --")); +- for (j = 0; j < _added_methods_length; ++j) { +- methodOop m = _added_methods[j]; +- RC_TRACE_NO_CR(0x00004000, 
("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); + } ++void VM_RedefineClasses::transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to) { ++ to->set_is_field_modification_watched(from->is_field_modification_watched()); ++ to->set_is_field_access_watched(from->is_field_access_watched()); ++ if (from->is_field_modification_watched() || from->is_field_access_watched()) { ++ RC_TRACE(0x00000002, ("Transferred watch for field %s", ++ from->name()->as_C_string())); ++ } ++ update_klass_field_access_flag(to); ++} ++ ++void VM_RedefineClasses::update_klass_field_access_flag(fieldDescriptor *fd) { ++ instanceKlass* ik = instanceKlass::cast(fd->field_holder()); ++ FieldInfo* fi = FieldInfo::from_field_array(ik->fields(), fd->index()); ++ fi->set_access_flags(fd->access_flags().as_short()); ++} ++ ++ ++// This internal class transfers the native function registration from old methods ++// to new methods. It is designed to handle both the simple case of unchanged ++// native methods and the complex cases of native method prefixes being added and/or ++// removed. ++// It expects only to be used during the VM_RedefineClasses op (a safepoint). ++// ++// This class is used after the new methods have been installed in "the_class". ++// ++// So, for example, the following must be handled. Where 'm' is a method and ++// a number followed by an underscore is a prefix. ++// ++// Old Name New Name ++// Simple transfer to new method m -> m ++// Add prefix m -> 1_m ++// Remove prefix 1_m -> m ++// Simultaneous add of prefixes m -> 3_2_1_m ++// Simultaneous removal of prefixes 3_2_1_m -> m ++// Simultaneous add and remove 1_m -> 2_m ++// Same, caused by prefix removal only 3_2_1_m -> 3_2_m ++// ++class TransferNativeFunctionRegistration { ++private: ++ instanceKlassHandle the_class; ++ int prefix_count; ++ char** prefixes; ++ ++ // Recursively search the binary tree of possibly prefixed method names. ++ // Iteration could be used if all agents were well behaved. Full tree walk is ++ // more resilient to agents not cleaning up intermediate methods. ++ // Branch at each depth in the binary tree is: ++ // (1) without the prefix. ++ // (2) with the prefix. ++ // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) ++ methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, ++ Symbol* signature) { ++ Symbol* name_symbol = SymbolTable::probe(name_str, (int)name_len); ++ if (name_symbol != NULL) { ++ methodOop method = Klass::cast(the_class()->klass_part()->new_version())->lookup_method(name_symbol, signature); ++ if (method != NULL) { ++ // Even if prefixed, intermediate methods must exist. ++ if (method->is_native()) { ++ // Wahoo, we found a (possibly prefixed) version of the method, return it. ++ return method; ++ } ++ if (depth < prefix_count) { ++ // Try applying further prefixes (other than this one). ++ method = search_prefix_name_space(depth+1, name_str, name_len, signature); ++ if (method != NULL) { ++ return method; // found ++ } ++ ++ // Try adding this prefix to the method name and see if it matches ++ // another method name.
++ char* prefix = prefixes[depth]; ++ size_t prefix_len = strlen(prefix); ++ size_t trial_len = name_len + prefix_len; ++ char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); ++ strcpy(trial_name_str, prefix); ++ strcat(trial_name_str, name_str); ++ method = search_prefix_name_space(depth+1, trial_name_str, trial_len, ++ signature); ++ if (method != NULL) { ++ // If found along this branch, it was prefixed, mark as such ++ method->set_is_prefixed_native(); ++ return method; // found ++ } ++ } ++ } ++ } ++ return NULL; // This whole branch bore nothing ++ } ++ ++ // Return the method name with old prefixes stripped away. ++ char* method_name_without_prefixes(methodOop method) { ++ Symbol* name = method->name(); ++ char* name_str = name->as_utf8(); ++ ++ // Old prefixing may be defunct, strip prefixes, if any. ++ for (int i = prefix_count-1; i >= 0; i--) { ++ char* prefix = prefixes[i]; ++ size_t prefix_len = strlen(prefix); ++ if (strncmp(prefix, name_str, prefix_len) == 0) { ++ name_str += prefix_len; ++ } ++ } ++ return name_str; ++ } ++ ++ // Strip any prefixes off the old native method, then try to find a ++ // (possibly prefixed) new native that matches it. ++ methodOop strip_and_search_for_new_native(methodOop method) { ++ ResourceMark rm; ++ char* name_str = method_name_without_prefixes(method); ++ return search_prefix_name_space(0, name_str, strlen(name_str), ++ method->signature()); ++ } ++ ++public: ++ ++ // Construct a native method transfer processor for this class. ++ TransferNativeFunctionRegistration(instanceKlassHandle _the_class) { ++ assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); ++ ++ the_class = _the_class; ++ prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count); ++ } ++ ++ // Attempt to transfer any of the old or deleted methods that are native ++ void transfer_registrations(instanceKlassHandle old_klass, int* old_methods, int methods_length) { ++ for (int j = 0; j < methods_length; j++) { ++ methodOop old_method = (methodOop)old_klass->methods()->obj_at(old_methods[j]); ++ ++ if (old_method->is_native() && old_method->has_native_function()) { ++ methodOop new_method = strip_and_search_for_new_native(old_method); ++ if (new_method != NULL) { ++ // Actually set the native function in the new method. ++ // Redefine does not send events (except CFLH), certainly not this ++ // behind the scenes re-registration. ++ new_method->set_native_function(old_method->native_function(), ++ !methodOopDesc::native_bind_event_is_interesting); ++ ++ RC_TRACE(0x00008000, ("Transferring native function for method %s", old_method->name()->as_C_string())); ++ } ++ } ++ } + } ++}; ++ ++// Don't lose the association between a native method and its JNI function. ++void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle old_klass) { ++ TransferNativeFunctionRegistration transfer(old_klass); ++ transfer.transfer_registrations(old_klass, _deleted_methods, _deleted_methods_length); ++ transfer.transfer_registrations(old_klass, _matching_old_methods, _matching_methods_length); + } +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.hpp b/src/share/vm/prims/jvmtiRedefineClasses.hpp +index 671f2ae..88fdbac 100644 +--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp +@@ -1,26 +1,29 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ ++* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. ++* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++* ++* This code is free software; you can redistribute it and/or modify it ++* under the terms of the GNU General Public License version 2 only, as ++* published by the Free Software Foundation. ++* ++* This code is distributed in the hope that it will be useful, but WITHOUT ++* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++* version 2 for more details (a copy is included in the LICENSE file that ++* accompanied this code). ++* ++* You should have received a copy of the GNU General Public License version ++* 2 along with this work; if not, write to the Free Software Foundation, ++* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++* ++* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++* or visit www.oracle.com if you need additional information or have any ++* questions. ++* ++*/ ++ ++// New version that allows arbitrary changes to already loaded classes. ++// Modifications done by: Thomas Wuerthinger <thomas.wuerthinger@gmail.com> + + #ifndef SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP + #define SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP +@@ -32,331 +35,28 @@ + #include "oops/objArrayOop.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" + #include "runtime/vm_operations.hpp" ++#include "gc_implementation/shared/vmGCOperations.hpp" + +-// Introduction: +-// +-// The RedefineClasses() API is used to change the definition of one or +-// more classes. While the API supports redefining more than one class +-// in a single call, in general, the API is discussed in the context of +-// changing the definition of a single current class to a single new +-// class. For clarity, the current class is will always be called +-// "the_class" and the new class will always be called "scratch_class". +-// +-// The name "the_class" is used because there is only one structure +-// that represents a specific class; redefinition does not replace the +-// structure, but instead replaces parts of the structure. The name +-// "scratch_class" is used because the structure that represents the +-// new definition of a specific class is simply used to carry around +-// the parts of the new definition until they are used to replace the +-// appropriate parts in the_class. Once redefinition of a class is +-// complete, scratch_class is thrown away. 
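For orientation, this is roughly how a JVMTI agent drives the RedefineClasses() API described above. A minimal sketch assuming a jvmtiEnv obtained in Agent_OnLoad with the can_redefine_classes capability already added; error handling is omitted, and 'bytes' must be a complete class file for the same class:

    #include <jvmti.h>

    static jvmtiError redefine_one(jvmtiEnv* jvmti, jclass klass,
                                   const unsigned char* bytes, jint len) {
      jvmtiClassDefinition def;
      def.klass = klass;
      def.class_byte_count = len;
      def.class_bytes = bytes;
      // One call may redefine several classes as a unit; here just one.
      return jvmti->RedefineClasses(1, &def);
    }

On a stock HotSpot this call is limited to swapping method bodies; the point of this patch is to accept arbitrary changes through the same entry point.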
+-// +-// +-// Implementation Overview: +-// +-// The RedefineClasses() API is mostly a wrapper around the VM op that +-// does the real work. The work is split in varying degrees between +-// doit_prologue(), doit() and doit_epilogue(). +-// +-// 1) doit_prologue() is called by the JavaThread on the way to a +-// safepoint. It does parameter verification and loads scratch_class +-// which involves: +-// - parsing the incoming class definition using the_class' class +-// loader and security context +-// - linking scratch_class +-// - merging constant pools and rewriting bytecodes as needed +-// for the merged constant pool +-// - verifying the bytecodes in scratch_class +-// - setting up the constant pool cache and rewriting bytecodes +-// as needed to use the cache +-// - finally, scratch_class is compared to the_class to verify +-// that it is a valid replacement class +-// - if everything is good, then scratch_class is saved in an +-// instance field in the VM operation for the doit() call +-// +-// Note: A JavaThread must do the above work. +-// +-// 2) doit() is called by the VMThread during a safepoint. It installs +-// the new class definition(s) which involves: +-// - retrieving the scratch_class from the instance field in the +-// VM operation +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding weak reference(s) to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-// - telling the SystemDictionary to notice our changes +-// +-// Note: the above work must be done by the VMThread to be safe. +-// +-// 3) doit_epilogue() is called by the JavaThread after the VM op +-// is finished and the safepoint is done. It simply cleans up +-// memory allocated in doit_prologue() and used in doit(). +-// +-// +-// Constant Pool Details: +-// +-// When the_class is redefined, we cannot just replace the constant +-// pool in the_class with the constant pool from scratch_class because +-// that could confuse obsolete methods that may still be running. +-// Instead, the constant pool from the_class, old_cp, is merged with +-// the constant pool from scratch_class, scratch_cp. The resulting +-// constant pool, merge_cp, replaces old_cp in the_class. +-// +-// The key part of any merging algorithm is the entry comparison +-// function so we have to know the types of entries in a constant pool +-// in order to merge two of them together. Constant pools can contain +-// up to 12 different kinds of entries; the JVM_CONSTANT_Unicode entry +-// is not presently used so we only have to worry about the other 11 +-// entry types. For the purposes of constant pool merging, it is +-// helpful to know that the 11 entry types fall into 3 different +-// subtypes: "direct", "indirect" and "double-indirect". +-// +-// Direct CP entries contain data and do not contain references to +-// other CP entries. The following are direct CP entries: +-// JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} +-// +-// Indirect CP entries contain 1 or 2 references to a direct CP entry +-// and no other data. 
The following are indirect CP entries: +-// JVM_CONSTANT_{Class,NameAndType,String} +-// +-// Double-indirect CP entries contain two references to indirect CP +-// entries and no other data. The following are double-indirect CP +-// entries: +-// JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} +-// +-// When comparing entries between two constant pools, the entry types +-// are compared first and if they match, then further comparisons are +-// made depending on the entry subtype. Comparing direct CP entries is +-// simply a matter of comparing the data associated with each entry. +-// Comparing both indirect and double-indirect CP entries requires +-// recursion. +-// +-// Fortunately, the recursive combinations are limited because indirect +-// CP entries can only refer to direct CP entries and double-indirect +-// CP entries can only refer to indirect CP entries. The following is +-// an example illustration of the deepest set of indirections needed to +-// access the data associated with a JVM_CONSTANT_Fieldref entry: +-// +-// JVM_CONSTANT_Fieldref { +-// class_index => JVM_CONSTANT_Class { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-1> +-// } +-// } +-// name_and_type_index => JVM_CONSTANT_NameAndType { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-2> +-// } +-// descriptor_index => JVM_CONSTANT_Utf8 { +-// <data-3> +-// } +-// } +-// } +-// +-// The above illustration is not a data structure definition for any +-// computer language. The curly braces ('{' and '}') are meant to +-// delimit the context of the "fields" in the CP entry types shown. +-// Each indirection from the JVM_CONSTANT_Fieldref entry is shown via +-// "=>", e.g., the class_index is used to indirectly reference a +-// JVM_CONSTANT_Class entry where the name_index is used to indirectly +-// reference a JVM_CONSTANT_Utf8 entry which contains the interesting +-// <data-1>. In order to understand a JVM_CONSTANT_Fieldref entry, we +-// have to do a total of 5 indirections just to get to the CP entries +-// that contain the interesting pieces of data and then we have to +-// fetch the three pieces of data. This means we have to do a total of +-// (5 + 3) * 2 == 16 dereferences to compare two JVM_CONSTANT_Fieldref +-// entries. +-// +-// Here is the indirection, data and dereference count for each entry +-// type: +-// +-// JVM_CONSTANT_Class 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Double 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Fieldref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Float 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Integer 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_InterfaceMethodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Long 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Methodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_NameAndType 1 indir, 2 data, 4 derefs +-// JVM_CONSTANT_String 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Utf8 0 indir, 1 data, 1 deref +-// +-// So different subtypes of CP entries require different amounts of +-// work for a proper comparison. +-// +-// Now that we've talked about the different entry types and how to +-// compare them we need to get back to merging. This is not a merge in +-// the "sort -u" sense or even in the "sort" sense. When we merge two +-// constant pools, we copy all the entries from old_cp to merge_cp, +-// preserving entry order. Next we append all the unique entries from +-// scratch_cp to merge_cp and we track the index changes from the +-// location in scratch_cp to the possibly new location in merge_cp. 
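The copy-then-append-unique merge sketched in the last paragraph, together with the index tracking that later drives bytecode rewriting, is easy to model. A toy version in plain C++ (strings stand in for CP entries; nothing below is HotSpot code):

    #include <map>
    #include <string>
    #include <vector>

    // merge_cp starts as old_cp verbatim, so old indices stay valid for any
    // obsolete code. Every scratch_cp entry not already present is appended,
    // and only the scratch indices whose position changed are recorded,
    // mirroring the "_index_map_p contains any entries" bookkeeping above.
    std::vector<std::string> merge(const std::vector<std::string>& old_cp,
                                   const std::vector<std::string>& scratch_cp,
                                   std::map<int, int>& index_map) {
      std::vector<std::string> merge_cp = old_cp;
      for (int i = 0; i < (int)scratch_cp.size(); i++) {
        int found = -1;
        for (int j = 0; j < (int)merge_cp.size() && found < 0; j++)
          if (merge_cp[j] == scratch_cp[i]) found = j;
        if (found < 0) {
          merge_cp.push_back(scratch_cp[i]);
          found = (int)merge_cp.size() - 1;
        }
        if (found != i) index_map[i] = found;   // tracked for the rewriter
      }
      return merge_cp;
    }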
+-// When we are done, any obsolete code that is still running that +-// uses old_cp should not be able to observe any difference if it +-// were to use merge_cp. As for the new code in scratch_class, it is +-// modified to use the appropriate index values in merge_cp before it +-// is used to replace the code in the_class. +-// +-// There is one small complication in copying the entries from old_cp +-// to merge_cp. Two of the CP entry types are special in that they are +-// lazily resolved. Before explaining the copying complication, we need +-// to digress into CP entry resolution. +-// +-// JVM_CONSTANT_Class and JVM_CONSTANT_String entries are present in +-// the class file, but are not stored in memory as such until they are +-// resolved. The entries are not resolved unless they are used because +-// resolution is expensive. During class file parsing the entries are +-// initially stored in memory as JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries. These special CP entry types +-// indicate that the JVM_CONSTANT_Class and JVM_CONSTANT_String entries +-// have been parsed, but the index values in the entries have not been +-// validated. After the entire constant pool has been parsed, the index +-// values can be validated and then the entries are converted into +-// JVM_CONSTANT_UnresolvedClass and JVM_CONSTANT_UnresolvedString +-// entries. During this conversion process, the UTF8 values that are +-// indirectly referenced by the JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries are changed into Symbol*s and the +-// entries are modified to refer to the Symbol*s. This optimization +-// eliminates one level of indirection for those two CP entry types and +-// gets the entries ready for verification. During class file parsing +-// it is also possible for JVM_CONSTANT_UnresolvedString entries to be +-// resolved into JVM_CONSTANT_String entries. Verification expects to +-// find JVM_CONSTANT_UnresolvedClass and either JVM_CONSTANT_String or +-// JVM_CONSTANT_UnresolvedString entries and not JVM_CONSTANT_Class +-// entries. +-// +-// Now we can get back to the copying complication. When we copy +-// entries from old_cp to merge_cp, we have to revert any +-// JVM_CONSTANT_Class entries to JVM_CONSTANT_UnresolvedClass entries +-// or verification will fail. +-// +-// It is important to explicitly state that the merging algorithm +-// effectively unresolves JVM_CONSTANT_Class entries that were in the +-// old_cp when they are changed into JVM_CONSTANT_UnresolvedClass +-// entries in the merge_cp. This is done both to make verification +-// happy and to avoid adding more brittleness between RedefineClasses +-// and the constant pool cache. By allowing the constant pool cache +-// implementation to (re)resolve JVM_CONSTANT_UnresolvedClass entries +-// into JVM_CONSTANT_Class entries, we avoid having to embed knowledge +-// about those algorithms in RedefineClasses. +-// +-// Appending unique entries from scratch_cp to merge_cp is straight +-// forward for direct CP entries and most indirect CP entries. For the +-// indirect CP entry type JVM_CONSTANT_NameAndType and for the double- +-// indirect CP entry types, the presence of more than one piece of +-// interesting data makes appending the entries more complicated. +-// +-// For the JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} entry types, +-// the entry is simply copied from scratch_cp to the end of merge_cp. 
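The append-unique step relies on the entry comparison described earlier: entry types are compared first, direct entries by value, and indirect entries by recursing down to the Utf8 base case. A toy model of that scheme in plain C++17, with invented stand-in types rather than the real JVM_CONSTANT entries:

    #include <string>
    #include <variant>
    #include <vector>

    struct Utf8        { std::string data; };           // direct
    struct Klass       { int name_index; };             // indirect, 1 ref
    struct NameAndType { int name_index, desc_index; }; // indirect, 2 refs
    struct Fieldref    { int class_index, nat_index; }; // double-indirect

    using Entry = std::variant<Utf8, Klass, NameAndType, Fieldref>;
    using Pool  = std::vector<Entry>;

    bool same(const Pool& a, int i, const Pool& b, int j) {
      if (a[i].index() != b[j].index()) return false;   // entry types first
      if (const Utf8* u = std::get_if<Utf8>(&a[i]))     // direct: compare data
        return u->data == std::get<Utf8>(b[j]).data;
      if (const Klass* k = std::get_if<Klass>(&a[i]))   // one indirection
        return same(a, k->name_index, b, std::get<Klass>(b[j]).name_index);
      if (const NameAndType* n = std::get_if<NameAndType>(&a[i])) {
        const NameAndType& m = std::get<NameAndType>(b[j]);
        return same(a, n->name_index, b, m.name_index) &&
               same(a, n->desc_index, b, m.desc_index);
      }
      const Fieldref& f = std::get<Fieldref>(a[i]);     // recurse into the
      const Fieldref& g = std::get<Fieldref>(b[j]);     // two indirect refs
      return same(a, f.class_index, b, g.class_index) &&
             same(a, f.nat_index, b, g.nat_index);
    }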
+-// If the index in scratch_cp is different than the destination index +-// in merge_cp, then the change in index value is tracked. +-// +-// Note: the above discussion for the direct CP entries also applies +-// to the JVM_CONSTANT_Unresolved{Class,String} entry types. +-// +-// For the JVM_CONSTANT_{Class,String} entry types, since there is only +-// one data element at the end of the recursion, we know that we have +-// either one or two unique entries. If the JVM_CONSTANT_Utf8 entry is +-// unique then it is appended to merge_cp before the current entry. +-// If the JVM_CONSTANT_Utf8 entry is not unique, then the current entry +-// is updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// Note: the above discussion for JVM_CONSTANT_{Class,String} entry +-// types is theoretical. Since those entry types have already been +-// optimized into JVM_CONSTANT_Unresolved{Class,String} entry types, +-// they are handled as direct CP entries. +-// +-// For the JVM_CONSTANT_NameAndType entry type, since there are two +-// data elements at the end of the recursions, we know that we have +-// between one and three unique entries. Any unique JVM_CONSTANT_Utf8 +-// entries are appended to merge_cp before the current entry. For any +-// JVM_CONSTANT_Utf8 entries that are not unique, the current entry is +-// updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// For the JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} entry +-// types, since there are two indirect CP entries and three data +-// elements at the end of the recursions, we know that we have between +-// one and six unique entries. See the JVM_CONSTANT_Fieldref diagram +-// above for an example of all six entries. The uniqueness algorithm +-// for the JVM_CONSTANT_Class and JVM_CONSTANT_NameAndType entries is +-// covered above. Any unique entries are appended to merge_cp before +-// the current entry. For any entries that are not unique, the current +-// entry is updated to refer to the duplicate entry in merge_cp before +-// it is appended to merge_cp. Again, any changes in index values are +-// tracked as needed. +-// +-// +-// Other Details: +-// +-// Details for other parts of RedefineClasses need to be written. +-// This is a placeholder section. +-// +-// +-// Open Issues (in no particular order): +-// +-// - How do we serialize the RedefineClasses() API without deadlocking? +-// +-// - SystemDictionary::parse_stream() was called with a NULL protection +-// domain since the initial version. This has been changed to pass +-// the_class->protection_domain(). This change has been tested with +-// all NSK tests and nothing broke, but what will adding it now break +-// in ways that we don't test? +-// +-// - GenerateOopMap::rewrite_load_or_store() has a comment in its +-// (indirect) use of the Relocator class that the max instruction +-// size is 4 bytes. goto_w and jsr_w are 5 bytes and wide/iinc is +-// 6 bytes. Perhaps Relocator only needs a 4 byte buffer to do +-// what it does to the bytecodes. More investigation is needed. +-// +-// - java.lang.Object methods can be called on arrays. This is +-// implemented via the arrayKlassOop vtable which we don't +-// update. For example, if we redefine java.lang.Object.toString(), +-// then the new version of the method will not be called for array +-// objects. 
+-// +-// - How do we know if redefine_single_class() and the guts of +-// instanceKlass are out of sync? I don't think this can be +-// automated, but we should probably order the work in +-// redefine_single_class() to match the order of field +-// definitions in instanceKlass. We also need to add some +-// comments about keeping things in sync. +-// +-// - set_new_constant_pool() is huge and we should consider refactoring +-// it into smaller chunks of work. +-// +-// - The exception table update code in set_new_constant_pool() defines +-// const values that are also defined in a local context elsewhere. +-// The same literal values are also used in elsewhere. We need to +-// coordinate a cleanup of these constants with Runtime. +-// +- +-class VM_RedefineClasses: public VM_Operation { ++#define RC_ABORT(error) { _result = error; return false; } ++ ++class VM_RedefineClasses: public VM_GC_Operation { + private: ++ + // These static fields are needed by SystemDictionary::classes_do() + // facility and the adjust_cpool_cache_and_vtable() helper: + static objArrayOop _old_methods; + static objArrayOop _new_methods; +- static methodOop* _matching_old_methods; +- static methodOop* _matching_new_methods; +- static methodOop* _deleted_methods; +- static methodOop* _added_methods; ++ static int* _matching_old_methods; ++ static int* _matching_new_methods; ++ static int* _deleted_methods; ++ static int* _added_methods; + static int _matching_methods_length; + static int _deleted_methods_length; + static int _added_methods_length; + static klassOop _the_class_oop; + ++ static int _revision_number; ++ + // The instance fields are used to pass information from + // doit_prologue() to doit() and doit_epilogue(). + jint _class_count; +@@ -370,36 +70,29 @@ class VM_RedefineClasses: public VM_Operation { + // _index_map_p contains any entries. + int _index_map_count; + intArray * _index_map_p; +- // ptr to _class_count scratch_classes +- instanceKlassHandle * _scratch_classes; +- jvmtiError _res; ++ GrowableArray<instanceKlassHandle>* _new_classes; ++ GrowableArray<oop>* _updated_oops; ++ jvmtiError _result; ++ int _max_redefinition_flags; + + // Performance measurement support. These timers do not cover all + // the work done for JVM/TI RedefineClasses() but they do cover + // the heavy lifting. +- elapsedTimer _timer_rsc_phase1; +- elapsedTimer _timer_rsc_phase2; +- elapsedTimer _timer_vm_op_prologue; +- +- // These routines are roughly in call order unless otherwise noted. +- +- // Load the caller's new class definition(s) into _scratch_classes. +- // Constant pool merging work is done here as needed. Also calls +- // compare_and_normalize_class_versions() to verify the class +- // definition(s). ++ elapsedTimer _timer_total; ++ elapsedTimer _timer_prologue; ++ elapsedTimer _timer_class_linking; ++ elapsedTimer _timer_class_loading; ++ elapsedTimer _timer_check_type; ++ elapsedTimer _timer_prepare_redefinition; ++ elapsedTimer _timer_wait_for_locks; ++ elapsedTimer _timer_redefinition; ++ elapsedTimer _timer_vm_op_epilogue; ++ ++ jvmtiError check_redefinition_allowed(instanceKlassHandle new_class); ++ jvmtiError find_sorted_affected_classes(GrowableArray<instanceKlassHandle> *all_affected_klasses); ++ jvmtiError find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed); + jvmtiError load_new_class_versions(TRAPS); + +- // Verify that the caller provided class definition(s) that meet +- // the restrictions of RedefineClasses. 
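The RC_ABORT helper defined above compresses the error-propagation pattern used by the new bool-returning checks. A hypothetical use follows: check_arguments(), _class_count and _result come from the patch, but the specific checks and error codes here are illustrative only, not the patch's actual body:

    bool VM_RedefineClasses::check_arguments() {
      if (_class_count < 0)  RC_ABORT(JVMTI_ERROR_ILLEGAL_ARGUMENT);
      if (_class_count == 0) RC_ABORT(JVMTI_ERROR_NONE);  // nothing to do
      return true;  // on failure, _result is reported via check_error()
    }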
Normalize the order of +- // overloaded methods as needed. +- jvmtiError compare_and_normalize_class_versions( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class); +- +- // Swap annotations[i] with annotations[j] +- // Used by compare_and_normalize_class_versions() when normalizing +- // overloaded methods or changing idnum as when adding or deleting methods. +- void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); +- + // Figure out which new methods match old methods in name and signature, + // which methods have been added, and which are no longer present + void compute_added_deleted_matching_methods(); +@@ -407,95 +100,100 @@ class VM_RedefineClasses: public VM_Operation { + // Change jmethodIDs to point to the new methods + void update_jmethod_ids(); + +- // In addition to marking methods as obsolete, this routine +- // records which methods are EMCP (Equivalent Module Constant +- // Pool) in the emcp_methods BitMap and returns the number of +- // EMCP methods via emcp_method_count_p. This information is +- // used when information about the previous version of the_class +- // is squirreled away. +- void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, +- int * emcp_method_count_p); +- void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ class FindAffectedKlassesClosure : public ObjectClosure { + +- // Unevolving classes may point to methods of the_class directly +- // from their constant pool caches, itables, and/or vtables. We +- // use the SystemDictionary::classes_do() facility and this helper +- // to fix up these pointers. +- static void adjust_cpool_cache_and_vtable(klassOop k_oop, oop loader, TRAPS); ++ private: ++ GrowableArray<instanceKlassHandle> *_original_klasses; ++ GrowableArray<instanceKlassHandle> *_result; ++ ++ public: ++ FindAffectedKlassesClosure(GrowableArray<instanceKlassHandle> *original_klasses, GrowableArray<instanceKlassHandle> *result); ++ ++ virtual void do_object(oop obj); ++ }; ++ ++ ++ static jvmtiError do_topological_class_sorting(const jvmtiClassDefinition *class_definitions, int class_count, GrowableArray<instanceKlassHandle> *affected, GrowableArray<instanceKlassHandle> *arr, TRAPS); + + // Install the redefinition of a class +- void redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS); ++ void redefine_single_class(instanceKlassHandle the_new_class, TRAPS); + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
+ void increment_class_counter(instanceKlass *ik, TRAPS); + +- // Support for constant pool merging (these routines are in alpha +- // order): +- void append_entry(constantPoolHandle scratch_cp, int scratch_i, +- constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); +- int find_new_index(int old_index); +- bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- bool is_unresolved_string_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); +- bool merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS); +- jvmtiError merge_cp_and_rewrite(instanceKlassHandle the_class, +- instanceKlassHandle scratch_class, TRAPS); +- u2 rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS); +- bool rewrite_cp_refs(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_element_value( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_method(methodHandle method, +- methodHandle * new_method_p, TRAPS); +- bool rewrite_cp_refs_in_methods(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS); +- void rewrite_cp_refs_in_verification_type_info( +- address& stackmap_addr_ref, address stackmap_end, u2 frame_i, +- u1 frame_size, TRAPS); +- void set_new_constant_pool(instanceKlassHandle scratch_class, +- constantPoolHandle scratch_cp, int scratch_cp_length, bool shrink, TRAPS); + + void flush_dependent_code(instanceKlassHandle k_h, TRAPS); + +- static void check_class(klassOop k_oop, oop initiating_loader, TRAPS); +- static void dump_methods(); ++ static void check_class(klassOop k_oop,/* oop initiating_loader,*/ TRAPS) PRODUCT_RETURN; ++ ++ static void adjust_cpool_cache(klassOop k_oop, oop initiating_loader, TRAPS); ++ ++#ifdef ASSERT ++ static void verify_classes(klassOop k_oop, oop initiating_loader, TRAPS); ++#endif ++ ++ int calculate_redefinition_flags(instanceKlassHandle new_version); ++ void calculate_instance_update_information(klassOop new_version); ++ void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p); ++ ++ static void calculate_type_check_information(klassOop k); ++ static void clear_type_check_information(klassOop k); + + public: +- VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind); +- VMOp_Type type() const { return VMOp_RedefineClasses; } ++ VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind 
class_load_kind);
++ virtual ~VM_RedefineClasses();
++
++ bool check_arguments();
+ bool doit_prologue();
+ void doit();
+ void doit_epilogue();
++ void rollback();
+
+- bool allow_nested_vm_operations() const { return true; }
+- jvmtiError check_error() { return _res; }
++ jvmtiError check_exception() const;
++ VMOp_Type type() const { return VMOp_RedefineClasses; }
++ bool skip_operation() const { return false; }
++ bool allow_nested_vm_operations() const { return true; }
++ jvmtiError check_error() { return _result; }
++
++ void update_active_methods();
++
++ // Checks for type consistency after hierarchy change
++ bool check_type_consistency();
++ void calculate_type_check_information();
++ bool check_field_value_types();
++ void clear_type_check_information();
++ bool check_method_stacks();
++ bool check_loaded_methods();
++ bool check_method(methodOop method);
++ static Symbol* signature_to_class_name(Symbol* signature);
++
++ void method_forwarding();
++
++ void update_array_classes_to_newest_version(klassOop smallest_dimension);
+
+ // Modifiable test must be shared between IsModifiableClass query
+ // and redefine implementation
+ static bool is_modifiable_class(oop klass_mirror);
++
++ // Method used during garbage collection; the VM operation must iterate over all oops.
++ void oops_do(OopClosure* f);
++
++ // Utility methods for transferring field access flags
++
++ static void transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to);
++ static void update_klass_field_access_flag(fieldDescriptor *fd);
++
++ void transfer_old_native_function_registrations(instanceKlassHandle the_class);
++
++ void lock_threads();
++ void unlock_threads();
++
++ template <class T> static void do_oop_work(T* p);
++
++ static void swap_marks(oop first, oop second);
++
+ };
+
+ #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP
++
+diff --git a/src/share/vm/prims/methodComparator.cpp b/src/share/vm/prims/methodComparator.cpp
+index 60eaf97..07bb6e3 100644
+--- a/src/share/vm/prims/methodComparator.cpp
++++ b/src/share/vm/prims/methodComparator.cpp
+@@ -65,6 +65,7 @@ bool MethodComparator::methods_EMCP(methodOop old_method, methodOop new_method)
+ if (!
args_same(c_old, c_new)) + return false; + } ++ + return true; + } + +diff --git a/src/share/vm/prims/nativeLookup.cpp b/src/share/vm/prims/nativeLookup.cpp +index 41fc42d..53b3e0c 100644 +--- a/src/share/vm/prims/nativeLookup.cpp ++++ b/src/share/vm/prims/nativeLookup.cpp +@@ -35,6 +35,7 @@ + #include "oops/symbol.hpp" + #include "prims/jvm_misc.hpp" + #include "prims/nativeLookup.hpp" ++#include "prims/jvmtiRedefineClasses.hpp" + #include "runtime/arguments.hpp" + #include "runtime/handles.inline.hpp" + #include "runtime/javaCalls.hpp" +@@ -53,7 +54,6 @@ + # include "os_bsd.inline.hpp" + #endif + +- + static void mangle_name_on(outputStream* st, Symbol* name, int begin, int end) { + char* bytes = (char*)name->bytes() + begin; + char* end_bytes = (char*)name->bytes() + end; +@@ -138,6 +138,40 @@ static JNINativeMethod lookup_special_native_methods[] = { + { CC"Java_sun_hotspot_WhiteBox_registerNatives", NULL, FN_PTR(JVM_RegisterWhiteBoxMethods) }, + }; + ++// Helper function to call redefineClasses from Java Code ++JVM_ENTRY(int, JVM_RedefineClassesHelper(JNIEnv *env, jclass cb, jclass target, jbyteArray bytes)) ++ ResourceMark rm(THREAD); ++ ++ JavaThread* current_thread = JavaThread::current(); ++ jbyte* bytecodes = NULL; ++ const int class_count = 1; ++ jvmtiClassDefinition* class_definitions = NEW_RESOURCE_ARRAY(jvmtiClassDefinition, class_count); ++ ++ { ++ ThreadToNativeFromVM ttnfv(thread); ++ jboolean is_copy = JNI_FALSE; ++ bytecodes = env->GetByteArrayElements(bytes, &is_copy); ++ class_definitions[0].klass = target; ++ class_definitions[0].class_byte_count = env->GetArrayLength(bytes); ++ class_definitions[0].class_bytes = (unsigned char*)bytecodes; ++ } ++ ++ VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); ++ VMThread::execute(&op); ++ int result = op.check_error(); ++ ++ { ++ ThreadToNativeFromVM ttnfv(thread); ++ if (env->ExceptionOccurred()) { ++ return -1; ++ } ++ env->ReleaseByteArrayElements(bytes, bytecodes, 0); ++ } ++ ++ return result; ++JVM_END ++ ++ + static address lookup_special_native(char* jni_name) { + int i = !JDK_Version::is_gte_jdk14x_version() ? 
0 : 2; // see comment in lookup_special_native_methods
+ int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
+@@ -177,6 +211,9 @@ address NativeLookup::lookup_style(methodHandle method, char* pure_name, const c
+ return entry;
+ }
+ }
++ if (strstr(jni_name, "Java_at_ssw_hotswap_ClassRedefinition_redefineClasses") != NULL) {
++ return CAST_FROM_FN_PTR(address, JVM_RedefineClassesHelper);
++ }
+
+ // Otherwise call static method findNative in ClassLoader
+ KlassHandle klass (THREAD, SystemDictionary::ClassLoader_klass());
+diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
+index 22d450b..d7bf297 100644
+--- a/src/share/vm/runtime/arguments.cpp
++++ b/src/share/vm/runtime/arguments.cpp
+@@ -1792,6 +1792,15 @@ bool Arguments::check_gc_consistency() {
+ status = false;
+ }
+
++ // (tw) Must use serial GC
++ if (!UseSerialGC && i >= 1) {
++ jio_fprintf(defaultStream::error_stream(),
++ "Must use the serial GC in the Dynamic Code Evolution VM\n");
++ status = false;
++ } else {
++ UseSerialGC = true;
++ }
++
+ return status;
+ }
+
+diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
+index 2b767d4..bcea3be 100644
+--- a/src/share/vm/runtime/deoptimization.cpp
++++ b/src/share/vm/runtime/deoptimization.cpp
+@@ -599,6 +599,38 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
+ // Cleanup thread deopt data
+ cleanup_deopt_info(thread, array);
+
++ // (tw) Redefinition support: Check if we need to transfer method execution points to new versions
++ {
++ ResourceMark res_mark;
++
++ // Verify that the just-unpacked frames match the interpreter's
++ // notions of expression stack and locals
++ vframeArray* cur_array = thread->vframe_array_last();
++ RegisterMap rm(thread, false);
++ rm.set_include_argument_oops(false);
++ for (int i = 0; i < cur_array->frames(); i++) {
++ vframeArrayElement* el = cur_array->element(i);
++ frame* frame = el->iframe();
++ guarantee(frame->is_interpreted_frame(), "Wrong frame type");
++ RegisterMap reg_map(thread);
++ vframe* vf = vframe::new_vframe(frame, &reg_map, thread);
++ interpretedVFrame *iframe = (interpretedVFrame *)vf;
++ methodOop method = iframe->method();
++ int bci = iframe->bci();
++ method = method->newest_version();
++ iframe->set_method(method, bci);
++
++ methodOop forward_method = method->forward_method();
++ if (forward_method != NULL && method->is_in_code_section(bci)) {
++ int new_bci = method->calculate_forward_bci(bci, forward_method);
++ if (TraceRedefineClasses >= 2) {
++ tty->print_cr("Transferring execution of %s to new method old_bci=%d new_bci=%d", forward_method->name()->as_C_string(), bci, new_bci);
++ }
++ iframe->set_method(forward_method, new_bci);
++ }
++ }
++ }
++
+ #ifndef PRODUCT
+ if (VerifyStack) {
+ ResourceMark res_mark;
+diff --git a/src/share/vm/runtime/frame.cpp b/src/share/vm/runtime/frame.cpp
+index 32d02d6..bbb6c47 100644
+--- a/src/share/vm/runtime/frame.cpp
++++ b/src/share/vm/runtime/frame.cpp
+@@ -407,6 +407,12 @@ void frame::interpreter_frame_set_method(methodOop method) {
+ *interpreter_frame_method_addr() = method;
+ }
+
++// (tw) Sets constant pool cache oop
++void frame::interpreter_frame_set_cache(constantPoolCacheOop cp) {
++ assert(is_interpreted_frame(), "interpreted frame expected");
++ *interpreter_frame_cache_addr() = cp;
++}
++
+ void frame::interpreter_frame_set_bcx(intptr_t bcx) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ if (ProfileInterpreter) {
+@@
-422,19 +428,27 @@ void frame::interpreter_frame_set_bcx(intptr_t bcx) { + // The bcx was just converted from bci to bcp. + // Convert the mdx in parallel. + methodDataOop mdo = interpreter_frame_method()->method_data(); +- assert(mdo != NULL, ""); +- int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. +- address mdp = mdo->di_to_dp(mdi); +- interpreter_frame_set_mdx((intptr_t)mdp); ++ if (mdo == NULL) { ++ interpreter_frame_set_mdx(0); ++ } else { ++ assert(mdo != NULL, ""); ++ int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. ++ address mdp = mdo->di_to_dp(mdi); ++ interpreter_frame_set_mdx((intptr_t)mdp); ++ } + } + } else { + if (is_now_bci) { + // The bcx was just converted from bcp to bci. + // Convert the mdx in parallel. + methodDataOop mdo = interpreter_frame_method()->method_data(); +- assert(mdo != NULL, ""); +- int mdi = mdo->dp_to_di((address)mdx); +- interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0. ++ if (mdo == NULL) { ++ interpreter_frame_set_mdx(0); ++ } else { ++ assert(mdo != NULL, ""); ++ int mdi = mdo->dp_to_di((address)mdx); ++ interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0. ++ } + } + } + } +diff --git a/src/share/vm/runtime/frame.hpp b/src/share/vm/runtime/frame.hpp +index 9c7bb72..04a6595 100644 +--- a/src/share/vm/runtime/frame.hpp ++++ b/src/share/vm/runtime/frame.hpp +@@ -346,6 +346,7 @@ class frame VALUE_OBJ_CLASS_SPEC { + // Method & constant pool cache + methodOop interpreter_frame_method() const; + void interpreter_frame_set_method(methodOop method); ++ void interpreter_frame_set_cache(constantPoolCacheOop method); + methodOop* interpreter_frame_method_addr() const; + constantPoolCacheOop* interpreter_frame_cache_addr() const; + #ifdef PPC +diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp +index 8df7220..634c589 100644 +--- a/src/share/vm/runtime/globals.hpp ++++ b/src/share/vm/runtime/globals.hpp +@@ -1227,9 +1227,23 @@ class CommandLineFlags { + product(bool, StressLdcRewrite, false, \ + "Force ldc -> ldc_w rewrite during RedefineClasses") \ + \ ++ product(bool, UseMethodForwardPoints, false, \ ++ "Use method forward points") \ ++ \ ++ product(intx, MethodForwardPointsMaxLocals, 300, \ ++ "Maximum number of locals in forwarding method") \ ++ \ ++ product(intx, MethodForwardPointsMaxStack, 300, \ ++ "Maximum number of stack slots in forwarding method") \ ++ \ + product(intx, TraceRedefineClasses, 0, \ + "Trace level for JVMTI RedefineClasses") \ + \ ++ product(bool, TimeRedefineClasses, false, \ ++ "Measure timing for JVMTI RedefineClasses") \ ++ \ ++ product(bool, AllowAdvancedClassRedefinition, true, \ ++ "Allow advanced class redefinition beyond swapping method bodies")\ + develop(bool, StressMethodComparator, false, \ + "run the MethodComparator on all loaded methods") \ + \ +diff --git a/src/share/vm/runtime/interfaceSupport.hpp b/src/share/vm/runtime/interfaceSupport.hpp +index 2875ee0..61fd8fe 100644 +--- a/src/share/vm/runtime/interfaceSupport.hpp ++++ b/src/share/vm/runtime/interfaceSupport.hpp +@@ -296,7 +296,7 @@ class ThreadToNativeFromVM : public ThreadStateTransition { + ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) { + // We are leaving the VM at this point and going directly to native code. + // Block, if we are in the middle of a safepoint synchronization. 
+- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + thread->frame_anchor()->make_walkable(thread); + trans_and_fence(_thread_in_vm, _thread_in_native); + // Check for pending. async. exceptions or suspends. +diff --git a/src/share/vm/runtime/javaCalls.cpp b/src/share/vm/runtime/javaCalls.cpp +index edbba98..4a27925 100644 +--- a/src/share/vm/runtime/javaCalls.cpp ++++ b/src/share/vm/runtime/javaCalls.cpp +@@ -60,7 +60,7 @@ JavaCallWrapper::JavaCallWrapper(methodHandle callee_method, Handle receiver, Ja + bool clear_pending_exception = true; + + guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code"); +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); + _result = result; + +diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp +index 3cbcaca..30839d7 100644 +--- a/src/share/vm/runtime/jniHandles.cpp ++++ b/src/share/vm/runtime/jniHandles.cpp +@@ -112,6 +112,10 @@ jobject JNIHandles::make_weak_global(Handle obj) { + } + + jmethodID JNIHandles::make_jmethod_id(methodHandle mh) { ++ if (mh->newest_version() != mh()) { ++ methodHandle mh_new(Thread::current(), mh()->newest_version()); ++ return (jmethodID) make_weak_global(mh_new); ++ } + return (jmethodID) make_weak_global(mh); + } + +diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp +index 2095237..c541434 100644 +--- a/src/share/vm/runtime/mutex.cpp ++++ b/src/share/vm/runtime/mutex.cpp +@@ -1227,7 +1227,7 @@ Monitor * Monitor::get_least_ranked_lock(Monitor * locks) { + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1247,7 +1247,7 @@ Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) { + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1310,6 +1310,7 @@ void Monitor::set_owner_implementation(Thread *new_owner) { + // already hold Terminator_lock - may happen because of periodic safepoints + if (this->rank() != Mutex::native && + this->rank() != Mutex::suspend_resume && ++ this->rank() != Mutex::redefine_classes && + locks != NULL && locks->rank() <= this->rank() && + !SafepointSynchronize::is_at_safepoint() && + this != Interrupt_lock && +diff --git a/src/share/vm/runtime/mutex.hpp b/src/share/vm/runtime/mutex.hpp +index 7d2cd82..11eb32e 100644 +--- a/src/share/vm/runtime/mutex.hpp ++++ b/src/share/vm/runtime/mutex.hpp +@@ -109,7 +109,8 @@ class Monitor : public CHeapObj<mtInternal> { + barrier = safepoint + 1, + nonleaf = barrier + 1, + max_nonleaf = nonleaf + 900, +- native = max_nonleaf + 1 ++ native = max_nonleaf + 1, ++ redefine_classes = native + 1 + }; + + // The WaitSet and EntryList linked lists are 
composed of ParkEvents. +diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp +index a6b2106..758e87f 100644 +--- a/src/share/vm/runtime/mutexLocker.cpp ++++ b/src/share/vm/runtime/mutexLocker.cpp +@@ -49,6 +49,7 @@ + // Consider using GCC's __read_mostly. + + Mutex* Patching_lock = NULL; ++Mutex* RedefineClasses_lock = NULL; + Monitor* SystemDictionary_lock = NULL; + Mutex* PackageTable_lock = NULL; + Mutex* CompiledIC_lock = NULL; +@@ -90,6 +91,7 @@ Mutex* Shared_SATB_Q_lock = NULL; + Mutex* DirtyCardQ_FL_lock = NULL; + Monitor* DirtyCardQ_CBL_mon = NULL; + Mutex* Shared_DirtyCardQ_lock = NULL; ++Monitor* RedefinitionSync_lock = NULL; + Mutex* ParGCRareEvent_lock = NULL; + Mutex* EvacFailureStack_lock = NULL; + Mutex* DerivedPointerTableGC_lock = NULL; +@@ -205,6 +207,7 @@ void mutex_init() { + def(HotCardCache_lock , Mutex , special , true ); + def(EvacFailureStack_lock , Mutex , nonleaf , true ); + } ++ def(RedefinitionSync_lock , Monitor , leaf , false ); + def(ParGCRareEvent_lock , Mutex , leaf , true ); + def(DerivedPointerTableGC_lock , Mutex, leaf, true ); + def(CodeCache_lock , Mutex , special, true ); +@@ -279,6 +282,7 @@ void mutex_init() { + def(Debug2_lock , Mutex , nonleaf+4, true ); + def(Debug3_lock , Mutex , nonleaf+4, true ); + def(CompileThread_lock , Monitor, nonleaf+5, false ); ++ def(RedefineClasses_lock , Mutex , nonleaf+7, false ); // for ensuring that class redefinition is not done in parallel + + def(JfrMsg_lock , Monitor, leaf, true); + def(JfrBuffer_lock , Mutex, nonleaf+1, true); +diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp +index 40008bb..72f8ce0 100644 +--- a/src/share/vm/runtime/mutexLocker.hpp ++++ b/src/share/vm/runtime/mutexLocker.hpp +@@ -43,6 +43,8 @@ + // Mutexes used in the VM. + + extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code ++extern Monitor* RedefinitionSync_lock; // a lock on synchronized class redefinition ++extern Mutex* RedefineClasses_lock; // a lock on class redefinition + extern Monitor* SystemDictionary_lock; // a lock on the system dictonary + extern Mutex* PackageTable_lock; // a lock on the class loader package table + extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access +diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp +index cd009ed..a53ad09 100644 +--- a/src/share/vm/runtime/reflection.cpp ++++ b/src/share/vm/runtime/reflection.cpp +@@ -468,7 +468,8 @@ bool Reflection::verify_class_access(klassOop current_class, klassOop new_class, + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. 
+ if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +@@ -519,6 +520,12 @@ bool Reflection::verify_field_access(klassOop current_class, + AccessFlags access, + bool classloader_only, + bool protected_restriction) { ++ ++ // (tw) Decide accessibility based on active version ++ if (current_class != NULL) { ++ current_class = current_class->klass_part()->active_version(); ++ } ++ + // Verify that current_class can access a field of field_class, where that + // field's access bits are "access". We assume that we've already verified + // that current_class can access field_class. +@@ -560,7 +567,8 @@ bool Reflection::verify_field_access(klassOop current_class, + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. + if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp +index 709d783..e0e19b1 100644 +--- a/src/share/vm/runtime/sharedRuntime.cpp ++++ b/src/share/vm/runtime/sharedRuntime.cpp +@@ -1137,7 +1137,20 @@ methodHandle SharedRuntime::resolve_helper(JavaThread *thread, + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && +- callee_method->method_holder() != SystemDictionary::Object_klass()) { ++ callee_method->method_holder()->klass_part()->newest_version() != SystemDictionary::Object_klass()->klass_part()->newest_version()) { ++ ++ // DCEVM: If we are executing an old method, this is OK! ++ { ++ ResourceMark rm(thread); ++ RegisterMap cbl_map(thread, false); ++ frame caller_frame = thread->last_frame().sender(&cbl_map); ++ ++ CodeBlob* caller_cb = caller_frame.cb(); ++ guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod"); ++ nmethod* caller_nm = caller_cb->as_nmethod_or_null(); ++ if (caller_nm->method()->is_old()) break; ++ } ++ + // If has a pending exception then there is no need to re-try to + // resolve this method. + // If the method has been redefined, we need to try again. 
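The hunk above relaxes the resolution loop's invariant: normally resolution retries until the callee is no longer an old (redefined-away) method, but under the DCEVM a compiled caller that is itself obsolete may legitimately keep reaching old callees. A schematic of the resulting control flow, with stand-in types rather than the real resolve_helper:

    struct Method { bool is_old; };

    static Method g_newest = { false };
    static Method* re_resolve() { return &g_newest; }  // stand-in for the real machinery

    static Method* resolve_until_current(const Method* caller) {
      Method* callee = re_resolve();
      while (callee->is_old) {
        if (caller->is_old) break;  // (DCEVM) obsolete caller: old callee is fine
        callee = re_resolve();      // redefinition raced the call; try again
      }
      return callee;
    }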
+diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp +index ae28b65..7d0d809 100644 +--- a/src/share/vm/runtime/thread.cpp ++++ b/src/share/vm/runtime/thread.cpp +@@ -216,6 +216,8 @@ Thread::Thread() { + set_self_raw_id(0); + set_lgrp_id(-1); + ++ _redefine_classes_mutex = new Mutex(Mutex::redefine_classes, "redefine classes lock", false); ++ + // allocated data structures + set_osthread(NULL); + set_resource_area(new (mtThread)ResourceArea()); +@@ -249,6 +251,7 @@ Thread::Thread() { + omFreeProvision = 32 ; + omInUseList = NULL ; + omInUseCount = 0 ; ++ _pretend_new_universe = false; + + #ifdef ASSERT + _visited_for_critical_count = false; +@@ -884,6 +887,15 @@ bool Thread::owns_locks_but_compiled_lock() const { + return false; + } + ++bool Thread::owns_locks_but_redefine_classes_lock() const { ++ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) { ++ if (cur != RedefineClasses_lock && cur->rank() != Mutex::redefine_classes) { ++ return true; ++ } ++ } ++ return false; ++} ++ + + #endif + +@@ -1637,7 +1649,7 @@ void JavaThread::run() { + ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm); + + assert(JavaThread::current() == this, "sanity check"); +- assert(!Thread::current()->owns_locks(), "sanity check"); ++ assert(!Thread::current()->owns_locks_but_redefine_classes_lock(), "sanity check"); + + DTRACE_THREAD_PROBE(start, this); + +@@ -3193,7 +3205,7 @@ static void compiler_thread_entry(JavaThread* thread, TRAPS) { + + // Create a CompilerThread + CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) +-: JavaThread(&compiler_thread_entry) { ++: JavaThread(&compiler_thread_entry), _should_bailout(false) { + _env = NULL; + _log = NULL; + _task = NULL; +@@ -3201,6 +3213,7 @@ CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) + _counters = counters; + _buffer_blob = NULL; + _scanned_nmethod = NULL; ++ _compilation_mutex = new Mutex(Mutex::redefine_classes, "compilationMutex", false); + + #ifndef PRODUCT + _ideal_graph_printer = NULL; +@@ -3230,6 +3243,7 @@ int Threads::_number_of_threads = 0; + int Threads::_number_of_non_daemon_threads = 0; + int Threads::_return_code = 0; + size_t JavaThread::_stack_size_at_create = 0; ++bool Threads::_wait_at_instrumentation_entry = false; + + // All JavaThreads + #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next()) +diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp +index 774bd27..4ca4502 100644 +--- a/src/share/vm/runtime/thread.hpp ++++ b/src/share/vm/runtime/thread.hpp +@@ -203,11 +203,14 @@ class Thread: public ThreadShadow { + void enter_signal_handler() { _num_nested_signal++; } + void leave_signal_handler() { _num_nested_signal--; } + bool is_inside_signal_handler() const { return _num_nested_signal > 0; } ++ Mutex* redefine_classes_mutex() { return _redefine_classes_mutex; } + + private: + // Debug tracing + static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN; + ++ Mutex* _redefine_classes_mutex; ++ + // Active_handles points to a block of handles + JNIHandleBlock* _active_handles; + +@@ -530,10 +533,15 @@ public: + uintptr_t _self_raw_id; // used by get_thread (mutable) + int _lgrp_id; + ++ ++ bool _pretend_new_universe; ++ + public: + // Stack overflow support + address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } + ++ void set_pretend_new_universe(bool b) { if (_pretend_new_universe != b) { if 
(TraceRedefineClasses >= 5) tty->print_cr("Changing pretend universe to %d", (int)b); _pretend_new_universe = b; } }
++ bool pretend_new_universe() { return _pretend_new_universe; }
+ void set_stack_base(address base) { _stack_base = base; }
+ size_t stack_size() const { return _stack_size; }
+ void set_stack_size(size_t size) { _stack_size = size; }
+@@ -570,6 +578,7 @@ public:
+ void print_owned_locks() const { print_owned_locks_on(tty); }
+ Monitor* owned_locks() const { return _owned_locks; }
+ bool owns_locks() const { return owned_locks() != NULL; }
++ bool owns_locks_but_redefine_classes_lock() const;
+ bool owns_locks_but_compiled_lock() const;
+
+ // Deadlock detection
+@@ -1793,6 +1802,8 @@ class CompilerThread : public JavaThread {
+ CompileTask* _task;
+ CompileQueue* _queue;
+ BufferBlob* _buffer_blob;
++ bool _should_bailout;
++ Mutex* _compilation_mutex;
+
+ nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+
+@@ -1802,12 +1813,16 @@
+
+ CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+
++ bool should_bailout() const { return _should_bailout; }
++ void set_should_bailout(bool b) { _should_bailout = b; }
++
+ bool is_Compiler_thread() const { return true; }
+ // Hide this compiler thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ CompileQueue* queue() { return _queue; }
+ CompilerCounters* counters() { return _counters; }
++ Mutex *compilation_mutex() { return _compilation_mutex; }
+
+ // Get/set the thread's compilation environment.
+ ciEnv* env() { return _env; }
+@@ -1862,6 +1877,7 @@ class Threads: AllStatic {
+ static int _number_of_threads;
+ static int _number_of_non_daemon_threads;
+ static int _return_code;
++ static bool _wait_at_instrumentation_entry;
+
+ public:
+ // Thread management
+@@ -1873,6 +1889,9 @@
+ static JavaThread* first() { return _thread_list; }
+ static void threads_do(ThreadClosure* tc);
+
++ static bool wait_at_instrumentation_entry() { return _wait_at_instrumentation_entry; }
++ static void set_wait_at_instrumentation_entry(bool b) { _wait_at_instrumentation_entry = b; }
++
+ // Initializes the vm and creates the vm thread
+ static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
+ static void convert_vm_init_libraries_to_agents();
+diff --git a/src/share/vm/runtime/vframe.cpp b/src/share/vm/runtime/vframe.cpp
+index 09e324f..d47ffef 100644
+--- a/src/share/vm/runtime/vframe.cpp
++++ b/src/share/vm/runtime/vframe.cpp
+@@ -253,6 +253,46 @@ methodOop interpretedVFrame::method() const {
+ return fr().interpreter_frame_method();
+ }
+
++// (tw) Sets interpreter frame method.
++void interpretedVFrame::set_method(methodOop new_method, int new_bci) { ++ methodOop old_method = fr().interpreter_frame_method(); ++ int old_stack_size = fr().interpreter_frame_expression_stack_size(); ++ if (old_method == new_method) return; ++ u_char *old_bcp = bcp(); ++ int old_bci = bci(); ++ fr().interpreter_frame_set_method(new_method); ++ fr().interpreter_frame_set_cache(new_method->constants()->cache()); ++ u_char *new_bcp = new_method->code_base() + new_bci; ++ assert(new_method->bcp_from(new_bci) == new_bcp, ""); ++ ++ set_bcp(new_bcp); ++ ++ Bytecodes::Code code = Bytecodes::java_code_at(old_method, old_bcp); ++ assert(Bytecodes::java_code_at(new_method, new_bcp) == code, "must have same bytecode at this position"); ++ ++ switch (code) { ++ case Bytecodes::_invokevirtual : ++ case Bytecodes::_invokespecial : ++ case Bytecodes::_invokestatic : ++ case Bytecodes::_invokeinterface: { ++ int old_index = Bytes::get_native_u2(old_bcp+1); ++ int new_index = Bytes::get_native_u2(new_bcp+1); ++ new_method->constants()->cache()->entry_at(new_index)->copy_from(old_method->constants()->cache()->entry_at(old_index)); ++ break; ++ } ++ ++ case Bytecodes::_invokedynamic: { ++ int old_index = Bytes::get_native_u4(old_bcp+1); ++ int new_index = Bytes::get_native_u4(new_bcp+1); ++ new_method->constants()->cache()->secondary_entry_at(new_index)->copy_from(old_method->constants()->cache()->secondary_entry_at(old_index)); ++ break; ++ } ++ } ++ ++ int new_stack_size = fr().interpreter_frame_expression_stack_size(); ++ assert(new_method->validate_bci_from_bcx((intptr_t)new_bcp) == new_bci, ""); ++} ++ + StackValueCollection* interpretedVFrame::locals() const { + int length = method()->max_locals(); + +diff --git a/src/share/vm/runtime/vframe.hpp b/src/share/vm/runtime/vframe.hpp +index badfea5..edbc5c7 100644 +--- a/src/share/vm/runtime/vframe.hpp ++++ b/src/share/vm/runtime/vframe.hpp +@@ -163,6 +163,7 @@ class interpretedVFrame: public javaVFrame { + StackValueCollection* locals() const; + StackValueCollection* expressions() const; + GrowableArray<MonitorInfo*>* monitors() const; ++ void set_method(methodOop method, int new_bci); + + void set_locals(StackValueCollection* values) const; + +diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp +index 7643670..036ab64 100644 +--- a/src/share/vm/runtime/vmThread.cpp ++++ b/src/share/vm/runtime/vmThread.cpp +@@ -691,6 +691,10 @@ void VMThread::execute(VM_Operation* op) { + void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) { + Thread::oops_do(f, cf); + _vm_queue->oops_do(f); ++ // (DCEVM) need to update oops in VM_RedefineClasses! 
++ if (_cur_vm_operation != NULL) { ++ _cur_vm_operation->oops_do(f); ++ } + } + + //------------------------------------------------------------------------------------------------------------------ +diff --git a/src/share/vm/utilities/exceptions.cpp b/src/share/vm/utilities/exceptions.cpp +index 03f254d..c9e0efc 100644 +--- a/src/share/vm/utilities/exceptions.cpp ++++ b/src/share/vm/utilities/exceptions.cpp +@@ -254,6 +254,8 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name, + assert(thread->is_Java_thread(), "can only be called by a Java thread"); + assert(!thread->has_pending_exception(), "already has exception"); + ++ bool old_pretend_value = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + Handle h_exception; + + // Resolve exception klass +@@ -285,6 +287,7 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name, + h_exception = Handle(thread, thread->pending_exception()); + thread->clear_pending_exception(); + } ++ Thread::current()->set_pretend_new_universe(old_pretend_value); + return h_exception; + } + +@@ -295,6 +298,8 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name, + Symbol* signature, JavaCallArguments *args, + Handle h_cause, + Handle h_loader, Handle h_protection_domain) { ++ bool old_pretend_value = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + Handle h_exception = new_exception(thread, name, signature, args, h_loader, h_protection_domain); + + // Future: object initializer should take a cause argument +@@ -317,6 +322,8 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name, + h_exception = Handle(thread, thread->pending_exception()); + thread->clear_pending_exception(); + } ++ ++ Thread::current()->set_pretend_new_universe(old_pretend_value); + return h_exception; + } + +diff --git a/src/share/vm/utilities/growableArray.hpp b/src/share/vm/utilities/growableArray.hpp +index 2a6d6b8..4b6927f 100644 +--- a/src/share/vm/utilities/growableArray.hpp ++++ b/src/share/vm/utilities/growableArray.hpp +@@ -145,6 +145,33 @@ class GenericGrowableArray : public ResourceObj { + assert(on_stack(), "fast ResourceObj path only"); + return (void*)resource_allocate_bytes(thread, elementSize * _max); + } ++ ++}; ++ ++template<class E, class F> class Pair : public StackObj ++{ ++private: ++ E _left; ++ F _right; ++ ++public: ++ ++ Pair() { ++ ++ } ++ ++ Pair(E left, F right) { ++ this->_left = left; ++ this->_right = right; ++ } ++ ++ E left() { ++ return _left; ++ } ++ ++ F right() { ++ return _right; ++ } + }; + + template<class E> class GrowableArray : public GenericGrowableArray { diff --git a/hotspot/.hg/patches/full-jdk7u51-b13.patch b/hotspot/.hg/patches/full-jdk7u51-b13.patch new file mode 100644 index 00000000..e9abeb8d --- /dev/null +++ b/hotspot/.hg/patches/full-jdk7u51-b13.patch @@ -0,0 +1,12233 @@ +diff -r 6c6a2299029a make/bsd/makefiles/gcc.make +--- a/make/bsd/makefiles/gcc.make Sat Dec 14 11:51:15 2013 -0800 ++++ b/make/bsd/makefiles/gcc.make Mon Apr 28 13:12:30 2014 -0700 +@@ -116,7 +116,10 @@ + CFLAGS += -fno-rtti + CFLAGS += -fno-exceptions + CFLAGS += -pthread +-CFLAGS += -fcheck-new ++## well, strictly speaking we should check for clang not Darwin ++ifneq ($(OS_VENDOR), Darwin) ++ CFLAGS += -fcheck-new ++endif + # version 4 and above support fvisibility=hidden (matches jni_x86.h file) + # except 4.1.2 gives pointless warnings that can't be disabled (afaik) + ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( 
$(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+diff -r 6c6a2299029a src/cpu/x86/vm/templateTable_x86_32.cpp
+--- a/src/cpu/x86/vm/templateTable_x86_32.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -2109,6 +2109,22 @@
+ // resolve first time through
+ address entry;
+ switch (bytecode()) {
++ case Bytecodes::_fast_agetfield : // fall through
++ case Bytecodes::_fast_bgetfield : // fall through
++ case Bytecodes::_fast_cgetfield : // fall through
++ case Bytecodes::_fast_dgetfield : // fall through
++ case Bytecodes::_fast_fgetfield : // fall through
++ case Bytecodes::_fast_igetfield : // fall through
++ case Bytecodes::_fast_lgetfield : // fall through
++ case Bytecodes::_fast_sgetfield : // fall through
++ case Bytecodes::_fast_aputfield : // fall through
++ case Bytecodes::_fast_bputfield : // fall through
++ case Bytecodes::_fast_cputfield : // fall through
++ case Bytecodes::_fast_dputfield : // fall through
++ case Bytecodes::_fast_fputfield : // fall through
++ case Bytecodes::_fast_iputfield : // fall through
++ case Bytecodes::_fast_lputfield : // fall through
++ case Bytecodes::_fast_sputfield : // fall through
+ case Bytecodes::_getstatic : // fall through
+ case Bytecodes::_putstatic : // fall through
+ case Bytecodes::_getfield : // fall through
+@@ -2211,6 +2227,7 @@
+ // Correct values of the cache and index registers are preserved.
+ void TemplateTable::jvmti_post_field_access(Register cache,
+ Register index,
++ int byte_no,
+ bool is_static,
+ bool has_tos) {
+ if (JvmtiExport::can_post_field_access()) {
+@@ -2237,7 +2254,11 @@
+ // cache: cache entry pointer
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
+ rax, cache);
+- __ get_cache_and_index_at_bcp(cache, index, 1);
++
++ // DCEVM: Redefinition might have occurred => re-resolve the cp entry.
++ __ restore_bcp();
++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
++
+ __ bind(L1);
+ }
+ }
+@@ -2258,7 +2279,7 @@
+ const Register flags = rax;
+
+ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
+- jvmti_post_field_access(cache, index, is_static, false);
++ jvmti_post_field_access(cache, index, byte_no, is_static, false);
+ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+
+ if (!is_static) pop_and_check_object(obj);
+@@ -2393,7 +2414,7 @@
+
+ // The registers cache and index expected to be set before call.
+ // The function may destroy various registers, just not the cache and index registers.
+-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static) {
+
+ ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
+
+@@ -2451,7 +2472,11 @@
+ // rcx: jvalue object on the stack
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
+ rbx, rax, rcx);
+- __ get_cache_and_index_at_bcp(cache, index, 1);
++
++ // (tw) Redefinition might have occurred => re-resolve the cp entry.
++ __ restore_bcp(); ++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); ++ + __ bind(L1); + } + } +@@ -2467,7 +2492,7 @@ + const Register flags = rax; + + resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); +- jvmti_post_field_mod(cache, index, is_static); ++ jvmti_post_field_mod(cache, index, byte_no, is_static); + load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + + // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO). +@@ -2818,6 +2843,11 @@ + // rcx: cache entry pointer + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx); + __ pop_ptr(rax); // restore object pointer ++ ++ // DCEVM: Redefinition might have occured => reresolve the cp entry. ++ __ restore_bcp(); ++ resolve_cache_and_index(1, noreg, rax, rcx, sizeof(u2)); ++ + __ bind(L1); + } + +@@ -3008,6 +3038,26 @@ + + __ bind(notFinal); + ++ // DCEVM: Check if we are calling an old method (and have to go slow path) ++ Label notOld; ++ __ movl(rax, flags); ++ __ andl(rax, (1 << ConstantPoolCacheEntry::is_old_method_shift)); ++ __ jcc(Assembler::zero, notOld); ++ ++ // Need a null check here! ++ __ null_check(recv); ++ ++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class) ++ // DCEVM: TODO: Check if we can improve performance by inlining. ++ // DCEVM: TODO: Check if this additional branch affects normal execution time. ++ __ call_VM(method, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_method), recv, index); ++ ++ // profile this call ++ __ profile_final_call(rax); ++ __ jump_from_interpreted(method, rdx); ++ ++ __ bind(notOld); ++ + // get receiver klass + __ null_check(recv, oopDesc::klass_offset_in_bytes()); + __ load_klass(rax, recv); +@@ -3093,6 +3143,31 @@ + invokevirtual_helper(rbx, rcx, rdx); + __ bind(notMethod); + ++ // DCEVM: Check if we are calling an old method (and have to go slow path) ++ //__ movl(rax, rdx); ++ Label notOld; ++ __ andl(rdx, (1 << ConstantPoolCacheEntry::is_old_method_shift)); ++ __ jcc(Assembler::zero, notOld); ++ ++ // Get receiver klass into rdx - also a null check ++ __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes())); ++ __ verify_oop(rdx); ++ ++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class) ++ // DCEVM: TODO: Check if we can improve performance by inlining. ++ // DCEVM: TODO: Check if this additional branch affects normal execution time. ++ // DCEVM: TODO: Check the exact semantic (with respect to destoying registers) of call_VM ++ __ call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_interface_method), rcx, rax, rbx); ++ ++ // DCEVM: TODO: Check if resolved method could be null. 
++ ++ // profile this call ++ __ profile_virtual_call(rdx, rsi, rdi); ++ ++ __ jump_from_interpreted(rbx, rdx); ++ ++ __ bind(notOld); ++ + // Get receiver klass into rdx - also a null check + __ restore_locals(); // restore rdi + __ null_check(rcx, oopDesc::klass_offset_in_bytes()); +diff -r 6c6a2299029a src/cpu/x86/vm/templateTable_x86_64.cpp +--- a/src/cpu/x86/vm/templateTable_x86_64.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -2151,6 +2151,22 @@ + // resolve first time through + address entry; + switch (bytecode()) { ++ case Bytecodes::_fast_agetfield : // fall through ++ case Bytecodes::_fast_bgetfield : // fall through ++ case Bytecodes::_fast_cgetfield : // fall through ++ case Bytecodes::_fast_dgetfield : // fall through ++ case Bytecodes::_fast_fgetfield : // fall through ++ case Bytecodes::_fast_igetfield : // fall through ++ case Bytecodes::_fast_lgetfield : // fall through ++ case Bytecodes::_fast_sgetfield : // fall through ++ case Bytecodes::_fast_aputfield : // fall through ++ case Bytecodes::_fast_bputfield : // fall through ++ case Bytecodes::_fast_cputfield : // fall through ++ case Bytecodes::_fast_dputfield : // fall through ++ case Bytecodes::_fast_fputfield : // fall through ++ case Bytecodes::_fast_iputfield : // fall through ++ case Bytecodes::_fast_lputfield : // fall through ++ case Bytecodes::_fast_sputfield : // fall through + case Bytecodes::_getstatic: + case Bytecodes::_putstatic: + case Bytecodes::_getfield: +@@ -2267,7 +2283,7 @@ + // The registers cache and index expected to be set before call. + // Correct values of the cache and index registers are preserved. + void TemplateTable::jvmti_post_field_access(Register cache, Register index, +- bool is_static, bool has_tos) { ++ int byte_no, bool is_static, bool has_tos) { + // do the JVMTI work here to avoid disturbing the register state below + // We use c_rarg registers here because we want to use the register used in + // the call to the VM +@@ -2298,7 +2314,11 @@ + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::post_field_access), + c_rarg1, c_rarg2, c_rarg3); +- __ get_cache_and_index_at_bcp(cache, index, 1); ++ ++ // DCEVM: Redefinition might have occured => reresolve the cp entry. ++ __ restore_bcp(); ++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); ++ + __ bind(L1); + } + } +@@ -2320,7 +2340,7 @@ + const Register bc = c_rarg3; // uses same reg as obj, so don't mix them + + resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); +- jvmti_post_field_access(cache, index, is_static, false); ++ jvmti_post_field_access(cache, index, byte_no, is_static, false); + load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + + if (!is_static) { +@@ -2455,7 +2475,7 @@ + + // The registers cache and index expected to be set before call. + // The function may destroy various registers, just not the cache and index registers. +-void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { ++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static) { + transition(vtos, vtos); + + ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); +@@ -2507,7 +2527,11 @@ + CAST_FROM_FN_PTR(address, + InterpreterRuntime::post_field_modification), + c_rarg1, c_rarg2, c_rarg3); +- __ get_cache_and_index_at_bcp(cache, index, 1); ++ ++ // DCEVM: Redefinition might have occured => reresolve the cp entry. 
++ __ restore_bcp(); ++ resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); ++ + __ bind(L1); + } + } +@@ -2523,7 +2547,7 @@ + const Register bc = c_rarg3; + + resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); +- jvmti_post_field_mod(cache, index, is_static); ++ jvmti_post_field_mod(cache, index, byte_no, is_static); + load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + + // [jk] not needed currently +@@ -2837,6 +2861,11 @@ + InterpreterRuntime::post_field_access), + c_rarg1, c_rarg2); + __ pop_ptr(rax); // restore object pointer ++ ++ // DCEVM: Redefinition might have occured => reresolve the cp entry. ++ __ restore_bcp(); ++ resolve_cache_and_index(1, noreg, rax, rcx, sizeof(u2)); ++ + __ bind(L1); + } + +@@ -3073,6 +3102,26 @@ + + __ bind(notFinal); + ++ // DCEVM: Check if we are calling an old method (and have to go slow path) ++ Label notOld; ++ __ movl(rax, flags); ++ __ andl(rax, (1 << ConstantPoolCacheEntry::is_old_method_shift)); ++ __ jcc(Assembler::zero, notOld); ++ ++ // Need a null check here! ++ __ null_check(recv); ++ ++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class) ++ // DCEVM: TODO: Check if we can improve performance by inlining. ++ // DCEVM: TODO: Check if this additional branch affects normal execution time. ++ __ call_VM(method, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_method), recv, index); ++ ++ // profile this call ++ __ profile_final_call(rax); ++ __ jump_from_interpreted(method, rdx); ++ ++ __ bind(notOld); ++ + // get receiver klass + __ null_check(recv, oopDesc::klass_offset_in_bytes()); + __ load_klass(rax, recv); +@@ -3156,6 +3205,35 @@ + invokevirtual_helper(rbx, rcx, rdx); + __ bind(notMethod); + ++ // DCEVM: Check if we are calling an old method (and have to go slow path) ++ Label notOld; ++ __ andl(rdx, (1 << ConstantPoolCacheEntry::is_old_method_shift)); ++ __ jcc(Assembler::zero, notOld); ++ ++ // Call out to VM to do look up based on correct vTable version (has to iterate back over the class history of the receiver class) ++ // DCEVM: TODO: Check if we can improve performance by inlining. ++ // DCEVM: TODO: Check if this additional branch affects normal execution time. ++ // DCEVM: TODO: Check the exact semantic (with respect to destoying registers) of call_VM ++ // DCEVM: FIXME: What exactly should we store here? ++ __ push(rcx); // destroyed by Linux arguments passing conventions ++ __ movptr(r14, rcx); ++ __ call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::find_correct_interface_method), r14, rax, rbx); ++ __ pop(rcx); ++ ++ // Get receiver klass into rdx - also a null check ++ __ restore_locals(); // restore r14 ++ __ load_klass(rdx, rcx); ++ __ verify_oop(rdx); ++ ++ // DCEVM: TODO: Check if resolved method could be null. 
++ ++ // profile this call ++ __ profile_virtual_call(rdx, r13, r14); ++ ++ __ jump_from_interpreted(rbx, rdx); ++ ++ __ bind(notOld); ++ + // Get receiver klass into rdx - also a null check + __ restore_locals(); // restore r14 + __ null_check(rcx, oopDesc::klass_offset_in_bytes()); +diff -r 6c6a2299029a src/os/bsd/vm/attachListener_bsd.cpp +--- a/src/os/bsd/vm/attachListener_bsd.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/os/bsd/vm/attachListener_bsd.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -460,14 +460,14 @@ + + void AttachListener::vm_start() { + char fn[UNIX_PATH_MAX]; +- struct stat64 st; ++ struct stat st; + int ret; + + int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d", + os::get_temp_directory(), os::current_process_id()); + assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow"); + +- RESTARTABLE(::stat64(fn, &st), ret); ++ RESTARTABLE(::stat(fn, &st), ret); + if (ret == 0) { + ret = ::unlink(fn); + if (ret == -1) { +diff -r 6c6a2299029a src/share/vm/c1/c1_Compilation.hpp +--- a/src/share/vm/c1/c1_Compilation.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/c1/c1_Compilation.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -242,8 +242,8 @@ + #define BAILOUT(msg) { bailout(msg); return; } + #define BAILOUT_(msg, res) { bailout(msg); return res; } + +-#define CHECK_BAILOUT() { if (bailed_out()) return; } +-#define CHECK_BAILOUT_(res) { if (bailed_out()) return res; } ++#define CHECK_BAILOUT() { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return; } ++#define CHECK_BAILOUT_(res) { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return res; } + + + class InstructionMark: public StackObj { +diff -r 6c6a2299029a src/share/vm/ci/ciEnv.cpp +--- a/src/share/vm/ci/ciEnv.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/ci/ciEnv.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1172,3 +1172,11 @@ + // If memory is low, we stop compiling methods. + record_method_not_compilable("out of memory"); + } ++ ++// DCEVM: Called after class redefinition to clean up possibly invalidated state. ++void ciEnv::cleanup_after_redefinition() { ++ ++ if (_factory != NULL) { ++ _factory->cleanup_after_redefinition(); ++ } ++} +diff -r 6c6a2299029a src/share/vm/ci/ciEnv.hpp +--- a/src/share/vm/ci/ciEnv.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/ci/ciEnv.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -417,6 +417,8 @@ + void record_failure(const char* reason); + void record_method_not_compilable(const char* reason, bool all_tiers = true); + void record_out_of_memory_failure(); ++ ++ void cleanup_after_redefinition(); + }; + + #endif // SHARE_VM_CI_CIENV_HPP +diff -r 6c6a2299029a src/share/vm/ci/ciObjectFactory.cpp +--- a/src/share/vm/ci/ciObjectFactory.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/ci/ciObjectFactory.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -296,6 +296,11 @@ + // into the table. We need to recompute our index. + index = find(keyHandle(), _ci_objects); + } ++ ++ if (is_found_at(index, keyHandle(), _ci_objects)) { ++ // DCEVM: Check if this is an error? Can occur when redefining classes. 
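++ // (Returning the existing entry here avoids tripping the "no double
++ // insert" assert below when a redefinition has reordered the keys.)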
++ return _ci_objects->at(index);
++ }
+ assert(!is_found_at(index, keyHandle(), _ci_objects), "no double insert");
+ insert(index, new_object, _ci_objects);
+ return new_object;
+@@ -764,3 +769,50 @@
+ _unloaded_instances->length(),
+ _unloaded_klasses->length());
+ }
++
++// DCEVM: Restoring the ciObject arrays after class redefinition
++void ciObjectFactory::sort_ci_objects(GrowableArray<ciObject*>* objects) {
++
++ // Resort the _ci_objects array. The order of two class pointers can be changed during class redefinition.
++ oop last = NULL;
++ for (int j = 0; j< objects->length(); j++) {
++ oop o = objects->at(j)->get_oop();
++ if (last >= o) {
++ int cur_last_index = j - 1;
++ oop cur_last = last;
++ while (cur_last >= o) {
++
++ // Swap the two objects to guarantee ordering
++ ciObject *tmp = objects->at(cur_last_index);
++ objects->at_put(cur_last_index, objects->at(cur_last_index + 1));
++ objects->at_put(cur_last_index + 1, tmp);
++
++ // Decrement index to move one step to the left
++ cur_last_index--;
++ if (cur_last_index < 0) {
++ break;
++ }
++ cur_last = objects->at(cur_last_index)->get_oop();
++ }
++ } else {
++ assert(last < o, "out of order");
++ last = o;
++ }
++ }
++
++#ifdef ASSERT
++ if (CIObjectFactoryVerify) {
++ oop last = NULL;
++ for (int j = 0; j< objects->length(); j++) {
++ oop o = objects->at(j)->get_oop();
++ assert(last < o, "out of order");
++ last = o;
++ }
++ }
++#endif // ASSERT
++}
++
++// DCEVM: Called after class redefinition to clean up possibly invalidated state.
++void ciObjectFactory::cleanup_after_redefinition() {
++ sort_ci_objects(_ci_objects);
++}
+diff -r 6c6a2299029a src/share/vm/ci/ciObjectFactory.hpp
+--- a/src/share/vm/ci/ciObjectFactory.hpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/ci/ciObjectFactory.hpp Mon Apr 28 13:12:30 2014 -0700
+@@ -38,6 +38,7 @@
+ class ciObjectFactory : public ResourceObj {
+ friend class VMStructs;
+ friend class ciEnv;
++ friend class CompileBroker;
+
+ private:
+ static volatile bool _initialized;
+@@ -137,6 +138,11 @@
+
+ void print_contents();
+ void print();
++
++private:
++
++ static void sort_ci_objects(GrowableArray<ciObject*>* objects);
++ void cleanup_after_redefinition();
+ };
+
+ #endif // SHARE_VM_CI_CIOBJECTFACTORY_HPP
+diff -r 6c6a2299029a src/share/vm/classfile/classFileParser.cpp
+--- a/src/share/vm/classfile/classFileParser.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/classfile/classFileParser.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -795,6 +795,7 @@
+ Handle class_loader,
+ Handle protection_domain,
+ Symbol* class_name,
++ KlassHandle old_klass,
+ TRAPS) {
+ ClassFileStream* cfs = stream();
+ assert(length > 0, "only called for length>0");
+@@ -813,6 +814,9 @@
+ interface_index, CHECK_(nullHandle));
+ if (cp->tag_at(interface_index).is_klass()) {
+ interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
++ if (!old_klass.is_null() && !interf->is_newest_version()) {
++ interf = KlassHandle(THREAD, interf->newest_version());
++ }
+ } else {
+ Symbol* unresolved_klass = cp->klass_name_at(interface_index);
+
+@@ -825,6 +829,9 @@
+ klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+ unresolved_klass, class_loader, protection_domain,
+ false, CHECK_(nullHandle));
++ if (!old_klass.is_null()) {
++ k = k->klass_part()->newest_version();
++ }
+ interf = KlassHandle(THREAD, k);
+ }
+
+@@ -1912,6 +1919,8 @@
+ int runtime_invisible_parameter_annotations_length = 0;
+ u1* annotation_default = NULL;
+ int annotation_default_length = 0;
++ u2 code_section_table_length;
++ typeArrayHandle code_section_table;
+
+ // Parse code and exceptions attribute
+ u2 method_attributes_count = cfs->get_u2_fast();
+@@ -2081,6 +2090,24 @@
+ parse_stackmap_table(code_attribute_length, CHECK_(nullHandle));
+ stackmap_data = typeArrayHandle(THREAD, sm);
+ parsed_stackmap_attribute = true;
++ } else if (UseMethodForwardPoints && cp->symbol_at(code_attribute_name_index) == vmSymbols::tag_code_sections()) {
++ int length = code_attribute_length;
++ int value_count = length / sizeof(u2);
++ int line_count = length / 3;
++ if (TraceRedefineClasses >= 3) {
++ tty->print_cr("Found code section attribute when loading class with %d entries (%d lines)", value_count, line_count);
++ }
++ code_section_table_length = value_count;
++ code_section_table = oopFactory::new_permanent_shortArray(value_count, CHECK_NULL);
++ code_section_table->set_length(value_count);
++
++ for (int i = 0; i < value_count; ++i) {
++ u2 value = cfs->get_u2(CHECK_(nullHandle));
++ code_section_table->short_at_put(i, value);
++ if (TraceRedefineClasses >= 4) {
++ tty->print_cr("Code section table at %d: %d", i, value);
++ }
++ }
+ } else {
+ // Skip unknown attributes
+ cfs->skip_u1(code_attribute_length, CHECK_(nullHandle));
+@@ -2206,6 +2233,18 @@
+ }
+ #endif
+
++ // DCEVM: TODO: Get a different solution for the problem of method forward
++ // points and variable sized interpreter frames.
++ if (UseMethodForwardPoints) {
++ if (max_stack > MethodForwardPointsMaxStack) {
++ fatal(err_msg("Method has too large stack (%d), increase the value of MethodForwardPointsMaxStack (%d)", max_stack, MethodForwardPointsMaxStack));
++ }
++ if (max_locals > MethodForwardPointsMaxLocals) {
++ fatal(err_msg("Method has too many locals (%d), increase the value of MethodForwardPointsMaxLocals (%d)", max_locals, MethodForwardPointsMaxLocals));
++ }
++ max_stack = MethodForwardPointsMaxStack;
++ max_locals = MethodForwardPointsMaxLocals;
++ }
+ // Fill in code attribute information
+ m->set_max_stack(max_stack);
+ m->set_max_locals(max_locals);
+@@ -2219,6 +2258,8 @@
+ */
+ m->constMethod()->set_stackmap_data(stackmap_data());
+
++ m->constMethod()->set_code_section_table(code_section_table());
++
+ // Copy byte codes
+ m->set_code(code_start);
+
+@@ -2792,6 +2833,15 @@
+ "Invalid Deprecated classfile attribute length %u in class file %s",
+ attribute_length, CHECK);
+ }
++ } else if (tag == vmSymbols::tag_field_redefinition_policy()) {
++ // DCEVM: Check for deleted field attribute
++ _field_redefinition_policy = cfs->get_u1_fast();
++ } else if (tag == vmSymbols::tag_static_field_redefinition_policy()) {
++ // DCEVM: Check for deleted static field attribute
++ _static_field_redefinition_policy = cfs->get_u1_fast();
++ } else if (tag == vmSymbols::tag_method_redefinition_policy()) {
++ // DCEVM: Check for deleted method attribute
++ _method_redefinition_policy = cfs->get_u1_fast();
+ } else if (_major_version >= JAVA_1_5_VERSION) {
+ if (tag == vmSymbols::tag_signature()) {
+ if (attribute_length != 2) {
+@@ -2895,6 +2945,17 @@
+ }
+ k->set_inner_classes(_inner_classes());
+ k->set_class_annotations(_annotations());
++
++
++ if (_field_redefinition_policy != 0xff) {
++ k->set_field_redefinition_policy(_field_redefinition_policy);
++ }
++ if (_static_field_redefinition_policy != 0xff) {
++ k->set_static_field_redefinition_policy(_static_field_redefinition_policy);
++ }
++ if (_method_redefinition_policy != 0xff) {
++ k->set_method_redefinition_policy(_method_redefinition_policy);
++ }
+ }
+
+ typeArrayHandle
ClassFileParser::assemble_annotations(u1* runtime_visible_annotations, +@@ -2918,9 +2979,126 @@ + } + + ++// DCEVM: Finds the super symbols by reading the bytes of the class and returns ++// them in a growable array. ++void ClassFileParser::findSuperSymbols(Symbol* name, ++ Handle class_loader, ++ Handle protection_domain, ++ KlassHandle old_klass, ++ GrowableArray<Symbol*> &handles, ++ TRAPS) { ++ ++ _cp_patches = NULL; ++ // So that JVMTI can cache class file in the state before retransformable agents ++ // have modified it ++ unsigned char *cached_class_file_bytes = NULL; ++ ++ ClassFileStream* cfs = stream(); ++ ++ _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false; ++ ++ instanceKlassHandle nullHandle; ++ ++ // Save the class file name for easier error message printing. ++ _class_name = name != NULL ? name : vmSymbols::unknown_class_name(); ++ ++ cfs->guarantee_more(8, CHECK); // magic, major, minor ++ // Magic value ++ u4 magic = cfs->get_u4_fast(); ++ if (magic != JAVA_CLASSFILE_MAGIC) { ++ // Invalid class file! ++ return; ++ } ++ ++ // Version numbers ++ u2 minor_version = cfs->get_u2_fast(); ++ u2 major_version = cfs->get_u2_fast(); ++ ++ // Check version numbers - we check this even with verifier off ++ if (!is_supported_version(major_version, minor_version)) { ++ ++ // Unsupported version! ++ return; ++ } ++ ++ _major_version = major_version; ++ _minor_version = minor_version; ++ ++ ++ // Check if verification needs to be relaxed for this class file ++ // Do not restrict it to jdk1.0 or jdk1.1 to maintain backward compatibility (4982376) ++ _relax_verify = Verifier::relax_verify_for(class_loader()); ++ _need_verify = false; ++ ++ // Constant pool ++ constantPoolHandle cp = parse_constant_pool(class_loader(), CHECK); ++ int cp_size = cp->length(); ++ ++ cfs->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len ++ ++ // Access flags ++ AccessFlags access_flags; ++ jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS; ++ ++ if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { ++ // Set abstract bit for old class files for backward compatibility ++ flags |= JVM_ACC_ABSTRACT; ++ } ++ access_flags.set_flags(flags); ++ ++ // This class and superclass ++ instanceKlassHandle super_klass; ++ u2 this_class_index = cfs->get_u2_fast(); ++ check_property( ++ valid_cp_range(this_class_index, cp_size) && ++ cp->tag_at(this_class_index).is_unresolved_klass(), ++ "Invalid this class index %u in constant pool in class file %s", ++ this_class_index, CHECK); ++ ++ Symbol* class_name = cp->unresolved_klass_at(this_class_index); ++ assert(class_name != NULL, "class_name can't be null"); ++ ++ // Update _class_name which could be null previously to be class_name ++ _class_name = class_name; ++ ++ // DCEVM: DO NOT release all handles when parsing is done ++ {// HandleMark hm(THREAD); ++ ++ // Checks if name in class file matches requested name ++ if (name != NULL && class_name != name) { ++ return; ++ } ++ ++ u2 super_class_index = cfs->get_u2_fast(); ++ ++ if (super_class_index != 0) { ++ Symbol* super_class = cp->klass_name_at(super_class_index); ++ handles.append(super_class); ++ } else { ++ // DCEVM: This redefinition must be for the Object class. 
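++ // (super_class_index == 0 is only legal for java/lang/Object, so there
++ // is no super class symbol to record in that case.)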
++ } ++ ++ // Interfaces ++ u2 itfs_len = cfs->get_u2_fast(); ++ objArrayHandle local_interfaces; ++ if (itfs_len == 0) { ++ local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); ++ } else { ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK); ++ } ++ ++ for (int i=0; i<local_interfaces->length(); i++) { ++ oop o = local_interfaces->obj_at(i); ++ Symbol* interface_handle = ((klassOop)o)->klass_part()->name(); ++ handles.append(interface_handle); ++ } ++ } ++} ++ + instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + KlassHandle host_klass, + GrowableArray<Handle>* cp_patches, + TempNewSymbol& parsed_name, +@@ -2971,10 +3149,13 @@ + unsigned char* ptr = cfs->buffer(); + unsigned char* end_ptr = cfs->buffer() + cfs->length(); + ++ bool pretend_new_universe = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, + &ptr, &end_ptr, + &cached_class_file_bytes, + &cached_class_file_length); ++ Thread::current()->set_pretend_new_universe(pretend_new_universe); + + if (ptr != cfs->buffer()) { + // JVMTI agent has modified class file data. +@@ -3130,7 +3311,11 @@ + // However, make sure it is not an array type. + bool is_array = false; + if (cp->tag_at(super_class_index).is_klass()) { +- super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index)); ++ klassOop resolved_klass = cp->resolved_klass_at(super_class_index); ++ if (!old_klass.is_null()) { ++ resolved_klass = resolved_klass->klass_part()->newest_version(); ++ } ++ super_klass = instanceKlassHandle(THREAD, resolved_klass); + if (_need_verify) + is_array = super_klass->oop_is_array(); + } else if (_need_verify) { +@@ -3148,7 +3333,7 @@ + if (itfs_len == 0) { + local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); + } else { +- local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle)); ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_(nullHandle)); + } + + u2 java_fields_count = 0; +@@ -3202,7 +3387,9 @@ + protection_domain, + true, + CHECK_(nullHandle)); +- ++ if (!old_klass.is_null()) { ++ k = k->klass_part()->newest_version(); ++ } + KlassHandle kh (THREAD, k); + super_klass = instanceKlassHandle(THREAD, kh()); + } +@@ -3591,6 +3778,19 @@ + rt = REF_NONE; + } else { + rt = super_klass->reference_type(); ++ ++ // DCEVM: With class redefinition, it can also happen that special classes are loaded. 
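++ // The reference type is recomputed from the well-known class name here
++ // because the bootstrap path that normally sets it is not taken when
++ // one of these classes arrives through redefinition.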
++ if (name == vmSymbols::java_lang_ref_Reference()) { ++ rt = REF_OTHER; ++ } else if (name == vmSymbols::java_lang_ref_SoftReference()) { ++ rt = REF_SOFT; ++ } else if (name == vmSymbols::java_lang_ref_WeakReference()) { ++ rt = REF_WEAK; ++ } else if (name == vmSymbols::java_lang_ref_FinalReference()) { ++ rt = REF_FINAL; ++ } else if (name == vmSymbols::java_lang_ref_PhantomReference()) { ++ rt = REF_PHANTOM; ++ } + } + + // We can now create the basic klassOop for this klass +@@ -3691,7 +3891,7 @@ + fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts); + + // Fill in has_finalizer, has_vanilla_constructor, and layout_helper +- set_precomputed_flags(this_klass); ++ set_precomputed_flags(this_klass, old_klass); + + // reinitialize modifiers, using the InnerClasses attribute + int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle)); +@@ -3714,6 +3914,10 @@ + // Allocate mirror and initialize static fields + java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); + ++ if (rt == REF_OTHER) { ++ instanceRefKlass::update_nonstatic_oop_maps(ik); ++ } ++ + ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), + false /* not shared class */); + +@@ -3856,7 +4060,7 @@ + } + + +-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { ++void ClassFileParser::set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass) { + klassOop super = k->super(); + + // Check if this klass has an empty finalize method (i.e. one with return bytecode only), +@@ -3864,7 +4068,9 @@ + if (!_has_empty_finalizer) { + if (_has_finalizer || + (super != NULL && super->klass_part()->has_finalizer())) { +- k->set_has_finalizer(); ++ if (old_klass.is_null() || old_klass->has_finalizer()) { ++ k->set_has_finalizer(); ++ } + } + } + +@@ -3880,7 +4086,7 @@ + + // Check if this klass supports the java.lang.Cloneable interface + if (SystemDictionary::Cloneable_klass_loaded()) { +- if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) { ++ if (k->is_subtype_of(SystemDictionary::Cloneable_klass()) || k->is_subtype_of(SystemDictionary::Cloneable_klass()->klass_part()->newest_version())) { + k->set_is_cloneable(); + } + } +diff -r 6c6a2299029a src/share/vm/classfile/classFileParser.hpp +--- a/src/share/vm/classfile/classFileParser.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/classFileParser.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -64,6 +64,9 @@ + int _sde_length; + typeArrayHandle _inner_classes; + typeArrayHandle _annotations; ++ u1 _field_redefinition_policy; ++ u1 _static_field_redefinition_policy; ++ u1 _method_redefinition_policy; + + void set_class_synthetic_flag(bool x) { _synthetic_flag = x; } + void set_class_sourcefile(Symbol* x) { _sourcefile = x; } +@@ -151,6 +154,7 @@ + Handle class_loader, + Handle protection_domain, + Symbol* class_name, ++ KlassHandle old_klass, + TRAPS); + + // Field parsing +@@ -237,7 +241,7 @@ + unsigned int nonstatic_oop_map_count, + int* nonstatic_oop_offsets, + unsigned int* nonstatic_oop_counts); +- void set_precomputed_flags(instanceKlassHandle k); ++ void set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass); + objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, + objArrayHandle local_ifs, TRAPS); + +@@ -337,7 +341,12 @@ + + public: + // Constructor +- ClassFileParser(ClassFileStream* st) { set_stream(st); } ++ ClassFileParser(ClassFileStream* st) { ++ set_stream(st); ++ _field_redefinition_policy = 0xff; ++ 
_static_field_redefinition_policy = 0xff; ++ _method_redefinition_policy = 0xff; ++ } + + // Parse .class file and return new klassOop. The klassOop is not hooked up + // to the system dictionary or any other structures, so a .class file can +@@ -349,21 +358,33 @@ + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + TempNewSymbol& parsed_name, + bool verify, + TRAPS) { + KlassHandle no_host_klass; +- return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD); ++ return parseClassFile(name, class_loader, protection_domain, old_klass, no_host_klass, NULL, parsed_name, verify, THREAD); + } + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + KlassHandle host_klass, + GrowableArray<Handle>* cp_patches, + TempNewSymbol& parsed_name, + bool verify, + TRAPS); + ++ static void initialize_static_field(fieldDescriptor* fd, TRAPS); ++ ++ // DCEVM: Creates symbol handles for the super class and the interfaces ++ void findSuperSymbols(Symbol* name, ++ Handle class_loader, ++ Handle protection_domain, ++ KlassHandle old_klass, ++ GrowableArray<Symbol*> &handles, ++ TRAPS); ++ + // Verifier checks + static void check_super_class_access(instanceKlassHandle this_klass, TRAPS); + static void check_super_interface_access(instanceKlassHandle this_klass, TRAPS); +diff -r 6c6a2299029a src/share/vm/classfile/classLoader.cpp +--- a/src/share/vm/classfile/classLoader.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/classLoader.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -915,6 +915,7 @@ + instanceKlassHandle result = parser.parseClassFile(h_name, + class_loader, + protection_domain, ++ KlassHandle(), + parsed_name, + false, + CHECK_(h)); +diff -r 6c6a2299029a src/share/vm/classfile/dictionary.cpp +--- a/src/share/vm/classfile/dictionary.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/dictionary.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -326,6 +326,21 @@ + } + } + ++ ++// DCEVM: Just the classes from defining class loaders ++void Dictionary::classes_do(ObjectClosure *closure) { ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* probe = bucket(index); ++ probe != NULL; ++ probe = probe->next()) { ++ klassOop k = probe->klass(); ++ if (probe->loader() == instanceKlass::cast(k)->class_loader()) { ++ closure->do_object(k); ++ } ++ } ++ } ++} ++ + // Added for initialize_itable_for_klass to handle exceptions + // Just the classes from defining class loaders + void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) { +@@ -433,6 +448,33 @@ + add_entry(index, entry); + } + ++// DCEVM: Updates the klass entry to point to the new klassOop. Necessary only for class redefinition. ++bool Dictionary::update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class) { ++ ++ // There are several entries for the same class in the dictionary: One extra entry for each parent classloader of the classloader of the class. 
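++ // Scan every bucket and swap each literal that still points to the old
++ // version, so the initiating-loader entries are updated as well.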
++ bool found = false; ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass() == old_class()) { ++ entry->set_literal(k()); ++ found = true; ++ } ++ } ++ } ++ ++ return found; ++} ++ ++// DCEVM: Undo previous updates to the system dictionary ++void Dictionary::rollback_redefinition() { ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass()->klass_part()->is_redefining()) { ++ entry->set_literal(entry->klass()->klass_part()->old_version()); ++ } ++ } ++ } ++} + + // This routine does not lock the system dictionary. + // +@@ -459,12 +501,22 @@ + return NULL; + } + ++// DCEVM: return old version if we are not in the new universe? ++klassOop Dictionary::intercept_for_version(klassOop k) { ++ if (k == NULL) return k; ++ ++ if (k->klass_part()->is_redefining() && !Thread::current()->pretend_new_universe()) { ++ return k->klass_part()->old_version(); ++ } ++ ++ return k; ++} + + klassOop Dictionary::find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS) { + DictionaryEntry* entry = get_entry(index, hash, name, loader); + if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) { +- return entry->klass(); ++ return intercept_for_version(entry->klass()); + } else { + return NULL; + } +@@ -477,7 +529,7 @@ + assert (index == index_for(name, loader), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, loader); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? entry->klass() : (klassOop)NULL); + } + + +@@ -489,7 +541,7 @@ + assert (index == index_for(name, Handle()), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, Handle()); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? 
entry->klass() : (klassOop)NULL); + } + + +diff -r 6c6a2299029a src/share/vm/classfile/dictionary.hpp +--- a/src/share/vm/classfile/dictionary.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/dictionary.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -73,6 +73,10 @@ + + void add_klass(Symbol* class_name, Handle class_loader,KlassHandle obj); + ++ bool update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class); ++ ++ void rollback_redefinition(); ++ + klassOop find_class(int index, unsigned int hash, + Symbol* name, Handle loader); + +@@ -89,6 +93,7 @@ + void classes_do(void f(klassOop, TRAPS), TRAPS); + void classes_do(void f(klassOop, oop)); + void classes_do(void f(klassOop, oop, TRAPS), TRAPS); ++ void classes_do(ObjectClosure *closure); + + void methods_do(void f(methodOop)); + +@@ -105,6 +110,7 @@ + bool do_unloading(BoolObjectClosure* is_alive); + + // Protection domains ++ static klassOop intercept_for_version(klassOop k); + klassOop find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS); + bool is_valid_protection_domain(int index, unsigned int hash, +diff -r 6c6a2299029a src/share/vm/classfile/javaClasses.cpp +--- a/src/share/vm/classfile/javaClasses.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/javaClasses.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1798,7 +1798,7 @@ + klassOop klass = SystemDictionary::reflect_Method_klass(); + // This class is eagerly initialized during VM initialization, since we keep a refence + // to one of the methods +- assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized"); ++ assert(instanceKlass::cast(klass)->is_initialized() || klass->klass_part()->old_version() != NULL, "must be initialized"); + return instanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH); + } + +diff -r 6c6a2299029a src/share/vm/classfile/javaClasses.hpp +--- a/src/share/vm/classfile/javaClasses.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/javaClasses.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -213,7 +213,6 @@ + + class java_lang_Class : AllStatic { + friend class VMStructs; +- + private: + // The fake offsets are added by the class loader when java.lang.Class is loaded + +diff -r 6c6a2299029a src/share/vm/classfile/loaderConstraints.cpp +--- a/src/share/vm/classfile/loaderConstraints.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/loaderConstraints.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -449,7 +449,7 @@ + if (k != NULL) { + // We found the class in the system dictionary, so we should + // make sure that the klassOop matches what we already have. +- guarantee(k == probe->klass(), "klass should be in dictionary"); ++ guarantee(k == probe->klass()->klass_part()->newest_version(), "klass should be in dictionary"); + } else { + // If we don't find the class in the system dictionary, it + // has to be in the placeholders table. 
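The dictionary and loader-constraint changes above all lean on the klass version chain this patch introduces: old_version()/new_version() link the versions of a redefined class, newest_version() is the head of the chain, and is_redefining() marks a version whose redefinition is still in flight. A minimal sketch of walking that chain, assuming only the accessors the patch itself uses (the helper is illustrative, not part of the patch):

    // Walk back from a klass to the version that a given revision saw.
    static klassOop version_at_revision(klassOop k, int revision) {
      while (k != NULL && k->klass_part()->revision_number() > revision) {
        k = k->klass_part()->old_version();  // step back in the class history
      }
      return k;
    }

InterpreterRuntime::find_correct_method, later in this patch, performs exactly this loop to match the receiver's klass version against the revision of the calling method's holder.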
+diff -r 6c6a2299029a src/share/vm/classfile/loaderConstraints.hpp +--- a/src/share/vm/classfile/loaderConstraints.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/loaderConstraints.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -106,7 +106,7 @@ + + klassOop klass() { return literal(); } + klassOop* klass_addr() { return literal_addr(); } +- void set_klass(klassOop k) { set_literal(k); } ++ void set_klass(klassOop k) { set_literal(k); assert(k == NULL || !k->klass_part()->is_redefining(), "just checking"); } + + LoaderConstraintEntry* next() { + return (LoaderConstraintEntry*)HashtableEntry<klassOop, mtClass>::next(); +diff -r 6c6a2299029a src/share/vm/classfile/systemDictionary.cpp +--- a/src/share/vm/classfile/systemDictionary.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/systemDictionary.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -157,6 +157,7 @@ + // can return a null klass + klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD); + } ++ assert(klass == NULL || klass->klass_part()->is_newest_version() || klass->klass_part()->newest_version()->klass_part()->is_redefining(), "must be"); + return klass; + } + +@@ -199,7 +200,8 @@ + // Forwards to resolve_instance_class_or_null + + klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) { +- assert(!THREAD->is_Compiler_thread(), ++ // DCEVM: Check if this relaxing of the condition is correct? Test case hs203t004 failing otherwise. ++ assert(!THREAD->is_Compiler_thread() || JvmtiThreadState::state_for(JavaThread::current())->get_class_being_redefined() != NULL, + err_msg("can not load classes with compiler thread: class=%s, classloader=%s", + class_name->as_C_string(), + class_loader.is_null() ? "null" : class_loader->klass()->klass_part()->name()->as_C_string())); +@@ -961,6 +963,7 @@ + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ KlassHandle(), + host_klass, + cp_patches, + parsed_name, +@@ -1022,7 +1025,14 @@ + Handle protection_domain, + ClassFileStream* st, + bool verify, ++ KlassHandle old_class, + TRAPS) { ++ bool redefine_classes_locked = false; ++ if (!Thread::current()->redefine_classes_mutex()->owned_by_self()) { ++ Thread::current()->redefine_classes_mutex()->lock(); ++ redefine_classes_locked = true; ++ } ++ + // Classloaders that support parallelism, e.g. bootstrap classloader, + // or all classloaders with UnsyncloadClass do not acquire lock here + bool DoObjectLock = true; +@@ -1050,9 +1060,14 @@ + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ old_class, + parsed_name, + verify, + THREAD); ++ if (!old_class.is_null() && !k.is_null()) { ++ k->set_redefining(true); ++ k->set_old_version(old_class()); ++ } + + const char* pkg = "java/"; + if (!HAS_PENDING_EXCEPTION && +@@ -1087,13 +1102,18 @@ + // Add class just loaded + // If a class loader supports parallel classloading handle parallel define requests + // find_or_define_instance_class may return a different instanceKlass +- if (is_parallelCapable(class_loader)) { ++ // (tw) TODO: for class redefinition the parallel version does not work, check if this is a problem? 
++ if (is_parallelCapable(class_loader) && old_class.is_null()) { + k = find_or_define_instance_class(class_name, class_loader, k, THREAD); + } else { +- define_instance_class(k, THREAD); ++ define_instance_class(k, old_class, THREAD); + } + } + ++ if (redefine_classes_locked) { ++ Thread::current()->redefine_classes_mutex()->unlock(); ++ } ++ + // If parsing the class file or define_instance_class failed, we + // need to remove the placeholder added on our behalf. But we + // must make sure parsed_name is valid first (it won't be if we had +@@ -1122,7 +1142,7 @@ + MutexLocker mu(SystemDictionary_lock, THREAD); + + klassOop check = find_class(parsed_name, class_loader); +- assert(check == k(), "should be present in the dictionary"); ++ assert((check == k() && !k->is_redefining()) || (k->is_redefining() && check == k->old_version()), "should be present in the dictionary"); + + klassOop check2 = find_class(h_name, h_loader); + assert(check == check2, "name inconsistancy in SystemDictionary"); +@@ -1349,7 +1369,11 @@ + } + } + +-void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { ++void SystemDictionary::rollback_redefinition() { ++ dictionary()->rollback_redefinition(); ++} ++ ++void SystemDictionary::define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS) { + + Handle class_loader_h(THREAD, k->class_loader()); + +@@ -1376,13 +1400,23 @@ + Symbol* name_h = k->name(); + unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h); + int d_index = dictionary()->hash_to_index(d_hash); +- check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK); ++ ++ // DCEVM: Update version of the klassOop in the system dictionary ++ // TODO: Check for thread safety! ++ if (!old_class.is_null()) { ++ bool ok = dictionary()->update_klass(d_index, d_hash, name_h, class_loader_h, k, old_class); ++ assert (ok, "must have found old class and updated!"); ++ } ++ check_constraints(d_index, d_hash, k, class_loader_h, old_class.is_null(), CHECK); ++ ++ if(!old_class.is_null() && TraceRedefineClasses >= 3){ tty->print_cr("Class has been updated!"); } + + // Register class just loaded with class loader (placed in Vector) + // Note we do this before updating the dictionary, as this can + // fail with an OutOfMemoryError (if it does, we will *not* put this + // class in the dictionary and will not update the class hierarchy). +- if (k->class_loader() != NULL) { ++ // (tw) Only register if not redefining a class. ++ if (k->class_loader() != NULL && old_class.is_null()) { + methodHandle m(THREAD, Universe::loader_addClass_method()); + JavaValue result(T_VOID); + JavaCallArguments args(class_loader_h); +@@ -1408,8 +1442,9 @@ + } + k->eager_initialize(THREAD); + ++ // (tw) Only notify jvmti if not redefining a class. + // notify jvmti +- if (JvmtiExport::should_post_class_load()) { ++ if (JvmtiExport::should_post_class_load() && old_class.is_null()) { + assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_load((JavaThread *) THREAD, k()); + +@@ -1482,7 +1517,7 @@ + } + } + +- define_instance_class(k, THREAD); ++ define_instance_class(k, KlassHandle(), THREAD); + + Handle linkage_exception = Handle(); // null handle + +@@ -1612,6 +1647,14 @@ + Universe::flush_dependents_on(k); + } + ++// (tw) Remove from hierarchy - Undo add_to_hierarchy. ++void SystemDictionary::remove_from_hierarchy(instanceKlassHandle k) { ++ assert(k.not_null(), "just checking"); ++ ++ k->remove_from_sibling_list(); ++ ++ // TODO: Remove from interfaces. 
++} + + // ---------------------------------------------------------------------------- + // GC support +@@ -1701,7 +1744,8 @@ + } + + +-void SystemDictionary::preloaded_oops_do(OopClosure* f) { ++// (tw) Iterate over all pre-loaded classes in the dictionary. ++void SystemDictionary::preloaded_classes_do(OopClosure *f) { + for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) { + f->do_oop((oop*) &_well_known_klasses[k]); + } +@@ -1715,6 +1759,23 @@ + } + } + ++ // TODO: Check if we need to call FilterFieldsMap ++} ++ ++void SystemDictionary::preloaded_oops_do(OopClosure* f) { ++ for (int k = (int)FIRST_WKID; k < (int)WKID_LIMIT; k++) { ++ f->do_oop((oop*) &_well_known_klasses[k]); ++ } ++ ++ { ++ for (int i = 0; i < T_VOID+1; i++) { ++ if (_box_klasses[i] != NULL) { ++ assert(i >= T_BOOLEAN, "checking"); ++ f->do_oop((oop*) &_box_klasses[i]); ++ } ++ } ++ } ++ + // The basic type mirrors would have already been processed in + // Universe::oops_do(), via a call to shared_oops_do(), so should + // not be processed again. +@@ -1733,6 +1794,11 @@ + dictionary()->classes_do(f); + } + ++// (tw) Iterate over all classes in the dictionary. ++void SystemDictionary::classes_do(ObjectClosure *closure) { ++ dictionary()->classes_do(closure); ++} ++ + // Added for initialize_itable_for_klass + // Just the classes from defining class loaders + // Don't iterate over placeholders +@@ -1869,7 +1935,9 @@ + + // Preload ref klasses and set reference types + instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER); +- instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass)); ++ ++ // (tw) This is now done in parseClassFile in order to support class redefinition ++ // instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass)); + + initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK); + instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT); +@@ -1955,7 +2023,11 @@ + // also holds array classes + + assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary"); +- if ((defining == true) || (k() != check)) { ++ if ((defining == true) && ((k() != check) && k->old_version() != check)) { ++ ResourceMark rm(Thread::current()); ++ tty->print_cr("(%d / %d) (%s/%s)", k->revision_number(), check->klass_part()->revision_number(), k->name()->as_C_string(), check->klass_part()->name()->as_C_string()); ++ k()->print(); ++ check->print(); + linkage_error = "loader (instance of %s): attempted duplicate class " + "definition for name: \"%s\""; + } else { +diff -r 6c6a2299029a src/share/vm/classfile/systemDictionary.hpp +--- a/src/share/vm/classfile/systemDictionary.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/systemDictionary.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -268,7 +268,7 @@ + // Resolve from stream (called by jni_DefineClass and JVM_DefineClass) + static klassOop resolve_from_stream(Symbol* class_name, Handle class_loader, + Handle protection_domain, +- ClassFileStream* st, bool verify, TRAPS); ++ ClassFileStream* st, bool verify, KlassHandle old_class, TRAPS); + + // Lookup an already loaded class. If not found NULL is returned. 
+ static klassOop find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS); +@@ -309,6 +309,8 @@ + // Iterate over all klasses in dictionary + // Just the classes from defining class loaders + static void classes_do(void f(klassOop)); ++ static void classes_do(ObjectClosure *closure); ++ static void preloaded_classes_do(OopClosure *closure); + // Added for initialize_itable_for_klass to handle exceptions + static void classes_do(void f(klassOop, TRAPS), TRAPS); + // All classes, and their class loaders +@@ -415,6 +417,8 @@ + initialize_wk_klasses_until((WKID) limit, start_id, THREAD); + } + ++ static void rollback_redefinition(); ++ + public: + #define WK_KLASS_DECLARE(name, symbol, option) \ + static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } +@@ -596,7 +600,7 @@ + // after waiting, but before reentering SystemDictionary_lock + // to preserve lock order semantics. + static void double_lock_wait(Handle lockObject, TRAPS); +- static void define_instance_class(instanceKlassHandle k, TRAPS); ++ static void define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS); + static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, + Handle class_loader, + instanceKlassHandle k, TRAPS); +@@ -615,12 +619,17 @@ + // Setup link to hierarchy + static void add_to_hierarchy(instanceKlassHandle k, TRAPS); + ++public: ++ ++ // Remove link to hierarchy ++ static void remove_from_hierarchy(instanceKlassHandle k); ++ ++private: + // event based tracing + static void post_class_load_event(TracingTime start_time, instanceKlassHandle k, + Handle initiating_loader); + static void post_class_unload_events(BoolObjectClosure* is_alive); + +-private: + // We pass in the hashtable index so we can calculate it outside of + // the SystemDictionary_lock. + +diff -r 6c6a2299029a src/share/vm/classfile/verifier.cpp +--- a/src/share/vm/classfile/verifier.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/verifier.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -106,7 +106,7 @@ + return !need_verify; + } + +-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) { ++bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS) { + HandleMark hm; + ResourceMark rm(THREAD); + +@@ -117,7 +117,8 @@ + + const char* klassName = klass->external_name(); + bool can_failover = FailOverToOldVerifier && +- klass->major_version() < NOFAILOVER_MAJOR_VERSION; ++ klass->major_version() < NOFAILOVER_MAJOR_VERSION && ++ may_use_old_verifier; + + // If the class should be verified, first see if we can use the split + // verifier. 
If not, or if verification fails and FailOverToOldVerifier +@@ -138,6 +139,7 @@ + tty->print_cr( + "Fail over class verification to old verifier for: %s", klassName); + } ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -145,6 +147,7 @@ + exception_message = split_verifier.exception_message(); + } + } else { ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -159,6 +162,9 @@ + } + tty->print_cr("End class verification for: %s", klassName); + } ++ } else if (TraceClassInitialization) { ++ // (tw) Output not verified classes ++ tty->print_cr("Class %s was not verified", klassName); + } + + if (HAS_PENDING_EXCEPTION) { +@@ -210,7 +216,7 @@ + // NOTE: this is called too early in the bootstrapping process to be + // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection. + (refl_magic_klass == NULL || +- !klass->is_subtype_of(refl_magic_klass) || ++ !(klass->is_subtype_of(refl_magic_klass) || klass->is_subtype_of(refl_magic_klass->klass_part()->newest_version())) || + VerifyReflectionBytecodes) + ); + } +@@ -517,7 +523,7 @@ + + ClassVerifier::ClassVerifier( + instanceKlassHandle klass, TRAPS) +- : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass) { ++ : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass->newest_version()), _klass_to_verify(klass) { + _this_type = VerificationType::reference_type(klass->name()); + // Create list to hold symbols in reference area. + _symbols = new GrowableArray<Symbol*>(100, 0, NULL); +@@ -547,7 +553,7 @@ + _klass->external_name()); + } + +- objArrayHandle methods(THREAD, _klass->methods()); ++ objArrayHandle methods(THREAD, _klass_to_verify->methods()); + int num_methods = methods->length(); + + for (int index = 0; index < num_methods; index++) { +@@ -2444,7 +2450,10 @@ + VerificationType stack_object_type = + current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this)); + if (current_type() != stack_object_type) { +- assert(cp->cache() == NULL, "not rewritten yet"); ++ ++ // (tw) TODO: Check if relaxing the following assertion is correct. For class redefinition we might call the verifier twice. ++ //assert(cp->cache() == NULL, "not rewritten yet"); ++ + Symbol* ref_class_name = + cp->klass_name_at(cp->klass_ref_index_at(index)); + // See the comments in verify_field_instructions() for +diff -r 6c6a2299029a src/share/vm/classfile/verifier.hpp +--- a/src/share/vm/classfile/verifier.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/verifier.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -47,7 +47,7 @@ + * Otherwise, no exception is thrown and the return indicates the + * error. 
+ */ +- static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS); ++ static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS); + + // Return false if the class is loaded by the bootstrap loader, + // or if defineClass was called requesting skipping verification +@@ -256,7 +256,10 @@ + + ErrorContext _error_context; // contains information about an error + ++public: + void verify_method(methodHandle method, TRAPS); ++ ++private: + char* generate_code_data(methodHandle m, u4 code_length, TRAPS); + void verify_exception_handler_table(u4 code_length, char* code_data, + int& min, int& max, TRAPS); +@@ -329,6 +332,7 @@ + + VerificationType object_type() const; + ++ instanceKlassHandle _klass_to_verify; + instanceKlassHandle _klass; // the class being verified + methodHandle _method; // current method being verified + VerificationType _this_type; // the verification type of the current class +diff -r 6c6a2299029a src/share/vm/classfile/vmSymbols.hpp +--- a/src/share/vm/classfile/vmSymbols.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/classfile/vmSymbols.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -138,6 +138,10 @@ + template(tag_annotation_default, "AnnotationDefault") \ + template(tag_enclosing_method, "EnclosingMethod") \ + template(tag_bootstrap_methods, "BootstrapMethods") \ ++ template(tag_static_field_redefinition_policy, "StaticFieldRedefinitionPolicy") \ ++ template(tag_field_redefinition_policy, "FieldRedefinitionPolicy") \ ++ template(tag_method_redefinition_policy, "MethodRedefinitionPolicy") \ ++ template(tag_code_sections, "CodeSections") \ + \ + /* exception klasses: at least all exceptions thrown by the VM have entries here */ \ + template(java_lang_ArithmeticException, "java/lang/ArithmeticException") \ +@@ -377,6 +381,10 @@ + template(oop_size_name, "oop_size") \ + template(static_oop_field_count_name, "static_oop_field_count") \ + \ ++ /* mutator in case of class redefinition */ \ ++ template(static_transformer_name, "$staticTransformer") \ ++ template(transformer_name, "$transformer") \ ++ \ + /* non-intrinsic name/signature pairs: */ \ + template(register_method_name, "register") \ + do_alias(register_method_signature, object_void_signature) \ +diff -r 6c6a2299029a src/share/vm/compiler/compileBroker.cpp +--- a/src/share/vm/compiler/compileBroker.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/compiler/compileBroker.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1181,6 +1181,14 @@ + int comp_level, + methodHandle hot_method, int hot_count, + const char* comment, Thread* THREAD) { ++ JavaThread* thread = JavaThread::current(); ++ if (thread->is_Compiler_thread() && thread->as_CompilerThread()->should_bailout()) { ++ return NULL; // FIXME: DCEVM: should we do something else? ++ } ++ if (instanceKlass::cast(method->method_holder())->is_not_initialized()) { ++ return NULL; // FIXME: DCEVM: how should we avoid this? ++ } ++ + // make sure arguments make sense + assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method"); + assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); +@@ -1260,6 +1268,7 @@ + } + + // RedefineClasses() has replaced this method; just return ++ // (tw) This is important for the new version of hotswapping: Old code will only execute properly in the interpreter! 
+ if (method->is_old()) { + return NULL; + } +@@ -1592,6 +1601,8 @@ + + // Never compile a method if breakpoints are present in it + if (method()->number_of_breakpoints() == 0) { ++ thread->compilation_mutex()->lock(); ++ thread->set_should_bailout(false); + // Compile the method. + if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { + #ifdef COMPILER1 +@@ -1615,6 +1626,7 @@ + // After compilation is disabled, remove remaining methods from queue + method->clear_queued_for_compilation(); + } ++ thread->compilation_mutex()->unlock(); + } + } + } +@@ -2164,3 +2176,15 @@ + st->cr(); + #endif + } ++ ++// (tw) Clean up compiler interface after a class redefinition step ++void CompileBroker::cleanup_after_redefinition() { ++ int num_threads = _method_threads->length(); ++ ++ ciObjectFactory::sort_ci_objects(ciObjectFactory::_shared_ci_objects); ++ for (int i=0; i<num_threads; i++) { ++ if (_method_threads->at(i)->env() != NULL && _method_threads->at(i)->env() != (ciEnv *)badAddress) { ++ _method_threads->at(i)->env()->cleanup_after_redefinition(); ++ } ++ } ++} +diff -r 6c6a2299029a src/share/vm/compiler/compileBroker.hpp +--- a/src/share/vm/compiler/compileBroker.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/compiler/compileBroker.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -408,6 +408,7 @@ + + static void print_compiler_threads_on(outputStream* st); + ++ static void cleanup_after_redefinition(); + static int get_total_compile_count() { return _total_compile_count; } + static int get_total_bailout_count() { return _total_bailout_count; } + static int get_total_invalidated_count() { return _total_invalidated_count; } +diff -r 6c6a2299029a src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -162,6 +162,13 @@ + } + } + ++ ++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ ShouldNotReachHere(); ++ return NULL; ++} ++ + // Like CompactibleSpace forward() but always calls cross_threshold() to + // update the block offset table. Removed initialize_threshold call because + // CFLS does not use a block offset array for contiguous spaces. +diff -r 6c6a2299029a src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -149,6 +149,7 @@ + + // Support for compacting cms + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); ++ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); + HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); + + // Initialization helpers. 
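The compile_method() guards and the compilation_mutex()/should_bailout() pair added to compileBroker.cpp above suggest a simple handshake for draining the JIT around a redefinition; CHECK_BAILOUT (see c1_Compilation.hpp earlier in this patch) polls the flag from inside the compiler. A hedged sketch of the orchestrating side, assuming the caller already holds the list of compiler threads (the helper itself is illustrative, not part of the patch):

    // Ask every compiler thread to abort, then wait for each to leave its
    // current compilation. The compiler thread holds compilation_mutex()
    // for the whole compile and clears should_bailout before starting, so
    // acquiring the mutex here blocks until the thread has either bailed
    // out or finished.
    static void drain_compilations(GrowableArray<CompilerThread*>* threads) {
      for (int i = 0; i < threads->length(); i++) {
        threads->at(i)->set_should_bailout(true);
      }
      for (int i = 0; i < threads->length(); i++) {
        threads->at(i)->compilation_mutex()->lock();
        threads->at(i)->compilation_mutex()->unlock();
      }
    }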
+diff -r 6c6a2299029a src/share/vm/gc_implementation/shared/markSweep.cpp +--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -32,6 +32,8 @@ + #include "oops/objArrayKlass.inline.hpp" + #include "oops/oop.inline.hpp" + ++GrowableArray<oop>* MarkSweep::_rescued_oops = NULL; ++ + Stack<oop, mtGC> MarkSweep::_marking_stack; + Stack<DataLayout*, mtGC> MarkSweep::_revisit_mdo_stack; + Stack<Klass*, mtGC> MarkSweep::_revisit_klass_stack; +@@ -357,3 +359,86 @@ + } + + #endif ++ ++// (tw) Copy the rescued objects to their destination address after compaction. ++void MarkSweep::copy_rescued_objects_back() { ++ ++ if (_rescued_oops != NULL) { ++ ++ for (int i=0; i<_rescued_oops->length(); i++) { ++ oop rescued_obj = _rescued_oops->at(i); ++ ++ int size = rescued_obj->size(); ++ oop new_obj = rescued_obj->forwardee(); ++ ++ if (rescued_obj->blueprint()->new_version() != NULL) { ++ MarkSweep::update_fields(rescued_obj, new_obj); ++ } else { ++ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); ++ } ++ ++ FREE_RESOURCE_ARRAY(HeapWord, rescued_obj, size); ++ ++ new_obj->init_mark(); ++ assert(new_obj->is_oop(), "must be a valid oop"); ++ } ++ _rescued_oops->clear(); ++ _rescued_oops = NULL; ++ } ++} ++ ++// (tw) Update instances of a class whose fields changed. ++void MarkSweep::update_fields(oop q, oop new_location) { ++ ++ assert(q->blueprint()->new_version() != NULL, "class of old object must have new version"); ++ ++ klassOop old_klass_oop = q->klass(); ++ klassOop new_klass_oop = q->blueprint()->new_version(); ++ ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = instanceKlass::cast(new_klass_oop); ++ ++ int size = q->size_given_klass(old_klass); ++ int new_size = q->size_given_klass(new_klass); ++ ++ oop tmp_obj = q; ++ ++ if (new_klass_oop->klass_part()->is_copying_backwards()) { ++ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) || ++ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) { ++ tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size); ++ } ++ } ++ ++ int *cur = new_klass_oop->klass_part()->update_information(); ++ ++ tmp_obj->set_klass_no_check(new_klass_oop); ++ ++ if (cur == NULL) { ++ assert(size == new_size, "just checking"); ++ Copy::conjoint_words(((HeapWord *)tmp_obj), ((HeapWord *)new_location), size); ++ } else { ++ int destOffset = 0; ++ while (*cur != 0) { ++ if (*cur > 0) { ++ int size = *cur; ++ cur++; ++ int offset = *cur; ++ Copy::conjoint_jbytes(((char *)tmp_obj) + offset, ((char *)new_location) + destOffset, size); ++ destOffset += size; ++ cur++; ++ } else { ++ assert(*cur < 0, ""); ++ int skip = -*cur; ++ Copy::fill_to_bytes(((char*)new_location) + destOffset, skip, 0); ++ destOffset += skip; ++ cur++; ++ } ++ } ++ } ++ ++ if (tmp_obj != q) { ++ FREE_RESOURCE_ARRAY(HeapWord, tmp_obj, size); ++ } ++} +diff -r 6c6a2299029a src/share/vm/gc_implementation/shared/markSweep.hpp +--- a/src/share/vm/gc_implementation/shared/markSweep.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -117,8 +117,12 @@ + friend class AdjustPointerClosure; + friend class KeepAliveClosure; + friend class VM_MarkSweep; ++ friend class 
GenMarkSweep;
+ friend void marksweep_init();
+
++public:
++ static GrowableArray<oop>* _rescued_oops;
++
+ //
+ // Vars
+ //
+@@ -208,6 +212,8 @@
+ template <class T> static inline void mark_and_push(T* p);
+ static inline void push_objarray(oop obj, size_t index);
+
++ static void copy_rescued_objects_back();
++ static void update_fields(oop q, oop new_location);
+ static void follow_stack(); // Empty marking stack.
+
+ static void preserve_mark(oop p, markOop mark);
+diff -r 6c6a2299029a src/share/vm/interpreter/interpreterRuntime.cpp
+--- a/src/share/vm/interpreter/interpreterRuntime.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/interpreter/interpreterRuntime.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -402,7 +402,7 @@
+ assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
+ assert(h_exception->is_oop(), "just checking");
+ // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
+- if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
++ if (!(h_exception->is_a(SystemDictionary::Throwable_klass()->klass_part()->newest_version())) && !(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
+ if (ExitVMOnVerifyError) vm_exit(-1);
+ ShouldNotReachHere();
+ }
+@@ -656,6 +656,82 @@
+ JvmtiExport::post_raw_breakpoint(thread, method, bcp);
+ IRT_END
+
++// (tw) Correctly resolve method when running old code.
++IRT_ENTRY(void, InterpreterRuntime::forward_method(JavaThread *thread))
++ {
++ MonitorLockerEx ml(RedefinitionSync_lock);
++ while (Threads::wait_at_instrumentation_entry()) {
++ ml.wait();
++ }
++ }
++ frame f = last_frame(thread);
++ methodOop m = f.interpreter_frame_method();
++ methodOop forward_method = m->forward_method();
++ if (forward_method != NULL) {
++ int bci = f.interpreter_frame_bci();
++
++ if (TraceRedefineClasses >= 3) {
++ tty->print_cr("Executing NOP in method %s at bci %d %d", m->name()->as_C_string(), bci, m->is_in_code_section(bci + 1));
++ }
++
++ int next_bci = bci - 1;
++ // First try bci before NOP.
++ if (!m->is_in_code_section(next_bci)) {
++ // Try bci after NOP.
++ next_bci = bci + 1;
++ if (!m->is_in_code_section(next_bci)) return;
++ }
++
++ int new_bci = m->calculate_forward_bci(next_bci, forward_method);
++ if (TraceRedefineClasses >= 2) {
++ tty->print_cr("Transferring execution of %s to new method old_bci=%d new_bci=%d", forward_method->name()->as_C_string(), bci, new_bci);
++ }
++ RegisterMap reg_map(thread);
++ vframe* vf = vframe::new_vframe(&f, &reg_map, thread);
++ interpretedVFrame *iframe = (interpretedVFrame *)vf;
++ iframe->set_method(forward_method, new_bci - 1);
++ }
++IRT_END
++
++// (tw) Correctly resolve method when running old code.
++IRT_ENTRY(void, InterpreterRuntime::find_correct_method(JavaThread *thread, oopDesc* receiverOop, int vTableIndex))
++ // extract receiver from the outgoing argument list if necessary
++ Handle receiver(thread, receiverOop);
++
++ // TODO: Check for invokeinterface!
++ Bytecodes::Code bytecode = Bytecodes::_invokevirtual;
++
++ int method_holder_revision_number = method(thread)->method_holder()->klass_part()->revision_number();
++ klassOop klass = receiverOop->klass();
++ while (klass->klass_part()->revision_number() > method_holder_revision_number) {
++ klass = klass->klass_part()->old_version();
++ }
++
++ // TODO: Check for correctness if different vtable indices in different versions?
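++ // The walk above follows the old_version() chain until the receiver's klass
++ // revision matches the revision the calling method was compiled against, so
++ // the vtable index below is applied to a vtable with the matching layout.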
++ ++ methodOop method = ((instanceKlass *)klass->klass_part())->method_at_vtable(vTableIndex); ++ thread->set_vm_result(method); ++IRT_END ++ ++// Correctly resolve interface method when running old code. ++IRT_ENTRY(void, InterpreterRuntime::find_correct_interface_method(JavaThread *thread, oopDesc* receiverOop, oopDesc* interface_klass, int vTableIndex)) ++ ++ // extract receiver from the outgoing argument list if necessary ++ Handle receiver(thread, receiverOop); ++ ++ // TODO: Check for invokeinterface! ++ Bytecodes::Code bytecode = Bytecodes::_invokevirtual; ++ ++ int method_holder_revision_number = method(thread)->method_holder()->klass_part()->revision_number(); ++ klassOop klass = receiverOop->klass(); ++ while (klass->klass_part()->revision_number() > method_holder_revision_number) { ++ klass = klass->klass_part()->old_version(); ++ } ++ ++ methodOop method = ((instanceKlass *)klass->klass_part())->method_at_itable((klassOop)interface_klass, vTableIndex, THREAD); ++ thread->set_vm_result(method); ++IRT_END ++ + IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode)) { + // extract receiver from the outgoing argument list if necessary + Handle receiver(thread, NULL); +@@ -684,6 +760,10 @@ + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (info.resolved_method()->is_old()) { ++ // (tw) If we are executing an old method, this is OK! ++ if (method(thread)->is_old()) { ++ break; ++ } + // It is very unlikely that method is redefined more than 100 times + // in the middle of resolve. If it is looping here more than 100 times + // means then there could be a bug here. +diff -r 6c6a2299029a src/share/vm/interpreter/interpreterRuntime.hpp +--- a/src/share/vm/interpreter/interpreterRuntime.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/interpreter/interpreterRuntime.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -140,6 +140,9 @@ + static void post_method_entry(JavaThread *thread); + static void post_method_exit (JavaThread *thread); + static int interpreter_contains(address pc); ++ static void forward_method(JavaThread *thread); ++ static void find_correct_method(JavaThread *thread, oopDesc* receiver, int vTableIndex); ++ static void find_correct_interface_method(JavaThread *thread, oopDesc* receiver, oopDesc* interface_klass, int vTableIndex); + + // Native signature handlers + static void prepare_native_call(JavaThread* thread, methodOopDesc* method); +diff -r 6c6a2299029a src/share/vm/interpreter/linkResolver.cpp +--- a/src/share/vm/interpreter/linkResolver.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/interpreter/linkResolver.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -153,8 +153,8 @@ + // Klass resolution + + void LinkResolver::check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS) { +- if (!Reflection::verify_class_access(ref_klass->as_klassOop(), +- sel_klass->as_klassOop(), ++ if (!Reflection::verify_class_access(ref_klass->as_klassOop()->klass_part()->newest_version(), ++ sel_klass->as_klassOop()->klass_part()->newest_version(), + true)) { + ResourceMark rm(THREAD); + Exceptions::fthrow( +@@ -338,7 +338,7 @@ + // We'll check for the method name first, as that's most likely + // to be false (so we'll short-circuit out of these tests). 
+ if (sel_method->name() == vmSymbols::clone_name() &&
+- sel_klass() == SystemDictionary::Object_klass() &&
++ sel_klass()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() &&
+ resolved_klass->oop_is_array()) {
+ // We need to change "protected" to "public".
+ assert(flags.is_protected(), "clone not protected?");
+@@ -404,6 +404,156 @@
+ }
+ }
+
++
++void LinkResolver::lookup_method(methodHandle& resolved_method, KlassHandle resolved_klass,
++ Symbol* method_name, Symbol* method_signature, bool is_interface, KlassHandle current_klass, TRAPS) {
++
++ // Interface method lookup?
++ if (is_interface) {
++
++ // lookup method in this interface or its super, java.lang.Object
++ lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
++
++ if (resolved_method.is_null()) {
++ // lookup method in all the super-interfaces
++ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
++ }
++
++ // Other methods
++ } else {
++ Handle nested_exception;
++
++ // 2. lookup method in resolved klass and its super klasses
++ lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
++
++ if (resolved_method.is_null()) { // not found in the class hierarchy
++ // 3. lookup method in all the interfaces implemented by the resolved klass
++ lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
++
++ if (resolved_method.is_null()) {
++ // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
++ lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature,
++ current_klass, (Handle*)NULL, (Handle*)NULL, THREAD);
++ if (HAS_PENDING_EXCEPTION) {
++ nested_exception = Handle(THREAD, PENDING_EXCEPTION);
++ CLEAR_PENDING_EXCEPTION;
++ }
++ }
++ }
++ }
++}
++
++void LinkResolver::lookup_correct_field(fieldDescriptor &fd, KlassHandle &sel_klass, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* field_name, Symbol* field_sig, bool is_static) {
++
++ // First attempt unversioned
++ sel_klass = KlassHandle(Thread::current(), instanceKlass::cast(resolved_klass())->find_field(field_name, field_sig, &fd));
++
++
++ if (!current_klass.is_null() && !current_klass->is_newest_version()) {
++
++ // Look for the policy defined in the new version of the class (_not_ in the newest, but only in the newer relative to current klass).
++ int redefinition_policy = current_klass->new_version()->klass_part()->field_redefinition_policy();
++ if (is_static) {
++ redefinition_policy = current_klass->new_version()->klass_part()->static_field_redefinition_policy();
++ }
++
++ assert(redefinition_policy != Klass::StaticCheck, "if the policy is static check, then we can never reach here");
++
++ if (redefinition_policy != Klass::DynamicCheck) {
++
++ if (redefinition_policy == Klass::AccessOldMembers) {
++ // Forget looked up fields
++ sel_klass = KlassHandle(Thread::current(), (oop)NULL);
++ }
++
++ assert(redefinition_policy == Klass::AccessOldMembers || redefinition_policy == Klass::AccessDeletedMembers, "");
++
++ if (sel_klass.is_null() || fd.is_static() != is_static /* the field changed between static and non-static across versions */) {
++
++ // Select correct version for resolved klass.
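++ // Either nothing was found in the newest class version, or the field's
++ // staticness changed; rewind resolved_klass to the revision visible from
++ // current_klass and repeat the field lookup there.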
++ find_correct_resolved_klass(resolved_klass, current_klass); ++ ++ sel_klass = KlassHandle(Thread::current(), instanceKlass::cast(resolved_klass())->find_field(field_name, field_sig, &fd)); ++ ++ // FIXME: idubrov ++ //if (sel_klass.is_null()) { ++ // TRACE_RC2("Trying to resolve field (%s) in old universe failed => exception is the correct behaviour", field_name->as_C_string()); ++ //} else { ++ // assert(sel_klass->new_version() != NULL, "must be old class!"); ++ // TRACE_RC2("Resolved a field in the old universe (%s)!", field_name->as_C_string()); ++ //} ++ } ++ } ++ } ++} ++ ++void LinkResolver::lookup_correct_method(methodHandle& resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, ++ Symbol* method_name, Symbol* method_signature, bool is_interface, TRAPS) { ++ ++ // First attempt unversioned ++ lookup_method(resolved_method, resolved_klass, method_name, method_signature, is_interface, current_klass, CHECK); ++ ++ // (tw) Are we in an old method that wants to see a different view on the world? ++ if (!current_klass.is_null() && !current_klass->is_newest_version()) { ++ ++ // Look for the policy defined in the new version of the class (_not_ in the newest, but only in the newer relative to current klass). ++ int method_redefinition_policy = current_klass->new_version()->klass_part()->method_redefinition_policy(); ++ assert(method_redefinition_policy != Klass::StaticCheck, "if the policy is static check, then we can never reach here"); ++ ++ if (method_redefinition_policy != Klass::DynamicCheck) { ++ ++ // We do not throw the exception ++ if (method_redefinition_policy == Klass::AccessOldMembers) { ++ // Forget any new member lookup ++ resolved_method = methodHandle(THREAD, NULL); ++ } ++ ++ assert(method_redefinition_policy == Klass::AccessOldMembers || method_redefinition_policy == Klass::AccessDeletedMembers, ""); ++ ++ if (resolved_method.is_null()) { ++ ++ // Select correct version for resolved klass. ++ find_correct_resolved_klass(resolved_klass, current_klass); ++ ++ // Now do the lookup in a second attempt with a different resolved klass. 
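++ // Same pattern as for fields above: resolved_klass now denotes the
++ // revision visible from current_klass, so a method that only exists in
++ // the old class version can still be found.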
++ lookup_method(resolved_method, resolved_klass, method_name, method_signature, is_interface, current_klass, CHECK);
++
++ // FIXME: idubrov
++ //IF_TRACE_RC2 {
++ // ResourceMark rm(THREAD);
++ // if (resolved_method.is_null()) {
++ // TRACE_RC2("Trying to resolve method (%s) in old universe failed => exception is the correct behaviour", method_name->as_C_string());
++ // } else {
++ // assert(resolved_method->is_old(), "must be old method!");
++ // TRACE_RC2("Resolved a method in the old universe (%s)!", resolved_method->name()->as_C_string());
++ // }
++ //}
++ }
++ }
++ }
++
++ if (resolved_method.is_null()) {
++ // no method found
++ ResourceMark rm(THREAD);
++ THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
++ methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
++ method_name,
++ method_signature));
++ }
++}
++
++void LinkResolver::find_correct_resolved_klass(KlassHandle &resolved_klass, KlassHandle &current_klass) {
++ int current_klass_revision = current_klass->revision_number();
++ int resolved_klass_revision = resolved_klass->revision_number();
++ // FIXME: idubrov
++ //TRACE_RC2("The two different revision numbers for interfaces: current=%d / resolved_callee=%d", current_klass_revision, resolved_klass_revision);
++
++ while (resolved_klass->revision_number() > current_klass_revision) {
++ assert(resolved_klass->old_version(), "must have old version");
++ resolved_klass = KlassHandle(Thread::current(), resolved_klass->old_version());
++ }
++}
++
+ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
+ Symbol* method_name, Symbol* method_signature,
+ KlassHandle current_klass, bool check_access, TRAPS) {
+@@ -416,35 +566,8 @@
+ THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+ }
+
+- Handle nested_exception;
+-
+- // 2. lookup method in resolved klass and its super klasses
+- lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-
+- if (resolved_method.is_null()) { // not found in the class hierarchy
+- // 3. lookup method in all the interfaces implemented by the resolved klass
+- lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+-
+- if (resolved_method.is_null()) {
+- // JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
+- lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature,
+- current_klass, (Handle*)NULL, (Handle*)NULL, THREAD);
+- if (HAS_PENDING_EXCEPTION) {
+- nested_exception = Handle(THREAD, PENDING_EXCEPTION);
+- CLEAR_PENDING_EXCEPTION;
+- }
+- }
+- }
+-
+- if (resolved_method.is_null()) {
+- // 4. method lookup failed
+- ResourceMark rm(THREAD);
+- THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
+- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+- method_name,
+- method_signature),
+- nested_exception);
+- }
+- }
++ // 2. and 3. and 4. lookup method in resolved klass and its super klasses
++ lookup_correct_method(resolved_method, resolved_klass, current_klass, method_name, method_signature, false, CHECK);
+
+ // 5.
check if method is concrete + if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) { +@@ -512,20 +635,7 @@ + } + + // lookup method in this interface or its super, java.lang.Object +- lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- +- if (resolved_method.is_null()) { +- // lookup method in all the super-interfaces +- lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK); +- if (resolved_method.is_null()) { +- // no method found +- ResourceMark rm(THREAD); +- THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), +- methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()), +- method_name, +- method_signature)); +- } +- } ++ lookup_correct_method(resolved_method, resolved_klass, current_klass, method_name, method_signature, true, CHECK); + + if (check_access) { + HandleMark hm(THREAD); +@@ -612,9 +722,14 @@ + THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string()); + } + ++ KlassHandle ref_klass(THREAD, pool->pool_holder()->klass_part()); ++ + // Resolve instance field + fieldDescriptor fd; // find_field initializes fd if found +- KlassHandle sel_klass(THREAD, instanceKlass::cast(resolved_klass())->find_field(field, sig, &fd)); ++ ++ KlassHandle sel_klass; ++ lookup_correct_field(fd, sel_klass, resolved_klass, ref_klass, field, sig, is_static); ++ + // check if field exists; i.e., if a klass containing the field def has been selected + if (sel_klass.is_null()){ + ResourceMark rm(THREAD); +@@ -622,7 +737,6 @@ + } + + // check access +- KlassHandle ref_klass(THREAD, pool->pool_holder()); + check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK); + + // check for errors +@@ -634,7 +748,7 @@ + } + + // Final fields can only be accessed from its own class. +- if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) { ++ if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()->klass_part()->active_version() && sel_klass() != pool->pool_holder()) { + THROW(vmSymbols::java_lang_IllegalAccessError()); + } + +@@ -839,7 +953,7 @@ + bool check_access, bool check_null_and_abstract, TRAPS) { + methodHandle resolved_method; + linktime_resolve_virtual_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK); +- runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, check_null_and_abstract, CHECK); ++ runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, current_klass, check_null_and_abstract, CHECK); + } + + // throws linktime exceptions +@@ -869,6 +983,7 @@ + KlassHandle resolved_klass, + Handle recv, + KlassHandle recv_klass, ++ KlassHandle current_klass, + bool check_null_and_abstract, + TRAPS) { + +@@ -917,7 +1032,40 @@ + // recv_klass might be an arrayKlassOop but all vtables start at + // the same place. The cast is to avoid virtual call and assertion. + instanceKlass* inst = (instanceKlass*)recv_klass()->klass_part(); ++ ++ // (tw) The type of the virtual method call and the type of the receiver do not need to ++ // have anything in common, as the receiver type could've been hotswapped. ++ // Does not always work (method could be resolved with correct dynamic type and later ++ // be called at the same place with a wrong dynamic type). ++ // (tw) TODO: Need to handle the static type vs dynamic type issue more generally. 
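++ // Example: if the receiver's class was hotswapped to revision 3 while the
++ // method was resolved against revision 1, inst is rewound below so that
++ // method_at_vtable() indexes a vtable laid out for revision 1.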
++
++ // The vTable must be based on the view of the world of the resolved method
++ klassOop method_holder = resolved_method->method_holder();
++
++ if (method_holder->klass_part()->new_version() != NULL) {
++ // We are executing in old code
++ // FIXME: idubrov
++ //TRACE_RC2("Calling a method in old code");
++ while (method_holder->klass_part()->revision_number() < inst->revision_number()) {
++ inst = (instanceKlass *)(inst->old_version()->klass_part());
++ }
++ }
++
++ if (inst->is_subtype_of(method_holder)) {
+ selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index));
++ } else {
++
++ tty->print_cr("Failure:");
++ inst->as_klassOop()->print();
++ inst->super()->print();
++ juint off = inst->super_check_offset();
++ klassOop sup = *(klassOop*)( (address)inst->as_klassOop() + off );
++ sup->print();
++ method_holder->print();
++
++ bool b = inst->is_subtype_of(method_holder);
++ THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(), "(tw) A virtual method was called, but the type of the receiver is not related to the type of the class of the called method!");
++ }
+ }
+ }
+
+diff -r 6c6a2299029a src/share/vm/interpreter/linkResolver.hpp
+--- a/src/share/vm/interpreter/linkResolver.hpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/interpreter/linkResolver.hpp Mon Apr 28 13:12:30 2014 -0700
+@@ -110,7 +110,11 @@
+ // It does all necessary link-time checks & throws exceptions if necessary.
+
+ class LinkResolver: AllStatic {
+- private:
++private:
++ static void lookup_method (methodHandle& result, KlassHandle resolved_klass, Symbol* name, Symbol* signature, bool is_interface, KlassHandle current_klass, TRAPS);
++ static void lookup_correct_field (fieldDescriptor &fd, KlassHandle &sel_klass, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* field_name, Symbol* field_sig, bool is_static);
++ static void lookup_correct_method (methodHandle& result, KlassHandle resolved_klass, KlassHandle current_klass, Symbol* name, Symbol* signature, bool is_interface, TRAPS);
++ static void find_correct_resolved_klass (KlassHandle &resolved_klass, KlassHandle &current_klass);
+ static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
+ static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
+ static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
+@@ -133,7 +137,7 @@
+ static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
+
+ static void runtime_resolve_special_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS);
+- static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS);
++ static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, KlassHandle current_klass, bool check_null_and_abstract, TRAPS);
+ static void runtime_resolve_interface_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS);
+
+ static void check_field_accessability (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS);
+diff -r 6c6a2299029a src/share/vm/interpreter/templateTable.hpp
+--- a/src/share/vm/interpreter/templateTable.hpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/interpreter/templateTable.hpp Mon Apr 28 13:12:30 2014 -0700
+@@ -329,8 +329,8 @@
+ static void shouldnotreachhere();
+
+ // jvmti support
+- static void jvmti_post_field_access(Register cache, Register index, bool is_static, bool has_tos);
+- static void jvmti_post_field_mod(Register cache, Register index, bool is_static);
++ static void jvmti_post_field_access(Register cache, Register index, int byte_no, bool is_static, bool has_tos);
++ static void jvmti_post_field_mod(Register cache, Register index, int byte_no, bool is_static);
+ static void jvmti_post_fast_field_mod();
+
+ // debugging of TemplateGenerator
+diff -r 6c6a2299029a src/share/vm/memory/genMarkSweep.cpp
+--- a/src/share/vm/memory/genMarkSweep.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/memory/genMarkSweep.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -421,6 +421,7 @@
+ // in the same order in phase2, phase3 and phase4. We don't quite do that
+ // here (perm_gen first rather than last), so we tell the validate code
+ // to use a higher index (saved from phase2) when verifying perm_gen.
++ assert(_rescued_oops == NULL, "must be empty before processing");
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ Generation* pg = gch->perm_gen();
+
+@@ -433,10 +434,14 @@
+
+ VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
+
++ MarkSweep::copy_rescued_objects_back();
++
+ GenCompactClosure blk;
+ gch->generation_iterate(&blk, true);
+
+ VALIDATE_MARK_SWEEP_ONLY(compaction_complete());
+
++ MarkSweep::copy_rescued_objects_back();
++
+ pg->post_compact(); // Shared spaces verification.
+ }
+diff -r 6c6a2299029a src/share/vm/memory/permGen.cpp
+--- a/src/share/vm/memory/permGen.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/memory/permGen.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -57,7 +57,12 @@
+
+ for (;;) {
+ {
+- MutexLocker ml(Heap_lock);
++ // (tw) Only lock when not at a safepoint (necessary to use the split verifier from the VmThread)
++ Monitor *lock = Heap_lock;
++ if (SafepointSynchronize::is_at_safepoint()) {
++ lock = NULL;
++ }
++ MutexLockerEx ml(lock);
+ if ((obj = gen->allocate(size, false)) != NULL) {
+ return obj;
+ }
+diff -r 6c6a2299029a src/share/vm/memory/space.cpp
+--- a/src/share/vm/memory/space.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/memory/space.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -378,6 +378,31 @@
+ _compaction_top = bottom();
+ }
+
++
++// (tw) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
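++// In contrast to forward() below, no forwarding pointer is installed: the caller
++// only needs to know where an object of the given size would end up, which
++// matters when a redefined object's new size differs from its old one.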
++HeapWord* CompactibleSpace::forward_compact_top(size_t size, ++CompactPoint* cp, HeapWord* compact_top) { ++ // First check if we should switch compaction space ++ assert(this == cp->space, "'this' should be current compaction space."); ++ size_t compaction_max_size = pointer_delta(end(), compact_top); ++ while (size > compaction_max_size) { ++ // switch to next compaction space ++ cp->space->set_compaction_top(compact_top); ++ cp->space = cp->space->next_compaction_space(); ++ if (cp->space == NULL) { ++ cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); ++ assert(cp->gen != NULL, "compaction must succeed"); ++ cp->space = cp->gen->first_compaction_space(); ++ assert(cp->space != NULL, "generation must have a first compaction space"); ++ } ++ compact_top = cp->space->bottom(); ++ cp->space->set_compaction_top(compact_top); ++ cp->threshold = cp->space->initialize_threshold(); ++ compaction_max_size = pointer_delta(cp->space->end(), compact_top); ++ } ++ ++ return compact_top; ++} ++ + HeapWord* CompactibleSpace::forward(oop q, size_t size, + CompactPoint* cp, HeapWord* compact_top) { + // q is alive +@@ -401,7 +426,7 @@ + } + + // store the forwarding pointer into the mark word +- if ((HeapWord*)q != compact_top) { ++ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) { + q->forward_to(oop(compact_top)); + assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); + } else { +@@ -449,7 +474,208 @@ + + // Faster object search. + void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ if (!Universe::is_redefining_gc_run()) { ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ return; ++ } ++ ++ /* Compute the new addresses for the live objects and store it in the mark ++ * Used by universe::mark_sweep_phase2() ++ */ ++ HeapWord* compact_top; /* This is where we are currently compacting to. */ ++ ++ /* We're sure to be here before any objects are compacted into this ++ * space, so this is a good time to initialize this: ++ */ ++ set_compaction_top(bottom()); ++ ++ if (cp->space == NULL) { ++ assert(cp->gen != NULL, "need a generation"); ++ assert(cp->threshold == NULL, "just checking"); ++ assert(cp->gen->first_compaction_space() == this, "just checking"); ++ cp->space = cp->gen->first_compaction_space(); ++ compact_top = cp->space->bottom(); ++ cp->space->set_compaction_top(compact_top); ++ cp->threshold = cp->space->initialize_threshold(); ++ } else { ++ compact_top = cp->space->compaction_top(); ++ } ++ ++ /* We allow some amount of garbage towards the bottom of the space, so ++ * we don't start compacting before there is a significant gain to be made. ++ * Occasionally, we want to ensure a full compaction, which is determined ++ * by the MarkSweepAlwaysCompactCount parameter. ++ */ ++ int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations; ++ bool skip_dead = (MarkSweepAlwaysCompactCount < 1) ++ ||((invocations % MarkSweepAlwaysCompactCount) != 0); ++ ++ size_t allowed_deadspace = 0; ++ if (skip_dead) { ++ int ratio = (int)allowed_dead_ratio(); ++ allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; ++ } ++ ++ HeapWord* q = bottom(); ++ HeapWord* t = end(); ++ ++ HeapWord* end_of_live= q; /* One byte beyond the last byte of the last ++ live object. */ ++ HeapWord* first_dead = end();/* The first dead object. */ ++ LiveRange* liveRange = NULL; /* The current live range, recorded in the ++ first header of preceding free area. 
*/
++ _first_dead = first_dead;
++
++ const intx interval = PrefetchScanIntervalInBytes;
++
++ while (q < t) {
++ assert(!block_is_obj(q) ||
++ oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
++ oop(q)->mark()->has_bias_pattern(),
++ "these are the only valid states during a mark sweep");
++ if (block_is_obj(q) && oop(q)->is_gc_marked()) {
++ /* prefetch beyond q */
++ Prefetch::write(q, interval);
++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */
++ size_t size = block_size(q);
++
++ // DCEVM: begin
++ //////////////////////////////////////////////////////////////////////////
++ size_t forward_size = size;
++
++ // Compute the forward sizes and leave out objects whose position could
++ // possibly overlap other objects.
++
++ // DCEVM: There is a new version of the class of q => different size
++ if (oop(q)->blueprint()->new_version() != NULL && oop(q)->blueprint()->new_version()->klass_part()->update_information() != NULL) {
++
++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part());
++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run");
++ forward_size = new_size;
++ }
++
++ compact_top = cp->space->forward_compact_top(forward_size, cp, compact_top);
++
++ bool rescuing = false;
++ if ((rescuing = must_rescue(oop(q), oop(compact_top)))) {
++ if (MarkSweep::_rescued_oops == NULL) {
++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128);
++ }
++ // FIXME: idubrov
++ //TRACE_RC5("rescue obj %d klass=%s", MarkSweep::_rescued_oops->length(), oop(q)->klass()->klass_part()->name()->as_C_string());
++ MarkSweep::_rescued_oops->append(oop(q));
++ } else {
++ compact_top = cp->space->forward(oop(q), forward_size, cp, compact_top);
++ }
++
++ if ((size != forward_size || rescuing) && q < first_dead) {
++ // (tw) This object moves => first_dead must be set to here!
++ first_dead = q;
++ }
++ //////////////////////////////////////////////////////////////////////////
++ q += size;
++ end_of_live = q;
++ } else {
++ /* run over all the contiguous dead objects */
++ HeapWord* end = q;
++ do {
++ /* prefetch beyond end */
++ Prefetch::write(end, interval);
++ end += block_size(end);
++ } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));
++
++ /* see if we might want to pretend this object is alive so that
++ * we don't have to compact quite as often.
++ */
++ if (allowed_deadspace > 0 && q == compact_top) {
++ size_t sz = pointer_delta(end, q);
++ if (insert_deadspace(allowed_deadspace, q, sz)) {
++ compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
++ q = end;
++ end_of_live = end;
++ continue;
++ }
++ }
++
++ /* otherwise, it really is a free region. */
++
++ /* for the previous LiveRange, record the end of the live objects. */
++ if (liveRange) {
++ liveRange->set_end(q);
++ }
++
++ /* record the current LiveRange object.
++ * liveRange->start() is overlaid on the mark word.
++ */
++ liveRange = (LiveRange*)q;
++ liveRange->set_start(end);
++ liveRange->set_end(end);
++
++ /* see if this is the first dead region. */
++ if (q < first_dead) {
++ first_dead = q;
++ }
++
++ /* move on to the next object */
++ q = end;
++ }
++ }
++
++ //////////////////////////////////////////////////////////////////////////
++ // Compute the forwarding addresses for the objects that need to be
++ // rescued.
++ // TODO: empty the _rescued_oops after ALL spaces are compacted!
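++ // The objects collected in _rescued_oops above could not be forwarded in
++ // address order because their new location would overlap an object that
++ // has not been moved yet; they receive their destinations only now, after
++ // all other objects in this space have been placed.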
++ if (MarkSweep::_rescued_oops != NULL) { ++ // FIXME: idubrov ++ //TRACE_RC2("Calculating new forward sizes for %d objects!", MarkSweep::_rescued_oops->length()); ++ ++ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { ++ oop q = MarkSweep::_rescued_oops->at(i); ++ ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size((HeapWord*)q); ++ ++ size_t forward_size = size; ++ ++ // (tw) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ forward_size = new_size; ++ } ++ ++ compact_top = cp->space->forward(oop(q), forward_size, cp, compact_top); ++ assert(compact_top <= t, "must not write over end of space!"); ++ } ++ MarkSweep::_rescued_oops->clear(); ++ MarkSweep::_rescued_oops = NULL; ++ } ++ ////////////////////////////////////////////////////////////////////////// ++ ++ assert(q == t, "just checking"); ++ if (liveRange != NULL) { ++ liveRange->set_end(q); ++ } ++ _end_of_live = end_of_live; ++ if (end_of_live < first_dead) { ++ first_dead = end_of_live; ++ } ++ _first_dead = first_dead; ++ ++// FIXME: idubrov ++// if (_first_dead > top()) { ++// _first_dead = top(); ++// } ++// ++// if (_end_of_live > top()) { ++// _end_of_live = top(); ++// } ++ assert(_first_dead <= top(), "Must be smaller equal"); ++ assert(_end_of_live <= top(), "Must be smaller equal"); ++ ++ /* save the compaction_top of the compaction space. */ ++ cp->space->set_compaction_top(compact_top); + } + + void Space::adjust_pointers() { +@@ -490,17 +716,313 @@ + assert(q == t, "just checking"); + } + ++ ++#ifdef ASSERT ++ ++int CompactibleSpace::space_index(oop obj) { ++ GenCollectedHeap* heap = GenCollectedHeap::heap(); ++ ++ if (heap->is_in_permanent(obj)) { ++ return -1; ++ } ++ ++ int index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ if (space->is_in_reserved(obj)) { ++ return index; ++ } ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ tty->print_cr("could not compute space_index for %08xh", obj); ++ index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); ++ ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ ShouldNotReachHere(); ++ return 0; ++} ++#endif ++ ++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { ++ ++ assert(is_in_reserved(old_obj), "old_obj must be in this space"); ++ ++ if (old_obj->is_perm()) { ++ // This object is in perm gen; check for invariant obj->klass() <= obj ++ if (oop(old_obj)->blueprint()->new_version() != NULL) { ++ return true; ++ } ++ } ++ ++ int size = old_obj->size(); ++ int original_size = size; ++ if (oop(old_obj)->blueprint()->is_redefining()) { ++ assert(oop(old_obj)->blueprint()->old_version() != NULL, "must not be null"); ++ original_size = oop(old_obj)->size_given_klass(oop(old_obj)->blueprint()->old_version()->klass_part()); ++ } 
else if (oop(old_obj)->blueprint()->new_version() != NULL) { ++ size = oop(old_obj)->size_given_klass(oop(old_obj)->blueprint()->new_version()->klass_part()); ++ } ++ ++ bool normalComparison = (old_obj + original_size < new_obj + size); ++ ++ if (is_in_reserved(new_obj)) { ++ // Old and new address are in same space, so just compare the address. ++ // Must rescue if object moves towards the top of the space. ++ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); ++ return normalComparison; ++ ++ } else { ++ ++ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); ++ ++ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); ++ if (tenured_gen->is_in_reserved(new_obj)) { ++ // Must never rescue when moving from the new into the old generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) > space_index(new_obj), "must be"); ++ return false; ++ ++ } else if (tenured_gen->is_in_reserved(old_obj)) { ++ // Must always rescue when moving from the old into the new generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) < space_index(new_obj), "must be"); ++ return true; ++ ++ } else { ++ // In the new generation, eden is located before the from space, so a ++ // simple pointer comparison is sufficient. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert((normalComparison) == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); ++ return normalComparison; ++ } ++ } ++} ++ ++oop CompactibleSpace::rescue(oop old_obj) { ++ assert(must_rescue(old_obj, old_obj->forwardee()), "do not call otherwise"); ++ ++ int size = old_obj->size(); ++ oop rescued_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)old_obj, (HeapWord*)rescued_obj, size); ++ ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ ++ MarkSweep::_rescued_oops->append(rescued_obj); ++ return rescued_obj; ++} ++ + void CompactibleSpace::adjust_pointers() { + // Check first is there is any work to do. + if (used() == 0) { + return; // Nothing to do. + } ++ /* adjust all the interior pointers to point at the new locations of objects ++ * Used by MarkSweep::mark_sweep_phase3() */ + +- SCAN_AND_ADJUST_POINTERS(adjust_obj_size); ++ HeapWord* q = bottom(); ++ HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ ++ ++ assert(_first_dead <= _end_of_live, "Stands to reason, no?"); ++ ++ debug_only(HeapWord* prev_q = NULL); ++ debug_only(HeapWord* prev_prev_q = NULL); ++ debug_only(HeapWord* prev_prev_prev_q = NULL); ++ if (q < t && _first_dead > q && ++ !oop(q)->is_gc_marked()) { ++ /* we have a chunk of the space which hasn't moved and we've ++ * reinitialized the mark word during the previous pass, so we can't ++ * use is_gc_marked for the traversal. 
*/ ++ HeapWord* end = _first_dead; ++ ++ while (q < end) { ++ /* I originally tried to conjoin "block_start(q) == q" to the ++ * assertion below, but that doesn't work, because you can't ++ * accurately traverse previous objects to get to the current one ++ * after their pointers (including pointers into permGen) have been ++ * updated, until the actual compaction is done. dld, 4/00 */ ++ assert(block_is_obj(q), ++ "should be at block boundaries, and should be looking at objs"); ++ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); ++ ++ /* point all the oops to the new location */ ++ size_t size = oop(q)->adjust_pointers(); ++ size = adjust_obj_size(size); ++ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); ++ ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q += size; ++ } ++ ++ // (tw) first_dead can be live object! ++ q = _first_dead; ++ ++// if (_first_dead == t) { ++// q = t; ++// } else { ++// /* $$$ This is funky. Using this to read the previously written ++// * LiveRange. See also use below. */ ++// q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); ++// } ++ } ++ ++ const intx interval = PrefetchScanIntervalInBytes; ++ ++ debug_only(prev_q = NULL); ++ debug_only(prev_prev_q = NULL); ++ debug_only(prev_prev_prev_q = NULL); ++ while (q < t) { ++ /* prefetch beyond q */ ++ Prefetch::write(q, interval); ++ if (oop(q)->is_gc_marked()) { ++ /* q is alive */ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); ++ /* point all the oops to the new location */ ++ size_t size = oop(q)->adjust_pointers(); ++ size = adjust_obj_size(size); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q += size; ++ } else { ++ /* q is not a live object, so its mark should point at the next ++ * live object */ ++ debug_only(prev_prev_prev_q = prev_prev_q); ++ debug_only(prev_prev_q = prev_q); ++ debug_only(prev_q = q); ++ q = (HeapWord*) oop(q)->mark()->decode_pointer(); ++ assert(q > prev_q, "we should be moving forward through memory"); ++ } ++ } ++ ++ assert(q == t, "just checking"); + } + + void CompactibleSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ ++ if(!Universe::is_redefining_gc_run()) { ++ SCAN_AND_COMPACT(obj_size); ++ return; ++ } ++ ++ /* Copy all live objects to their new location ++ * Used by MarkSweep::mark_sweep_phase4() */ ++ ++ HeapWord* q = bottom(); ++ HeapWord* const t = _end_of_live; ++ debug_only(HeapWord* prev_q = NULL); ++ ++ if (q < t && _first_dead > q && ++ !oop(q)->is_gc_marked()) { ++ debug_only( ++ /* we have a chunk of the space which hasn't moved and we've reinitialized ++ * the mark word during the previous pass, so we can't use is_gc_marked for ++ * the traversal. */ ++ HeapWord* const end = _first_dead; ++ ++ while (q < end) { ++ size_t size = obj_size(q); // FIXME: idubrov oop(q)->size(); ++ assert(!oop(q)->is_gc_marked(), ++ "should be unmarked (special dense prefix handling)"); ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); ++ debug_only(prev_q = q); ++ q += size; ++ } ++ ) /* debug_only */ ++ // (tw) first_dead can be live object! 
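++ // Since objects may change size, _first_dead can refer to a live object
++ // that is about to move rather than to a LiveRange header, so the scan
++ // resumes at _first_dead itself instead of decoding a pointer from its mark.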
++ q = _first_dead; ++ ++ //if (_first_dead == t) { ++ // q = t; ++ //} else { ++ ///* $$$ Funky */ ++ //q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); ++ //} ++ } ++ ++ const intx scan_interval = PrefetchScanIntervalInBytes; ++ const intx copy_interval = PrefetchCopyIntervalInBytes; ++ while (q < t) { ++ if (!oop(q)->is_gc_marked()) { ++ /* mark is pointer to next marked oop */ ++ debug_only(prev_q = q); ++ q = (HeapWord*) oop(q)->mark()->decode_pointer(); ++ assert(q > prev_q, "we should be moving forward through memory"); ++ } else { ++ /* prefetch beyond q */ ++ Prefetch::read(q, scan_interval); ++ ++ /* size and destination */ ++ size_t size = obj_size(q); ++ HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); ++ ++ if (must_rescue(oop(q), oop(q)->forwardee())) { ++ oop dest_obj = rescue(oop(q)); ++ debug_only(Copy::fill_to_words(q, size, 0)); ++ } else { ++ ++ /* prefetch beyond compaction_top */ ++ Prefetch::write(compaction_top, copy_interval); ++ ++ /* copy object and reinit its mark */ ++ VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, ++ compaction_top)); ++ assert(q != compaction_top || oop(q)->blueprint()->new_version() != NULL, "everything in this pass should be moving"); ++ ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ MarkSweep::update_fields(oop(q), oop(compaction_top)); ++ } else { ++ Copy::aligned_conjoint_words(q, compaction_top, size); ++ } ++ oop(compaction_top)->init_mark(); ++ assert(oop(compaction_top)->klass() != NULL, "should have a class"); ++ } ++ ++ debug_only(prev_q = q); ++ q += size; ++ } ++ } ++ ++ /* Let's remember if we were empty before we did the compaction. */ ++ bool was_empty = used_region().is_empty(); ++ /* Reset space after compaction is complete */ ++ reset_after_compaction(); ++ /* We do this clear, below, since it has overloaded meanings for some */ ++ /* space subtypes. For example, OffsetTableContigSpace's that were */ ++ /* compacted into will have had their offset table thresholds updated */ ++ /* continuously, but those that weren't need to have their thresholds */ ++ /* re-initialized. Also mangles unused area for debugging. */ ++ if (used_region().is_empty()) { ++ if (!was_empty) clear(SpaceDecorator::Mangle); ++ } else { ++ if (ZapUnusedHeapArea) mangle_unused_area(); ++ } ++ ++ //SCAN_AND_COMPACT(obj_size); + } + + void Space::print_short() const { print_short_on(tty); } +diff -r 6c6a2299029a src/share/vm/memory/space.hpp +--- a/src/share/vm/memory/space.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/memory/space.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -445,6 +445,9 @@ + // indicates when the next such action should be taken. + virtual void prepare_for_compaction(CompactPoint* cp); + // MarkSweep support phase3 ++ DEBUG_ONLY(int space_index(oop obj)); ++ bool must_rescue(oop old_obj, oop new_obj); ++ oop rescue(oop old_obj); + virtual void adjust_pointers(); + // MarkSweep support phase4 + virtual void compact(); +@@ -475,6 +478,10 @@ + virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, + HeapWord* compact_top); + ++ // (tw) ++ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, ++ HeapWord* compact_top); ++ + // Return a size with adjusments as required of the space. 
+ virtual size_t adjust_object_size_v(size_t size) const { return size; } + +diff -r 6c6a2299029a src/share/vm/memory/universe.cpp +--- a/src/share/vm/memory/universe.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/memory/universe.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -100,6 +100,8 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" + #endif + ++bool Universe::_is_redefining_gc_run = false; ++ + // Known objects + klassOop Universe::_boolArrayKlassObj = NULL; + klassOop Universe::_byteArrayKlassObj = NULL; +@@ -204,6 +206,38 @@ + f(systemObjArrayKlassObj()); + } + ++// DCEVM: This method should iterate all pointers that are not within heap objects. ++void Universe::root_oops_do(OopClosure *oopClosure) { ++ ++ class AlwaysTrueClosure: public BoolObjectClosure { ++ public: ++ void do_object(oop p) { ShouldNotReachHere(); } ++ bool do_object_b(oop p) { return true; } ++ }; ++ AlwaysTrueClosure always_true; ++ ++ // General strong roots ++ Universe::oops_do(oopClosure); ++ JNIHandles::oops_do(oopClosure); ++ Threads::oops_do(oopClosure, NULL); ++ ObjectSynchronizer::oops_do(oopClosure); ++ FlatProfiler::oops_do(oopClosure); ++ //Management::oops_do(oopClosure); // DCEVM: TODO: Check if this is correct? ++ JvmtiExport::oops_do(oopClosure); ++ // SO_AllClasses ++ SystemDictionary::oops_do(oopClosure); ++ ++ // Now adjust pointers in remaining weak roots. (All of which should ++ // have been cleared if they pointed to non-surviving objects.) ++ // Global (weak) JNI handles ++ JNIHandles::weak_oops_do(&always_true, oopClosure); ++ ++ CodeCache::oops_do(oopClosure); ++ StringTable::oops_do(oopClosure); ++ //ref_processor()->weak_oops_do(&oopClosure); // DCEVM: TODO: Check if this is correct? ++ //PSScavenge::reference_processor()->weak_oops_do(&oopClosure); // DCEVM: TODO: Check if this is correct? ++} ++ + void Universe::oops_do(OopClosure* f, bool do_all) { + + f->do_oop((oop*) &_int_mirror); +diff -r 6c6a2299029a src/share/vm/memory/universe.hpp +--- a/src/share/vm/memory/universe.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/memory/universe.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -127,6 +127,8 @@ + friend class SystemDictionary; + friend class VMStructs; + friend class CompactingPermGenGen; ++ friend class Space; ++ friend class ContiguousSpace; + friend class VM_PopulateDumpSharedSpace; + + friend jint universe_init(); +@@ -258,7 +260,18 @@ + + static void compute_verify_oop_data(); + ++ static bool _is_redefining_gc_run; ++ + public: ++ ++ static bool is_redefining_gc_run() { ++ return _is_redefining_gc_run; ++ } ++ ++ static void set_redefining_gc_run(bool b) { ++ _is_redefining_gc_run = b; ++ } ++ + // Known classes in the VM + static klassOop boolArrayKlassObj() { return _boolArrayKlassObj; } + static klassOop byteArrayKlassObj() { return _byteArrayKlassObj; } +@@ -403,6 +416,8 @@ + + // Iteration + ++ static void root_oops_do(OopClosure *f); ++ + // Apply "f" to the addresses of all the direct heap pointers maintained + // as static fields of "Universe". 
+ static void oops_do(OopClosure* f, bool do_all = false); +@@ -419,6 +434,7 @@ + + // Debugging + static bool verify_in_progress() { return _verify_in_progress; } ++ static void set_verify_in_progress(bool b) { _verify_in_progress = b; } + static void verify(bool silent, VerifyOption option); + static void verify(bool silent) { + verify(silent, VerifyOption_Default /* option */); +diff -r 6c6a2299029a src/share/vm/oops/arrayKlass.cpp +--- a/src/share/vm/oops/arrayKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/arrayKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -129,9 +129,9 @@ + + bool arrayKlass::compute_is_subtype_of(klassOop k) { + // An array is a subtype of Serializable, Clonable, and Object +- return k == SystemDictionary::Object_klass() +- || k == SystemDictionary::Cloneable_klass() +- || k == SystemDictionary::Serializable_klass(); ++ return k->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() ++ || k->klass_part()->newest_version() == SystemDictionary::Cloneable_klass()->klass_part()->newest_version() ++ || k->klass_part()->newest_version() == SystemDictionary::Serializable_klass()->klass_part()->newest_version(); + } + + +diff -r 6c6a2299029a src/share/vm/oops/constMethodKlass.cpp +--- a/src/share/vm/oops/constMethodKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/constMethodKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -102,6 +102,7 @@ + constMethodOop cm = constMethodOop(obj); + MarkSweep::mark_and_push(cm->adr_constants()); + MarkSweep::mark_and_push(cm->adr_stackmap_data()); ++ MarkSweep::mark_and_push(cm->adr_code_section_table()); + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::constMethodKlassObj never moves. + } +@@ -113,6 +114,7 @@ + constMethodOop cm_oop = constMethodOop(obj); + PSParallelCompact::mark_and_push(cm, cm_oop->adr_constants()); + PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data()); ++ PSParallelCompact::mark_and_push(cm, cm_oop->adr_code_section_table()); + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::constMethodKlassObj never moves. + } +@@ -123,6 +125,7 @@ + constMethodOop cm = constMethodOop(obj); + blk->do_oop(cm->adr_constants()); + blk->do_oop(cm->adr_stackmap_data()); ++ blk->do_oop(cm->adr_code_section_table()); + // Get size before changing pointers. + // Don't call size() or oop_size() since that is a virtual call. + int size = cm->object_size(); +@@ -138,6 +141,8 @@ + if (mr.contains(adr)) blk->do_oop(adr); + adr = cm->adr_stackmap_data(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = cm->adr_code_section_table(); ++ if (mr.contains(adr)) blk->do_oop(adr); + // Get size before changing pointers. + // Don't call size() or oop_size() since that is a virtual call. + int size = cm->object_size(); +@@ -152,6 +157,7 @@ + constMethodOop cm = constMethodOop(obj); + MarkSweep::adjust_pointer(cm->adr_constants()); + MarkSweep::adjust_pointer(cm->adr_stackmap_data()); ++ MarkSweep::adjust_pointer(cm->adr_code_section_table()); + // Get size before changing pointers. + // Don't call size() or oop_size() since that is a virtual call. 
+ int size = cm->object_size(); +diff -r 6c6a2299029a src/share/vm/oops/constMethodOop.hpp +--- a/src/share/vm/oops/constMethodOop.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/constMethodOop.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -129,7 +129,7 @@ + + public: + oop* oop_block_beg() const { return adr_constants(); } +- oop* oop_block_end() const { return adr_stackmap_data() + 1; } ++ oop* oop_block_end() const { return adr_code_section_table() + 1; } + + private: + // +@@ -141,6 +141,9 @@ + // Raw stackmap data for the method + typeArrayOop _stackmap_data; + ++ // (tw) Table mapping code sections for method forward points. ++ typeArrayOop _code_section_table; ++ + // + // End of the oop block. + // +@@ -195,6 +198,28 @@ + } + bool has_stackmap_table() const { return _stackmap_data != NULL; } + ++ // code section table ++ typeArrayOop code_section_table() const { return _code_section_table; } ++ void set_code_section_table(typeArrayOop e) { oop_store_without_check((oop*) &_code_section_table, (oop) e); } ++ bool has_code_section_table() const { return code_section_table() != NULL && code_section_table()->length() > 0; } ++ static const int ValuesPerCodeSectionEntry = 3; ++ int code_section_entries() const { ++ if (!has_code_section_table()) return 0; ++ return _code_section_table->length() / ValuesPerCodeSectionEntry; ++ } ++ ++ int code_section_new_index_at(int index) const { ++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry); ++ } ++ ++ int code_section_original_index_at(int index) const { ++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry + 1); ++ } ++ ++ int code_section_length_at(int index) const { ++ return _code_section_table->short_at(index * ValuesPerCodeSectionEntry + 2); ++ } ++ + void init_fingerprint() { + const uint64_t initval = CONST64(0x8000000000000000); + _fingerprint = initval; +@@ -301,6 +326,7 @@ + // Garbage collection support + oop* adr_constants() const { return (oop*)&_constants; } + oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; } ++ oop* adr_code_section_table() const { return (oop*)&_code_section_table; } + bool is_conc_safe() { return _is_conc_safe; } + void set_is_conc_safe(bool v) { _is_conc_safe = v; } + +diff -r 6c6a2299029a src/share/vm/oops/cpCacheOop.cpp +--- a/src/share/vm/oops/cpCacheOop.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/cpCacheOop.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -37,9 +37,15 @@ + + // Implememtation of ConstantPoolCacheEntry + ++void ConstantPoolCacheEntry::copy_from(ConstantPoolCacheEntry *other) { ++ _flags = other->_flags; // flags ++} ++ + void ConstantPoolCacheEntry::initialize_entry(int index) { + assert(0 < index && index < 0x10000, "sanity check"); + _indices = index; ++ _f1 = NULL; ++ _f2 = 0; + assert(constant_pool_index() == index, ""); + } + +@@ -162,7 +168,8 @@ + int vtable_index) { + assert(!is_secondary_entry(), ""); + assert(method->interpreter_entry() != NULL, "should have been set at this point"); +- assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); ++ // (tw) No longer valid assert ++ //assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); + + int byte_no = -1; + bool change_to_virtual = false; +@@ -183,6 +190,7 @@ + set_method_flags(as_TosState(method->result_type()), + ( 1 << is_vfinal_shift) | + ((method->is_final_method() ? 1 : 0) << is_final_shift) | ++ ((method->is_old() ? 1 : 0) << is_old_method_shift) | + ((change_to_virtual ? 
1 : 0) << is_forced_virtual_shift), + method()->size_of_parameters()); + set_f2_as_vfinal_method(method()); +@@ -190,9 +198,13 @@ + assert(vtable_index >= 0, "valid index"); + assert(!method->is_final_method(), "sanity"); + set_method_flags(as_TosState(method->result_type()), ++ ((method->is_old() ? 1 : 0) << is_old_method_shift) | + ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift), + method()->size_of_parameters()); + set_f2(vtable_index); ++ ++ // (tw) save method holder in f1 for virtual calls ++ set_f1(method()); + } + byte_no = 2; + break; +@@ -206,7 +218,8 @@ + // Once is_vfinal is set, it must stay that way, lest we get a dangling oop. + set_method_flags(as_TosState(method->result_type()), + ((is_vfinal() ? 1 : 0) << is_vfinal_shift) | +- ((method->is_final_method() ? 1 : 0) << is_final_shift), ++ ((method->is_final_method() ? 1 : 0) << is_final_shift) | ++ ((method->is_old() ? 1 : 0) << is_old_method_shift), + method()->size_of_parameters()); + set_f1(method()); + byte_no = 1; +@@ -259,7 +272,7 @@ + set_f1(interf); + set_f2(index); + set_method_flags(as_TosState(method->result_type()), +- 0, // no option bits ++ ((method->is_old() ? 1 : 0) << is_old_method_shift), + method()->size_of_parameters()); + set_bytecode_1(Bytecodes::_invokeinterface); + } +@@ -520,27 +533,12 @@ + // If this constantPoolCacheEntry refers to old_method then update it + // to refer to new_method. + bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, +- methodOop new_method, bool * trace_name_printed) { ++ methodOop new_method) { + + if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- if (f2_as_vfinal_method() == old_method) { +- // match old_method so need an update +- // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values +- _f2 = (intptr_t)new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- ++ // virtual and final so f2() contains method ptr instead of vtable index ++ if (f2_as_vfinal_method() != NULL && f2_as_vfinal_method()->method_holder()->klass_part()->new_version()) { ++ initialize_entry(constant_pool_index()); + return true; + } + +@@ -548,84 +546,27 @@ + return false; + } + +- if ((oop)_f1 == NULL) { +- // NULL f1() means this is a virtual entry so bail out +- // We are assuming that the vtable index does not need change. ++ // (tw) check how to update interface methods! 
++ if (bytecode_1() == Bytecodes::_invokevirtual || bytecode_2() == Bytecodes::_invokevirtual) { ++ ++ if(f1_as_method()->method_holder()->klass_part()->new_version()) { ++ initialize_entry(constant_pool_index()); ++ return true; ++ } ++ + return false; + } + + if ((oop)_f1 == old_method) { + _f1 = new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- + return true; ++ } else if(_f1 != NULL && (bytecode_1() != Bytecodes::_invokeinterface && f1_as_method()->method_holder()->klass_part()->new_version())) { ++ initialize_entry(constant_pool_index()); + } + + return false; + } + +-// a constant pool cache entry should never contain old or obsolete methods +-bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- methodOop m = (methodOop)_f2; +- // Return false if _f2 refers to an old or an obsolete method. +- // _f2 == NULL || !m->is_method() are just as unexpected here. +- return (m != NULL && m->is_method() && !m->is_old() && !m->is_obsolete()); +- } else if ((oop)_f1 == NULL || !((oop)_f1)->is_method()) { +- // _f1 == NULL || !_f1->is_method() are OK here +- return true; +- } +- +- methodOop m = (methodOop)_f1; +- // return false if _f1 refers to an old or an obsolete method +- return (!m->is_old() && !m->is_obsolete()); +-} +- +-bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { +- if (!is_method_entry()) { +- // not a method entry so not interesting by default +- return false; +- } +- +- methodOop m = NULL; +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- m = f2_as_vfinal_method(); +- } else if (is_f1_null()) { +- // NULL _f1 means this is a virtual entry so also not interesting +- return false; +- } else { +- oop f1 = _f1; // _f1 is volatile +- if (!f1->is_method()) { +- // _f1 can also contain a klassOop for an interface +- return false; +- } +- m = f1_as_method(); +- } +- +- assert(m != NULL && m->is_method(), "sanity check"); +- if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) { +- // robustness for above sanity checks or method is not in +- // the interesting class +- return false; +- } +- +- // the method is in the interesting class so the entry is interesting +- return true; +-} +- + void ConstantPoolCacheEntry::print(outputStream* st, int index) const { + // print separator + if (index == 0) st->print_cr(" -------------"); +@@ -663,60 +604,18 @@ + } + } + +-// RedefineClasses() API support: +-// If any entry of this constantPoolCache points to any of +-// old_methods, replace it with the corresponding new_method. 
+-void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed) { +- +- if (methods_length == 0) { +- // nothing to do if there are no methods +- return; +- } +- +- // get shorthand for the interesting class +- klassOop old_holder = old_methods[0]->method_holder(); ++void constantPoolCacheOopDesc::adjust_entries(methodOop* old_methods, methodOop* new_methods, ++ int methods_length) { + + for (int i = 0; i < length(); i++) { +- if (!entry_at(i)->is_interesting_method_entry(old_holder)) { +- // skip uninteresting methods +- continue; +- } +- +- // The constantPoolCache contains entries for several different +- // things, but we only care about methods. In fact, we only care +- // about methods in the same class as the one that contains the +- // old_methods. At this point, we have an interesting entry. +- +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; +- methodOop new_method = new_methods[j]; +- +- if (entry_at(i)->adjust_method_entry(old_method, new_method, +- trace_name_printed)) { +- // current old_method matched this entry and we updated it so +- // break out and get to the next interesting entry if there one +- break; +- } ++ if (entry_at(i)->is_field_entry()) { ++ // (tw) TODO: Update only field offsets and modify only constant pool entries that ++ // point to changed fields ++ entry_at(i)->initialize_entry(entry_at(i)->constant_pool_index()); ++ } else if(entry_at(i)->is_method_entry()) { ++ entry_at(i)->adjust_method_entry(NULL, NULL); + } + } + } + +-// the constant pool cache should never contain old or obsolete methods +-bool constantPoolCacheOopDesc::check_no_old_or_obsolete_entries() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL) && +- !entry_at(i)->check_no_old_or_obsolete_entries()) { +- return false; +- } +- } +- return true; +-} + +-void constantPoolCacheOopDesc::dump_cache() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL)) { +- entry_at(i)->print(tty, i); +- } +- } +-} +diff -r 6c6a2299029a src/share/vm/oops/cpCacheOop.hpp +--- a/src/share/vm/oops/cpCacheOop.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/cpCacheOop.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -136,7 +136,8 @@ + void set_bytecode_2(Bytecodes::Code code); + void set_f1(oop f1) { + oop existing_f1 = _f1; // read once +- assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); ++ // (tw) need to relax assertion for redefinition ++ // assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); + oop_store(&_f1, f1); + } + void release_set_f1(oop f1); +@@ -167,6 +168,7 @@ + tos_state_mask = right_n_bits(tos_state_bits), + tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below + // misc. 
option bits; can be any bit position in [16..27] ++ is_old_method_shift = 19, + is_vfinal_shift = 20, + is_volatile_shift = 21, + is_final_shift = 22, +@@ -200,6 +202,8 @@ + void initialize_entry(int original_index); // initialize primary entry + void initialize_secondary_entry(int main_index); // initialize secondary entry + ++ void copy_from(ConstantPoolCacheEntry *other); ++ + void set_field( // sets entry to resolved field state + Bytecodes::Code get_code, // the bytecode used for reading the field + Bytecodes::Code put_code, // the bytecode used for writing the field +@@ -361,10 +365,7 @@ + // trace_name_printed is set to true if the current call has + // printed the klass name so that other routines in the adjust_* + // group don't print the klass name. +- bool adjust_method_entry(methodOop old_method, methodOop new_method, +- bool * trace_name_printed); +- bool check_no_old_or_obsolete_entries(); +- bool is_interesting_method_entry(klassOop k); ++ bool adjust_method_entry(methodOop old_method, methodOop new_method); + + // Debugging & Printing + void print (outputStream* st, int index) const; +@@ -485,16 +486,9 @@ + return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index); + } + +- // RedefineClasses() API support: +- // If any entry of this constantPoolCache points to any of +- // old_methods, replace it with the corresponding new_method. +- // trace_name_printed is set to true if the current call has +- // printed the klass name so that other routines in the adjust_* +- // group don't print the klass name. +- void adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed); +- bool check_no_old_or_obsolete_entries(); +- void dump_cache(); ++ // (tw) Update method and field references ++ void adjust_entries(methodOop* old_methods, methodOop* new_methods, ++ int methods_length); + }; + + #endif // SHARE_VM_OOPS_CPCACHEOOP_HPP +diff -r 6c6a2299029a src/share/vm/oops/instanceKlass.cpp +--- a/src/share/vm/oops/instanceKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/instanceKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -250,12 +250,118 @@ + } + + ++void instanceKlass::initialize_redefined_class() { ++ RC_TRACE(0x00000400, ("initializing redefined class %s", ++ name()->as_C_string())); ++ ++ assert(!is_initialized(), ""); ++ assert(this->old_version() != NULL, ""); ++ assert(is_linked(), "must be linked before"); ++ ++ ++ instanceKlassHandle this_oop(Thread::current(), this->as_klassOop()); ++ class UpdateStaticFieldClosure : public FieldClosure { ++ ++ private: ++ instanceKlassHandle this_oop; ++ ++ public: ++ UpdateStaticFieldClosure(instanceKlassHandle this_oop) { ++ this->this_oop = this_oop; ++ } ++ ++ virtual void do_field(fieldDescriptor* fd) { ++ fieldDescriptor result; ++ bool found = ((instanceKlass *)(this_oop->old_version()->klass_part()))->find_local_field(fd->name(), fd->signature(), &result); ++ ++ if (found && result.is_static()) { ++ int old_offset = result.offset(); ++ assert(result.field_type() == fd->field_type(), "Old and new field type does not match"); ++ ++ oop new_location = this_oop()->java_mirror(); ++ oop old_location = this_oop->old_version()->java_mirror(); ++ int offset = fd->offset(); ++ RC_TRACE(0x00000400, ("Copying static field value for field '%s' old_offset=%d new_offset=%d", ++ fd->name()->as_C_string(), old_offset, offset)); ++ ++ oop cur_oop; ++ ++ switch(result.field_type()) { ++ ++ // Found static field with same name and type in the old klass => copy 
value from old to new klass ++ ++ case T_BOOLEAN: ++ new_location->bool_field_put(offset, old_location->bool_field(old_offset)); ++ DEBUG_ONLY(old_location->byte_field_put(old_offset, 0)); ++ break; ++ ++ case T_CHAR: ++ new_location->char_field_put(offset, old_location->char_field(old_offset)); ++ DEBUG_ONLY(old_location->char_field_put(old_offset, 0)); ++ break; ++ ++ case T_FLOAT: ++ new_location->float_field_put(offset, old_location->float_field(old_offset)); ++ DEBUG_ONLY(old_location->float_field_put(old_offset, 0)); ++ break; ++ ++ case T_DOUBLE: ++ new_location->double_field_put(offset, old_location->double_field(old_offset)); ++ DEBUG_ONLY(old_location->double_field_put(old_offset, 0)); ++ break; ++ ++ case T_BYTE: ++ new_location->byte_field_put(offset, old_location->byte_field(old_offset)); ++ DEBUG_ONLY(old_location->byte_field_put(old_offset, 0)); ++ break; ++ ++ case T_SHORT: ++ new_location->short_field_put(offset, old_location->short_field(old_offset)); ++ DEBUG_ONLY(old_location->short_field_put(old_offset, 0)); ++ break; ++ ++ case T_INT: ++ new_location->int_field_put(offset, old_location->int_field(old_offset)); ++ DEBUG_ONLY(old_location->int_field_put(old_offset, 0)); ++ break; ++ ++ case T_LONG: ++ new_location->long_field_put(offset, old_location->long_field(old_offset)); ++ DEBUG_ONLY(old_location->long_field_put(old_offset, 0)); ++ break; ++ ++ case T_OBJECT: ++ case T_ARRAY: ++ cur_oop = old_location->obj_field(old_offset); ++ new_location->obj_field_put_raw(offset, cur_oop); ++ old_location->obj_field_put_raw(old_offset, NULL); ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ } else { ++ RC_TRACE(0x00000200, ("New static field %s has_initial_value=%d", ++ fd->name()->as_C_string(), (int)(fd->has_initial_value()))); ++ // field not found ++ // (tw) TODO: Probably this call is not necessary here! ++ // FIXME: idubrov ++ //ClassFileParser::initialize_static_field(fd, Thread::current()); ++ } ++ } ++ }; ++ ++ UpdateStaticFieldClosure cl(this_oop); ++ this->do_local_static_fields(&cl); ++} ++ ++ + bool instanceKlass::verify_code( + instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) { + // 1) Verify the bytecodes + Verifier::Mode mode = + throw_verifyerror ? Verifier::ThrowException : Verifier::NoException; +- return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false); ++ return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), true, CHECK_false); + } + + +@@ -362,7 +468,13 @@ + jt->get_thread_stat()->perf_recursion_counts_addr(), + jt->get_thread_stat()->perf_timers_addr(), + PerfClassTraceTime::CLASS_VERIFY); +- bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(true); ++ } ++ bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(false); ++ } + if (!verify_ok) { + return false; + } +@@ -400,7 +512,8 @@ + } + #endif + this_oop->set_init_state(linked); +- if (JvmtiExport::should_post_class_prepare()) { ++ // (tw) Must check for old version in order to prevent infinite loops. 
++ if (JvmtiExport::should_post_class_prepare() && this_oop->old_version() == NULL /* JVMTI deadlock otherwise */) { + Thread *thread = THREAD; + assert(thread->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop()); +@@ -673,6 +786,18 @@ + return false; + } + ++bool instanceKlass::implements_interface_any_version(klassOop k) const { ++ k = k->klass_part()->newest_version(); ++ if (this->newest_version() == k) return true; ++ assert(Klass::cast(k)->is_interface(), "should be an interface class"); ++ for (int i = 0; i < transitive_interfaces()->length(); i++) { ++ if (((klassOop)transitive_interfaces()->obj_at(i))->klass_part()->newest_version() == k) { ++ return true; ++ } ++ } ++ return false; ++} ++ + objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) { + if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException()); + if (length > arrayOopDesc::max_array_length(T_OBJECT)) { +@@ -801,7 +926,25 @@ + } + + void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) { ++ ++ ResourceMark rm(THREAD); + methodHandle h_method(THREAD, this_oop->class_initializer()); ++ ++ if (this_oop->revision_number() != -1){ ++ methodOop m = NULL; ++ if (AllowAdvancedClassRedefinition) { ++ m = this_oop->find_method(vmSymbols::static_transformer_name(), vmSymbols::void_method_signature()); ++ } ++ methodHandle method(m); ++ if (method() != NULL && method()->is_static()) { ++ RC_TRACE(0x00000200, ("Calling static transformer instead of static initializer")); ++ h_method = method; ++ } else if (!((instanceKlass*)this_oop->old_version()->klass_part())->is_not_initialized()) { ++ // Only execute the static initializer, if it was not yet executed for the old version of the class. 
++ return; ++ } ++ } ++ + assert(!this_oop->is_initialized(), "we cannot initialize twice"); + if (TraceClassInitialization) { + tty->print("%d Initializing ", call_class_initializer_impl_counter++); +@@ -949,6 +1092,137 @@ + } + } + ++void instanceKlass::store_update_information(GrowableArray<int> &values) { ++ int *arr = NEW_C_HEAP_ARRAY(int, values.length(), mtClass); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_update_information(arr); ++} ++ ++void instanceKlass::clear_update_information() { ++ FREE_C_HEAP_ARRAY(int, update_information(), mtClass); ++ set_update_information(NULL); ++} ++ ++typedef Pair<int, klassOop> typeInfoPair; ++ ++void instanceKlass::store_type_check_information(GrowableArray< Pair<int, klassOop> > &values) { ++ Pair<int, klassOop> *arr = NEW_C_HEAP_ARRAY(typeInfoPair, values.length(), mtClass); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_type_check_information(arr); ++} ++ ++void instanceKlass::clear_type_check_information() { ++ FREE_C_HEAP_ARRAY(typeInfoPair, type_check_information(), mtClass); ++ set_type_check_information(NULL); ++} ++ ++void instanceKlass::do_fields_evolution(FieldEvolutionClosure* cl) { ++ ++ assert (old_version() != NULL, "must have old version!"); ++ ++ klassOop old_klass_oop = old_version(); ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = this; ++ ++ fieldDescriptor fd; ++ fieldDescriptor old_fd; ++ ++ instanceKlass *cur_new_klass = new_klass; ++ klassOop cur_new_klass_oop = this->as_klassOop(); ++ ++ if (_fields_not_changed) { ++ ++ class MyFieldClosure : public FieldClosure { ++ ++ FieldEvolutionClosure *_cl; ++ public: ++ MyFieldClosure(FieldEvolutionClosure *cl) {_cl = cl; } ++ virtual void do_field(fieldDescriptor* fd) { ++ _cl->do_changed_field(fd, fd); ++ } ++ }; ++ ++ MyFieldClosure mfc(cl); ++ do_nonstatic_fields(&mfc); ++ } else { ++ ++ _fields_not_changed = true; ++ GrowableArray<fieldDescriptor> fds; ++ while (true) { ++ for (JavaFieldStream fs(cur_new_klass); !fs.done(); fs.next()) { ++ fd.initialize(cur_new_klass_oop, fs.index()); ++ if (fd.is_static()) { ++ continue; ++ } ++ fds.append(fd); ++ } ++ ++ if (cur_new_klass->super() != NULL) { ++ cur_new_klass_oop = cur_new_klass->super(); ++ cur_new_klass = instanceKlass::cast(cur_new_klass_oop); ++ } else { ++ break; ++ } ++ } ++ ++ GrowableArray<fieldDescriptor> sortedFds; ++ while (fds.length() > 0) { ++ int minOffset = 0x7fffffff; ++ int minIndex = -1; ++ for (int i=0; i<fds.length(); i++) { ++ int curOffset = fds.adr_at(i)->offset(); ++ if (curOffset < minOffset) { ++ minOffset = curOffset; ++ minIndex = i; ++ } ++ } ++ ++ sortedFds.append(fds.at(minIndex)); ++ fds.remove_at(minIndex); ++ } ++ ++ ++ for (int i=0; i<sortedFds.length(); i++) { ++ fieldDescriptor &fd = *sortedFds.adr_at(i); ++ ++ char found = 0; ++ instanceKlass *cur_old_klass = old_klass; ++ klassOop cur_old_klass_oop = old_klass_oop; ++ while (true) { ++ for (JavaFieldStream fs(cur_old_klass); !fs.done(); fs.next()) { ++ old_fd.initialize(cur_old_klass_oop, fs.index()); ++ if (old_fd.is_static()) { ++ continue; ++ } ++ if (old_fd.name() == fd.name() && old_fd.signature() == fd.signature()) { ++ found = 1; ++ break; ++ } ++ } ++ if (!found && cur_old_klass->super()) { ++ cur_old_klass_oop = cur_old_klass->super(); ++ cur_old_klass = instanceKlass::cast(cur_old_klass_oop); ++ } else { ++ break; ++ } ++ } ++ ++ if (found) { ++ if (old_fd.offset() != fd.offset()) { ++ _fields_not_changed = 
false; ++ } ++ cl->do_changed_field(&old_fd, &fd); ++ } else { ++ _fields_not_changed = false; ++ cl->do_new_field(&fd); ++ } ++ } ++ } ++} + + void instanceKlass::do_local_static_fields(FieldClosure* cl) { + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { +@@ -1368,6 +1642,20 @@ + return id; + } + ++bool instanceKlass::update_jmethod_id(methodOop method, jmethodID newMethodID) { ++ size_t idnum = (size_t)method->method_idnum(); ++ jmethodID* jmeths = methods_jmethod_ids_acquire(); ++ size_t length; // length assigned as debugging crumb ++ jmethodID id = NULL; ++ if (jmeths != NULL && // If there is a cache ++ (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, ++ jmeths[idnum+1] = newMethodID; // Set the id (may be NULL) ++ return true; ++ } ++ ++ return false; ++} ++ + + // Cache an itable index + void instanceKlass::set_cached_itable_index(size_t idnum, int index) { +@@ -1527,6 +1815,13 @@ + last = b; + b = b->next(); + } ++ ++ // (tw) Hack as dependencies get wrong version of klassOop ++ if(this->old_version() != NULL) { ++ ((instanceKlass *)this->old_version()->klass_part())->remove_dependent_nmethod(nm); ++ return; ++ } ++ + #ifdef ASSERT + tty->print_cr("### %s can't find dependent nmethod:", this->external_name()); + nm->print(); +@@ -2382,6 +2677,9 @@ + klassOop mirrored_klass = java_lang_Class::as_klassOop(obj); + st->print(BULLET"fake entry for mirror: "); + mirrored_klass->print_value_on(st); ++ if (mirrored_klass != NULL) { ++ st->print_cr("revision: %d (oldest=%d, newest=%d)", mirrored_klass->klass_part()->revision_number(), mirrored_klass->klass_part()->oldest_version()->klass_part()->revision_number(), mirrored_klass->klass_part()->newest_version()->klass_part()->revision_number()); ++ } + st->cr(); + st->print(BULLET"fake entry resolved_constructor: "); + methodOop ctor = java_lang_Class::resolved_constructor(obj); +diff -r 6c6a2299029a src/share/vm/oops/instanceKlass.hpp +--- a/src/share/vm/oops/instanceKlass.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/instanceKlass.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -101,6 +101,22 @@ + virtual void do_field(fieldDescriptor* fd) = 0; + }; + ++// (tw) Iterates over the fields of the old and new class ++class FieldEvolutionClosure : public StackObj { ++public: ++ virtual void do_new_field(fieldDescriptor* fd) = 0; ++ virtual void do_old_field(fieldDescriptor* fd) = 0; ++ virtual void do_changed_field(fieldDescriptor* old_fd, fieldDescriptor *new_fd) = 0; ++}; ++ ++// (tw) Iterates over the methods of the old and new class ++class MethodEvolutionClosure : public StackObj { ++public: ++ virtual void do_new_method(methodOop oop) = 0; ++ virtual void do_old_method(methodOop oop) = 0; ++ virtual void do_changed_method(methodOop oldOop, methodOop newOop) = 0; ++}; ++ + #ifndef PRODUCT + // Print fields. + // If "obj" argument to constructor is NULL, prints static fields, otherwise prints non-static fields. +@@ -285,6 +301,11 @@ + // _idnum_allocated_count. + u1 _init_state; // state of class + ++ // (tw) Field that allows for a short-path when calculating updated fields for the second time and ++ // no fields changed. Testing performance impact with this, can be removed later when the update ++ // information is cached. 
++ bool _fields_not_changed; ++ + u1 _reference_type; // reference type + + // embedded Java vtable follows here +@@ -452,6 +473,7 @@ + // initialization (virtuals from Klass) + bool should_be_initialized() const; // means that initialize should be called + void initialize(TRAPS); ++ void initialize_redefined_class(); + void link_class(TRAPS); + bool link_class_or_fail(TRAPS); // returns false on failure + void unlink_class(); +@@ -629,6 +651,7 @@ + static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum, + size_t *length_p, jmethodID* id_p); + jmethodID jmethod_id_or_null(methodOop method); ++ bool update_jmethod_id(methodOop method, jmethodID newMethodID); + + // cached itable index support + void set_cached_itable_index(size_t idnum, int index); +@@ -711,6 +734,7 @@ + + // subclass/subinterface checks + bool implements_interface(klassOop k) const; ++ bool implements_interface_any_version(klassOop k) const; + + // Access to the implementor of an interface. + klassOop implementor() const +@@ -760,6 +784,12 @@ + void do_local_static_fields(FieldClosure* cl); + void do_nonstatic_fields(FieldClosure* cl); // including inherited fields + void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS); ++ void do_fields_evolution(FieldEvolutionClosure *cl); ++ void store_update_information(GrowableArray<int> &values); ++ void clear_update_information(); ++ void store_type_check_information(GrowableArray< Pair<int, klassOop> > &values); ++ void clear_type_check_information(); ++ + + void methods_do(void f(methodOop method)); + void array_klasses_do(void f(klassOop k)); +diff -r 6c6a2299029a src/share/vm/oops/instanceKlassKlass.cpp +--- a/src/share/vm/oops/instanceKlassKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/instanceKlassKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -480,6 +480,28 @@ + instanceKlass* ik = instanceKlass::cast(klassOop(obj)); + klassKlass::oop_print_on(obj, st); + ++ // (tw) Output revision number and revision numbers of older / newer and oldest / newest version of this class. 
++ ++ st->print(BULLET"revision: %d", ik->revision_number()); ++ ++ if (ik->new_version() != NULL) { ++ st->print(" (newer=%d)", ik->new_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->newest_version() != ik->new_version() && ik->newest_version() != obj) { ++ st->print(" (newest=%d)", ik->newest_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->old_version() != NULL) { ++ st->print(" (old=%d)", ik->old_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->oldest_version() != ik->old_version() && ik->oldest_version() != obj) { ++ st->print(" (oldest=%d)", ik->oldest_version()->klass_part()->revision_number()); ++ } ++ ++ st->cr(); ++ + st->print(BULLET"instance size: %d", ik->size_helper()); st->cr(); + st->print(BULLET"klass size: %d", ik->object_size()); st->cr(); + st->print(BULLET"access: "); ik->access_flags().print_on(st); st->cr(); +@@ -663,7 +685,7 @@ + } + guarantee(sib->as_klassOop()->is_klass(), "should be klass"); + guarantee(sib->as_klassOop()->is_perm(), "should be in permspace"); +- guarantee(sib->super() == super, "siblings should have same superklass"); ++ guarantee(sib->super() == super || super->klass_part()->newest_version() == SystemDictionary::Object_klass(), "siblings should have same superklass"); + sib = sib->next_sibling(); + } + +diff -r 6c6a2299029a src/share/vm/oops/instanceRefKlass.cpp +--- a/src/share/vm/oops/instanceRefKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/instanceRefKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -455,10 +455,13 @@ + instanceKlass* ik = instanceKlass::cast(k); + + // Check that we have the right class +- debug_only(static bool first_time = true); +- assert(k == SystemDictionary::Reference_klass() && first_time, +- "Invalid update of maps"); +- debug_only(first_time = false); ++ ++ // (tw) Asserts no longer valid for class redefinition ++ // debug_only(static bool first_time = true); ++ ++ //assert(k == SystemDictionary::Reference_klass() && first_time, ++ // "Invalid update of maps"); ++ //debug_only(first_time = false); + assert(ik->nonstatic_oop_map_count() == 1, "just checking"); + + OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); +diff -r 6c6a2299029a src/share/vm/oops/klass.cpp +--- a/src/share/vm/oops/klass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/klass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -55,6 +55,26 @@ + return false; + } + ++void Klass::update_supers_to_newest_version() { ++ ++ if (super() != NULL) set_super(super()->klass_part()->newest_version()); ++ ++ for (uint i=0; i<primary_super_limit(); i++) { ++ klassOop cur = _primary_supers[i]; ++ if (cur != NULL) { ++ _primary_supers[i] = cur->klass_part()->newest_version(); ++ } ++ } ++ ++ // Scan the array-of-objects ++ int cnt = secondary_supers()->length(); ++ for (int i = 0; i < cnt; i++) { ++ klassOop cur = (klassOop)secondary_supers()->obj_at(i); ++ if (cur != NULL) { ++ secondary_supers()->obj_at_put(i, cur->klass_part()->newest_version()); ++ } ++ } ++} + bool Klass::search_secondary_supers(klassOop k) const { + // Put some extra logic here out-of-line, before the search proper. + // This cuts down the size of the inline method. 
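The klass.cpp hunk above rewrites a class's cached super-type links (direct super, primary supers, secondary supers) so that they all point at the newest version of each class. The normalization it relies on is the same one the arrayKlass::compute_is_subtype_of change earlier in this patch uses. As a standalone sketch (not part of the patch; it only restates the idiom the surrounding hunks introduce):

    // Sketch only: two klassOops denote the same logical class when their
    // newest versions coincide; newest_version() follows the _new_version
    // links that this patch adds to Klass.
    static bool same_class_any_version(klassOop a, klassOop b) {
      return a->klass_part()->newest_version() == b->klass_part()->newest_version();
    }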
+@@ -161,6 +181,16 @@
+ kl->set_alloc_size(0);
+ TRACE_INIT_ID(kl);
+ 
++ kl->set_redefinition_flags(Klass::NoRedefinition);
++ kl->set_redefining(false);
++ kl->set_new_version(NULL);
++ kl->set_old_version(NULL);
++ kl->set_redefinition_index(-1);
++ kl->set_revision_number(-1);
++ kl->set_field_redefinition_policy(DynamicCheck);
++ kl->set_static_field_redefinition_policy(AccessDeletedMembers);
++ kl->set_method_redefinition_policy(AccessDeletedMembers);
++
+ kl->set_prototype_header(markOopDesc::prototype());
+ kl->set_biased_lock_revocation_count(0);
+ kl->set_last_biased_lock_bulk_revocation_time(0);
+@@ -232,7 +262,7 @@
+ set_super(NULL);
+ oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop());
+ assert(super_depth() == 0, "Object must already be initialized properly");
+- } else if (k != super() || k == SystemDictionary::Object_klass()) {
++ } else if (k != super() || k->klass_part()->super() == NULL) {
+ assert(super() == NULL || super() == SystemDictionary::Object_klass(),
+ "initialize this only once to a non-trivial value");
+ set_super(k);
+diff -r 6c6a2299029a src/share/vm/oops/klass.hpp
+--- a/src/share/vm/oops/klass.hpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/oops/klass.hpp Mon Apr 28 13:12:30 2014 -0700
+@@ -170,6 +170,7 @@
+ void* operator new(size_t ignored, KlassHandle& klass, int size, TRAPS);
+ };
+ 
++template<class L, class R> class Pair;
+ 
+ class Klass : public Klass_vtbl {
+ friend class VMStructs;
+@@ -222,6 +223,39 @@
+ oop* oop_block_beg() const { return adr_secondary_super_cache(); }
+ oop* oop_block_end() const { return adr_next_sibling() + 1; }
+ 
++ // (tw) Different class redefinition flags for code evolution.
++ enum RedefinitionFlags {
++
++ // This class is not redefined at all!
++ NoRedefinition,
++
++ // There are changes to the class meta data.
++ ModifyClass = 1,
++
++ // The size of the class meta data changes.
++ ModifyClassSize = ModifyClass << 1,
++
++ // There are changes to the instance format.
++ ModifyInstances = ModifyClassSize << 1,
++
++ // The size of instances changes.
++ ModifyInstanceSize = ModifyInstances << 1,
++
++ // A super type of this class is removed.
++ RemoveSuperType = ModifyInstanceSize << 1,
++
++ // This class (or one of its super classes) has an instance transformer method.
++ HasInstanceTransformer = RemoveSuperType << 1,
++ };
++
++ // (tw) Different policies dealing with deleted fields / methods in old code.
++ enum RedefinitionPolicy {
++ StaticCheck,
++ DynamicCheck,
++ AccessDeletedMembers,
++ AccessOldMembers
++ };
++
+ protected:
+ //
+ // The oop block. All oop fields must be declared here and only oop fields
+@@ -241,6 +275,10 @@
+ oop _java_mirror;
+ // Superclass
+ klassOop _super;
++ // Old class
++ klassOop _old_version;
++ // New class
++ klassOop _new_version;
+ // First subclass (NULL if none); _subklass->next_sibling() is next one
+ klassOop _subklass;
+ // Sibling link (or NULL); links all subklasses of a klass
+@@ -253,6 +291,19 @@
+ jint _modifier_flags; // Processed access flags, for use by Class.getModifiers.
+ AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here.
+ ++ // (tw) Non-oop fields for enhanced class redefinition ++ jint _revision_number; // The revision number for redefined classes ++ jint _redefinition_index; // Index of this class when performing the redefinition ++ bool _subtype_changed; ++ int _redefinition_flags; // Level of class redefinition ++ bool _is_copying_backwards; // Does the class need to copy fields backwards? => possibly overwrite itself? ++ int * _update_information; // Update information ++ Pair<int, klassOop> * _type_check_information; // Offsets of object fields that need a type check ++ char _method_redefinition_policy; ++ char _field_redefinition_policy; ++ char _static_field_redefinition_policy; ++ bool _is_redefining; ++ + #ifndef PRODUCT + int _verify_count; // to avoid redundant verifies + #endif +@@ -301,6 +352,99 @@ + klassOop secondary_super_cache() const { return _secondary_super_cache; } + void set_secondary_super_cache(klassOop k) { oop_store_without_check((oop*) &_secondary_super_cache, (oop) k); } + ++ // BEGIN class redefinition utilities ++ ++ // double links between new and old version of a class ++ klassOop old_version() const { return _old_version; } ++ void set_old_version(klassOop klass) { assert(_old_version == NULL || klass == NULL, "Can only be set once!"); _old_version = klass; } ++ klassOop new_version() const { return _new_version; } ++ void set_new_version(klassOop klass) { assert(_new_version == NULL || klass == NULL, "Can only be set once!"); _new_version = klass; } ++ ++ // A subtype of this class is no longer a subtype ++ bool has_subtype_changed() const { return _subtype_changed; } ++ void set_subtype_changed(bool b) { assert(is_newest_version() || new_version()->klass_part()->is_newest_version(), "must be newest or second newest version"); ++ _subtype_changed = b; } ++ // state of being redefined ++ int redefinition_index() const { return _redefinition_index; } ++ void set_redefinition_index(int index) { _redefinition_index = index; } ++ void set_redefining(bool b) { _is_redefining = b; } ++ bool is_redefining() const { return _is_redefining; } ++ int redefinition_flags() const { return _redefinition_flags; } ++ bool check_redefinition_flag(int flags) const { return (_redefinition_flags & flags) != 0; } ++ void set_redefinition_flags(int flags) { _redefinition_flags = flags; } ++ bool is_copying_backwards() const { return _is_copying_backwards; } ++ void set_copying_backwards(bool b) { _is_copying_backwards = b; } ++ ++ // update information ++ int *update_information() const { return _update_information; } ++ void set_update_information(int *info) { _update_information = info; } ++ Pair<int, klassOop> *type_check_information() const { return _type_check_information; } ++ void set_type_check_information(Pair<int, klassOop> *info) { _type_check_information = info; } ++ ++ bool is_same_or_older_version(klassOop klass) const { ++ if (Klass::cast(klass) == this) { return true; } ++ else if (_old_version == NULL) { return false; } ++ else { return _old_version->klass_part()->is_same_or_older_version(klass); } ++ } ++ ++ // Revision number for redefined classes, -1 for originally loaded classes ++ jint revision_number() const { ++ return _revision_number; ++ } ++ ++ bool was_redefined() const { ++ return _revision_number != -1; ++ } ++ ++ void set_revision_number(jint number) { ++ _revision_number = number; ++ } ++ ++ char method_redefinition_policy() { ++ return _method_redefinition_policy; ++ } ++ ++ void set_method_redefinition_policy(char v) { ++ _method_redefinition_policy = v; ++ } 
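For orientation (illustrative, not part of the patch): the RedefinitionFlags values introduced above form a bitmask, so one class can carry several redefinition properties at once, while the three policy fields choose how old code may access deleted members. A minimal usage sketch, assuming a Klass* k that is being prepared for redefinition:

    // Sketch only: flags combine with bitwise-or; check_redefinition_flag()
    // tests (_redefinition_flags & flags) != 0, i.e. "any of these set".
    k->set_redefinition_flags(Klass::ModifyInstances | Klass::HasInstanceTransformer);
    if (k->check_redefinition_flag(Klass::ModifyInstanceSize | Klass::RemoveSuperType)) {
      // instance layout or hierarchy changed: instances need a full update pass
    }
    k->set_method_redefinition_policy(Klass::AccessDeletedMembers);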
++ ++ char field_redefinition_policy() { ++ return _field_redefinition_policy; ++ } ++ ++ void set_field_redefinition_policy(char v) { ++ _field_redefinition_policy = v; ++ } ++ ++ char static_field_redefinition_policy() { ++ return _static_field_redefinition_policy; ++ } ++ ++ void set_static_field_redefinition_policy(char v) { ++ _static_field_redefinition_policy = v; ++ } ++ ++ klassOop oldest_version() const { ++ if (_old_version == NULL) { return this->as_klassOop(); } ++ else { return _old_version->klass_part()->oldest_version(); }; ++ } ++ ++ klassOop newest_version() const { ++ if (_new_version == NULL) { return this->as_klassOop(); } ++ else { return _new_version->klass_part()->newest_version(); }; ++ } ++ ++ klassOop active_version() const { ++ if (_new_version == NULL || _new_version->klass_part()->is_redefining()) { return this->as_klassOop(); assert(!this->is_redefining(), "just checking"); } ++ else { return _new_version->klass_part()->active_version(); }; ++ } ++ ++ bool is_newest_version() const { ++ return _new_version == NULL; ++ } ++ ++ // END class redefinition utilities ++ + objArrayOop secondary_supers() const { return _secondary_supers; } + void set_secondary_supers(objArrayOop k) { oop_store_without_check((oop*) &_secondary_supers, (oop) k); } + +@@ -361,6 +505,8 @@ + void set_next_sibling(klassOop s); + + oop* adr_super() const { return (oop*)&_super; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } + oop* adr_primary_supers() const { return (oop*)&_primary_supers[0]; } + oop* adr_secondary_super_cache() const { return (oop*)&_secondary_super_cache; } + oop* adr_secondary_supers()const { return (oop*)&_secondary_supers; } +@@ -490,6 +636,7 @@ + return search_secondary_supers(k); + } + } ++ void update_supers_to_newest_version(); + bool search_secondary_supers(klassOop k) const; + + // Find LCA in class hierarchy +@@ -816,6 +963,8 @@ + + + inline oop klassOopDesc::java_mirror() const { return klass_part()->java_mirror(); } ++inline klassOop klassOopDesc::old_version() const { return klass_part()->old_version(); } ++inline klassOop klassOopDesc::new_version() const { return klass_part()->new_version(); } + + + #endif // SHARE_VM_OOPS_KLASS_HPP +diff -r 6c6a2299029a src/share/vm/oops/klassKlass.cpp +--- a/src/share/vm/oops/klassKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/klassKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -68,6 +68,8 @@ + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + MarkSweep::mark_and_push(k->adr_super()); ++ MarkSweep::mark_and_push(k->adr_old_version()); ++ MarkSweep::mark_and_push(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::mark_and_push(k->adr_primary_supers()+i); + MarkSweep::mark_and_push(k->adr_secondary_super_cache()); +@@ -87,6 +89,8 @@ + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + PSParallelCompact::mark_and_push(cm, k->adr_super()); ++ PSParallelCompact::mark_and_push(cm, k->adr_old_version()); ++ PSParallelCompact::mark_and_push(cm, k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + PSParallelCompact::mark_and_push(cm, k->adr_primary_supers()+i); + PSParallelCompact::mark_and_push(cm, k->adr_secondary_super_cache()); +@@ -106,6 +110,8 @@ + int size = oop_size(obj); + Klass* k = Klass::cast(klassOop(obj)); 
+ blk->do_oop(k->adr_super()); ++ blk->do_oop(k->adr_old_version()); ++ blk->do_oop(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + blk->do_oop(k->adr_primary_supers()+i); + blk->do_oop(k->adr_secondary_super_cache()); +@@ -134,6 +140,10 @@ + oop* adr; + adr = k->adr_super(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + for (juint i = 0; i < Klass::primary_super_limit(); i++) { + adr = k->adr_primary_supers()+i; + if (mr.contains(adr)) blk->do_oop(adr); +@@ -147,6 +157,8 @@ + // The following are "weak links" in the perm gen and are + // treated specially in a later phase of a perm gen collection. + assert(oop(k)->is_perm(), "should be in perm"); ++ assert(oop(k->adr_old_version())->is_perm(), "should be in perm"); ++ assert(oop(k->adr_new_version())->is_perm(), "should be in perm"); + assert(oop(k->adr_subklass())->is_perm(), "should be in perm"); + assert(oop(k->adr_next_sibling())->is_perm(), "should be in perm"); + if (blk->should_remember_klasses() +@@ -167,6 +179,8 @@ + Klass* k = Klass::cast(klassOop(obj)); + + MarkSweep::adjust_pointer(k->adr_super()); ++ MarkSweep::adjust_pointer(k->adr_new_version()); ++ MarkSweep::adjust_pointer(k->adr_old_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::adjust_pointer(k->adr_primary_supers()+i); + MarkSweep::adjust_pointer(k->adr_secondary_super_cache()); +diff -r 6c6a2299029a src/share/vm/oops/klassOop.hpp +--- a/src/share/vm/oops/klassOop.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/klassOop.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -41,8 +41,10 @@ + // returns the Klass part containing dispatching behavior + Klass* klass_part() const { return (Klass*)((address)this + sizeof(klassOopDesc)); } + +- // Convenience wrapper ++ // Convenience wrappers + inline oop java_mirror() const; ++ inline klassOop old_version() const; ++ inline klassOop new_version() const; + + private: + // These have no implementation since klassOop should never be accessed in this fashion +diff -r 6c6a2299029a src/share/vm/oops/klassVtable.cpp +--- a/src/share/vm/oops/klassVtable.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/klassVtable.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -97,7 +97,8 @@ + vtable_length = Universe::base_vtable_size(); + } + +- if (super == NULL && !Universe::is_bootstrapping() && ++ // (tw) TODO: Check if we can relax the condition on a fixed base vtable size ++ /*if (super == NULL && !Universe::is_bootstrapping() && + vtable_length != Universe::base_vtable_size()) { + // Someone is attempting to redefine java.lang.Object incorrectly. 
The + // only way this should happen is from +@@ -107,9 +108,9 @@ + vtable_length = Universe::base_vtable_size(); + } + assert(super != NULL || vtable_length == Universe::base_vtable_size(), +- "bad vtable size for class Object"); ++ "bad vtable size for class Object");*/ + assert(vtable_length % vtableEntry::size() == 0, "bad vtable length"); +- assert(vtable_length >= Universe::base_vtable_size(), "vtable too small"); ++ //assert(vtable_length >= Universe::base_vtable_size(), "vtable too small"); + } + + int klassVtable::index_of(methodOop m, int len) const { +@@ -657,20 +658,6 @@ + return true; + } + +-void klassVtable::dump_vtable() { +- tty->print_cr("vtable dump --"); +- for (int i = 0; i < length(); i++) { +- methodOop m = unchecked_method_at(i); +- if (m != NULL) { +- tty->print(" (%5d) ", i); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- } +-} +- + // CDS/RedefineClasses support - clear vtables so they can be reinitialized + void klassVtable::clear_vtable() { + for (int i = 0; i < _length; i++) table()[i].clear(); +@@ -1241,6 +1228,7 @@ + + void klassVtable::verify_against(outputStream* st, klassVtable* vt, int index) { + vtableEntry* vte = &vt->table()[index]; ++ if (vte->method() == NULL || table()[index].method() == NULL) return; + if (vte->method()->name() != table()[index].method()->name() || + vte->method()->signature() != table()[index].method()->signature()) { + fatal("mismatched name/signature of vtable entries"); +@@ -1260,6 +1248,8 @@ + + void vtableEntry::verify(klassVtable* vt, outputStream* st) { + NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true)); ++ // (tw) TODO: Check: Does not hold? ++ if (method() != NULL) { + assert(method() != NULL, "must have set method"); + method()->verify(); + // we sub_type, because it could be a miranda method +@@ -1267,7 +1257,13 @@ + #ifndef PRODUCT + print(); + #endif +- fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this)); ++ klassOop first_klass = vt->klass()(); ++ klassOop second_klass = method()->method_holder(); ++ // (tw) the following fatal does not work for old versions of classes ++ if (first_klass->klass_part()->is_newest_version()) { ++ //fatal1("vtableEntry %#lx: method is from subclass", this); ++ } ++ } + } + } + +@@ -1275,7 +1271,7 @@ + + void vtableEntry::print() { + ResourceMark rm; +- tty->print("vtableEntry %s: ", method()->name()->as_C_string()); ++ tty->print("vtableEntry %s: ", (method() == NULL) ? "null" : method()->name()->as_C_string()); + if (Verbose) { + tty->print("m %#lx ", (address)method()); + } +@@ -1342,6 +1338,33 @@ + tty->print_cr("%6d bytes total", total); + } + ++bool klassVtable::check_no_old_entries() { ++ // Check that there really is no entry ++ for (int i = 0; i < length(); i++) { ++ methodOop m = unchecked_method_at(i); ++ if (m != NULL) { ++ if (m->is_old() || !m->method_holder()->klass_part()->is_newest_version()) { ++ return false; ++ } ++ } ++ } ++ return true; ++} ++ ++void klassVtable::dump_vtable() { ++ tty->print_cr("vtable dump --"); ++ for (int i = 0; i < length(); i++) { ++ methodOop m = unchecked_method_at(i); ++ if (m != NULL) { ++ tty->print(" (%5d) ", i); ++ m->access_flags().print_on(tty); ++ tty->print(" -- "); ++ m->print_name(tty); ++ tty->cr(); ++ } ++ } ++} ++ + int klassItable::_total_classes; // Total no. of classes with itables + long klassItable::_total_size; // Total no. 
of bytes used for itables + +diff -r 6c6a2299029a src/share/vm/oops/klassVtable.hpp +--- a/src/share/vm/oops/klassVtable.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/klassVtable.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -100,6 +100,7 @@ + int methods_length, bool * trace_name_printed); + bool check_no_old_or_obsolete_entries(); + void dump_vtable(); ++ bool check_no_old_entries(); + + // Garbage collection + void oop_follow_contents(); +diff -r 6c6a2299029a src/share/vm/oops/methodKlass.cpp +--- a/src/share/vm/oops/methodKlass.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/methodKlass.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -93,6 +93,10 @@ + m->set_adapter_entry(NULL); + m->clear_code(); // from_c/from_i get set to c2i/i2i + ++ m->set_forward_method(NULL); ++ m->set_new_version(NULL); ++ m->set_old_version(NULL); ++ + if (access_flags.is_native()) { + m->clear_native_function(); + m->set_signature_handler(NULL); +@@ -122,6 +126,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::mark_and_push(m->adr_constMethod()); ++ MarkSweep::mark_and_push(m->adr_forward_method()); ++ MarkSweep::mark_and_push(m->adr_new_version()); ++ MarkSweep::mark_and_push(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::mark_and_push(m->adr_method_data()); + } +@@ -135,6 +142,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + PSParallelCompact::mark_and_push(cm, m->adr_constMethod()); ++ PSParallelCompact::mark_and_push(cm, m->adr_forward_method()); ++ PSParallelCompact::mark_and_push(cm, m->adr_new_version()); ++ PSParallelCompact::mark_and_push(cm, m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::mark_and_push(cm, m->adr_method_data()); +@@ -152,6 +162,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves + blk->do_oop(m->adr_constMethod()); ++ blk->do_oop(m->adr_forward_method()); ++ blk->do_oop(m->adr_new_version()); ++ blk->do_oop(m->adr_old_version()); + if (m->method_data() != NULL) { + blk->do_oop(m->adr_method_data()); + } +@@ -170,6 +183,12 @@ + oop* adr; + adr = m->adr_constMethod(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_forward_method(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + if (m->method_data() != NULL) { + adr = m->adr_method_data(); + if (mr.contains(adr)) blk->do_oop(adr); +@@ -187,6 +206,9 @@ + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. 
+ MarkSweep::adjust_pointer(m->adr_constMethod()); ++ MarkSweep::adjust_pointer(m->adr_forward_method()); ++ MarkSweep::adjust_pointer(m->adr_new_version()); ++ MarkSweep::adjust_pointer(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::adjust_pointer(m->adr_method_data()); + } +@@ -202,6 +224,9 @@ + assert(obj->is_method(), "should be method"); + methodOop m = methodOop(obj); + PSParallelCompact::adjust_pointer(m->adr_constMethod()); ++ PSParallelCompact::adjust_pointer(m->adr_forward_method()); ++ PSParallelCompact::adjust_pointer(m->adr_new_version()); ++ PSParallelCompact::adjust_pointer(m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::adjust_pointer(m->adr_method_data()); +@@ -222,7 +247,18 @@ + methodOop m = methodOop(obj); + // get the effect of PrintOopAddress, always, for methods: + st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m); +- st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr(); ++ st->print (" - method holder: "); m->method_holder()->print_value_on(st); ++ ++ if (m->method_holder()->klass_part()->new_version() != NULL) { ++ st->print(" (old)"); ++ } ++ st->cr(); ++ ++ st->print_cr(" - is obsolete: %d", (int)(m->is_obsolete())); ++ st->print_cr(" - is old: %d", (int)(m->is_old())); ++ st->print_cr(" - new version: "INTPTR_FORMAT" ", (address)(m->new_version())); ++ st->print_cr(" - old version: "INTPTR_FORMAT" ", (address)(m->old_version())); ++ st->print_cr(" - holder revision: %d", m->method_holder()->klass_part()->revision_number()); + st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants()); + m->constants()->print_value_on(st); st->cr(); + st->print (" - access: 0x%x ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr(); +diff -r 6c6a2299029a src/share/vm/oops/methodOop.cpp +--- a/src/share/vm/oops/methodOop.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/methodOop.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -328,6 +328,70 @@ + } + + ++bool methodOopDesc::is_in_code_section(int bci) { ++ // There is no table => every bci is in the code section table. ++ if (!constMethod()->has_code_section_table()) return true; ++ ++ constMethodOop m = constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (bci >= new_index && bci < new_index + length) { ++ // We are in a specified code section. ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++int methodOopDesc::calculate_forward_bci(int bci, methodOop new_method) { ++ int original_bci = -1; ++ if (constMethod()->has_code_section_table()) { ++ assert(is_in_code_section(bci), "can only forward in section"); ++ // First calculate back to original bci. ++ constMethodOop m = constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 original_index = m->code_section_original_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (bci >= new_index && bci < new_index + length) { ++ // We are in a specified code section. ++ original_bci = bci - new_index + original_index; ++ break; ++ } ++ } ++ assert (original_bci != -1, "must have been in code section"); ++ } else { ++ // No code sections specified => we are in an original method. ++ original_bci = bci; ++ } ++ ++ // We know the original bci => match to new method. 
++ int new_bci = -1; ++ if (new_method->constMethod()->has_code_section_table()) { ++ // Map to new bci. ++ constMethodOop m = new_method->constMethod(); ++ for (int i = 0; i < m->code_section_entries(); ++i) { ++ u2 new_index = m->code_section_new_index_at(i); ++ u2 original_index = m->code_section_original_index_at(i); ++ u2 length = m->code_section_length_at(i); ++ if (original_bci >= original_index && original_bci < original_index + length) { ++ new_bci = original_bci - original_index + new_index; ++ break; ++ } ++ } ++ assert (new_bci != -1, "must have found new code section"); ++ ++ } else { ++ // We are in an original method. ++ new_bci = original_bci; ++ } ++ ++ return new_bci; ++} ++ ++ + int methodOopDesc::extra_stack_words() { + // not an inline function, to avoid a header dependency on Interpreter + return extra_stack_entries() * Interpreter::stackElementSize; +@@ -1061,6 +1125,9 @@ + + // Reset correct method/const method, method size, and parameter info + newm->set_constMethod(newcm); ++ newm->set_forward_method(newm->forward_method()); ++ newm->set_new_version(newm->new_version()); ++ newm->set_old_version(newm->old_version()); + newm->constMethod()->set_code_size(new_code_length); + newm->constMethod()->set_constMethod_size(new_const_method_size); + newm->set_method_size(new_method_size); +diff -r 6c6a2299029a src/share/vm/oops/methodOop.hpp +--- a/src/share/vm/oops/methodOop.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/methodOop.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -114,6 +114,11 @@ + AccessFlags _access_flags; // Access flags + int _vtable_index; // vtable index of this method (see VtableIndexFlag) + // note: can have vtables with >2**16 elements (because of inheritance) ++ // (tw) Newer version of method available? ++ methodOop _forward_method; ++ methodOop _new_version; ++ methodOop _old_version; ++ + #ifdef CC_INTERP + int _result_index; // C++ interpreter needs for converting results to/from stack + #endif +@@ -175,6 +180,32 @@ + int name_index() const { return constMethod()->name_index(); } + void set_name_index(int index) { constMethod()->set_name_index(index); } + ++ methodOop forward_method() const {return _forward_method; } ++ void set_forward_method(methodOop m) { _forward_method = m; } ++ bool has_forward_method() const { return forward_method() != NULL; } ++ methodOop new_version() const {return _new_version; } ++ void set_new_version(methodOop m) { _new_version = m; } ++ methodOop newest_version() { if(_new_version == NULL) return this; else return new_version()->newest_version(); } ++ ++ methodOop old_version() const {return _old_version; }; ++ void set_old_version(methodOop m) { ++ if (m == NULL) { ++ _old_version = NULL; ++ return; ++ } ++ ++ assert(_old_version == NULL, "may only be set once"); ++ assert(this->code_size() == m->code_size(), "must have same code length"); ++ _old_version = m; ++ } ++ ++ methodOop oldest_version() const { ++ if(_old_version == NULL) return (methodOop)this; ++ else { ++ return old_version()->oldest_version(); ++ } ++ } ++ + // signature + Symbol* signature() const { return constants()->symbol_at(signature_index()); } + int signature_index() const { return constMethod()->signature_index(); } +@@ -670,6 +701,10 @@ + // Inline cache support + void cleanup_inline_caches(); + ++ // (tw) Method forwarding support. 
++ bool is_in_code_section(int bci); ++ int calculate_forward_bci(int bci, methodOop new_method); ++ + // Find if klass for method is loaded + bool is_klass_loaded_by_klass_index(int klass_index) const; + bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const; +@@ -734,6 +769,9 @@ + + // Garbage collection support + oop* adr_constMethod() const { return (oop*)&_constMethod; } ++ oop* adr_forward_method() const { return (oop*)&_forward_method; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } + oop* adr_method_data() const { return (oop*)&_method_data; } + }; + +diff -r 6c6a2299029a src/share/vm/oops/oop.hpp +--- a/src/share/vm/oops/oop.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/oop.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -95,6 +95,7 @@ + narrowOop* compressed_klass_addr(); + + void set_klass(klassOop k); ++ void set_klass_no_check(klassOop k); + + // For klass field compression + int klass_gap() const; +@@ -135,6 +136,7 @@ + bool is_array() const; + bool is_objArray() const; + bool is_klass() const; ++ bool is_instanceKlass() const; + bool is_thread() const; + bool is_method() const; + bool is_constMethod() const; +diff -r 6c6a2299029a src/share/vm/oops/oop.inline.hpp +--- a/src/share/vm/oops/oop.inline.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/oops/oop.inline.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -123,6 +123,14 @@ + } + } + ++inline void oopDesc::set_klass_no_check(klassOop k) { ++ if (UseCompressedOops) { ++ oop_store_without_check(compressed_klass_addr(), (oop)k); ++ } else { ++ oop_store_without_check(klass_addr(), (oop) k); ++ } ++} ++ + inline int oopDesc::klass_gap() const { + return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()); + } +@@ -156,6 +164,7 @@ + inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); } + inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); } + inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); } ++inline bool oopDesc::is_instanceKlass() const { return blueprint()->oop_is_instanceKlass(); } + inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); } + inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); } + inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); } +diff -r 6c6a2299029a src/share/vm/prims/jni.cpp +--- a/src/share/vm/prims/jni.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jni.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -406,7 +406,7 @@ + } + } + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, +- Handle(), &st, true, ++ Handle(), &st, true, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff -r 6c6a2299029a src/share/vm/prims/jvm.cpp +--- a/src/share/vm/prims/jvm.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jvm.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -872,7 +872,7 @@ + Handle protection_domain (THREAD, JNIHandles::resolve(pd)); + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, + protection_domain, &st, +- verify != 0, ++ verify != 0, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff -r 6c6a2299029a src/share/vm/prims/jvmtiEnv.cpp +--- a/src/share/vm/prims/jvmtiEnv.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jvmtiEnv.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -290,7 +290,10 @@ + 
class_definitions[index].klass = jcls; + } + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RetransformClasses */ + +@@ -299,9 +302,12 @@ + // class_definitions - pre-checked for NULL + jvmtiError + JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) { +-//TODO: add locking ++ + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RedefineClasses */ + +diff -r 6c6a2299029a src/share/vm/prims/jvmtiExport.cpp +--- a/src/share/vm/prims/jvmtiExport.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jvmtiExport.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -2296,7 +2296,7 @@ + // iterate over any code blob descriptors collected and post a + // DYNAMIC_CODE_GENERATED event to the profiler. + JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() { +- assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events"); ++ assert(!JavaThread::current()->owns_locks_but_redefine_classes_lock(), "all locks must be released to post deferred events"); + // iterate over any code blob descriptors that we collected + if (_code_blobs != NULL) { + for (int i=0; i<_code_blobs->length(); i++) { +diff -r 6c6a2299029a src/share/vm/prims/jvmtiImpl.cpp +--- a/src/share/vm/prims/jvmtiImpl.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jvmtiImpl.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -286,6 +286,8 @@ + void JvmtiBreakpoint::each_method_version_do(method_action meth_act) { + ((methodOopDesc*)_method->*meth_act)(_bci); + ++ // DCEVM: TODO: Check how we can implement this differently here! ++ + // add/remove breakpoint to/from versions of the method that + // are EMCP. Directly or transitively obsolete methods are + // not saved in the PreviousVersionInfo. +diff -r 6c6a2299029a src/share/vm/prims/jvmtiRedefineClasses.cpp +--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -30,498 +30,637 @@ + #include "interpreter/rewriter.hpp" + #include "memory/gcLocker.hpp" + #include "memory/universe.inline.hpp" ++#include "memory/cardTableRS.hpp" ++#include "oops/klassVtable.hpp" + #include "oops/fieldStreams.hpp" +-#include "oops/klassVtable.hpp" + #include "prims/jvmtiImpl.hpp" + #include "prims/jvmtiRedefineClasses.hpp" ++#include "prims/jvmtiClassFileReconstituter.hpp" + #include "prims/methodComparator.hpp" + #include "runtime/deoptimization.hpp" + #include "runtime/relocator.hpp" + #include "utilities/bitMap.inline.hpp" ++#include "compiler/compileBroker.hpp" + + + objArrayOop VM_RedefineClasses::_old_methods = NULL; + objArrayOop VM_RedefineClasses::_new_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_old_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_new_methods = NULL; +-methodOop* VM_RedefineClasses::_deleted_methods = NULL; +-methodOop* VM_RedefineClasses::_added_methods = NULL; ++int* VM_RedefineClasses::_matching_old_methods = NULL; ++int* VM_RedefineClasses::_matching_new_methods = NULL; ++int* VM_RedefineClasses::_deleted_methods = NULL; ++int* VM_RedefineClasses::_added_methods = NULL; + int VM_RedefineClasses::_matching_methods_length = 0; + int VM_RedefineClasses::_deleted_methods_length = 0; + int VM_RedefineClasses::_added_methods_length = 0; + klassOop VM_RedefineClasses::_the_class_oop = NULL; + +- +-VM_RedefineClasses::VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind) { ++// Holds the revision number of the current class redefinition ++int VM_RedefineClasses::_revision_number = -1; ++ ++VM_RedefineClasses::VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind) ++ : VM_GC_Operation(Universe::heap()->total_full_collections(), GCCause::_jvmti_force_gc) { ++ RC_TIMER_START(_timer_total); + _class_count = class_count; + _class_defs = class_defs; + _class_load_kind = class_load_kind; +- _res = JVMTI_ERROR_NONE; ++ _updated_oops = NULL; ++ _result = JVMTI_ERROR_NONE; + } + ++VM_RedefineClasses::~VM_RedefineClasses() { ++ { ++ MonitorLockerEx ml(RedefinitionSync_lock); ++ Threads::set_wait_at_instrumentation_entry(false); ++ ml.notify_all(); ++ } ++ ++ unlock_threads(); ++ RC_TIMER_STOP(_timer_total); ++ ++ if (TimeRedefineClasses) { ++ tty->print_cr("Timing Prologue: %d", _timer_prologue.milliseconds()); ++ tty->print_cr("Timing Class Loading: %d", _timer_class_loading.milliseconds()); ++ tty->print_cr("Timing Waiting for Lock: %d", _timer_wait_for_locks.milliseconds()); ++ tty->print_cr("Timing Class Linking: %d", _timer_class_linking.milliseconds()); ++ tty->print_cr("Timing Check Type: %d", _timer_check_type.milliseconds()); ++ tty->print_cr("Timing Prepare Redefinition: %d", _timer_prepare_redefinition.milliseconds()); ++ tty->print_cr("Timing Redefinition GC: %d", _timer_redefinition.milliseconds()); ++ tty->print_cr("Timing Epilogue: %d", _timer_vm_op_epilogue.milliseconds()); ++ tty->print_cr("------------------------------------------------------------------"); ++ tty->print_cr("Total Time: %d", _timer_total.milliseconds()); ++ } ++} ++ ++// Searches for all affected classes and performs a sorting such that a supertype is always before a subtype. 
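// do_topological_class_sorting is referenced below but defined later in the
// patch. As a sketch of the ordering contract it must satisfy ("a supertype is
// always before a subtype"), one simple scheme sorts by hierarchy depth, which
// strictly increases along superclass and interface edges, so every supertype
// lands at a smaller index than its subtypes. Klass here is a toy stand-in,
// not the HotSpot type.
#include <algorithm>
#include <vector>

struct Klass {
  Klass* super;
  std::vector<Klass*> interfaces;
};

static int depth(const Klass* k) {
  int d = 0;
  if (k->super != 0) d = std::max(d, depth(k->super) + 1);
  for (const Klass* i : k->interfaces) d = std::max(d, depth(i) + 1);
  return d;
}

void sort_supertypes_first(std::vector<Klass*>& klasses) {
  std::sort(klasses.begin(), klasses.end(),
            [](const Klass* a, const Klass* b) { return depth(a) < depth(b); });
}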
++jvmtiError VM_RedefineClasses::find_sorted_affected_classes(GrowableArray<instanceKlassHandle> *all_affected_klasses) { ++ ++ // Create array with all classes for which the redefine command was given ++ GrowableArray<instanceKlassHandle> klasses_to_redefine; ++ for (int i=0; i<_class_count; i++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); ++ instanceKlassHandle klass_handle(Thread::current(), java_lang_Class::as_klassOop(mirror)); ++ klasses_to_redefine.append(klass_handle); ++ assert(klass_handle->new_version() == NULL, "Must be new class"); ++ } ++ ++ // Find classes not directly redefined, but affected by a redefinition (because one of its supertypes is redefined) ++ GrowableArray<instanceKlassHandle> affected_classes; ++ FindAffectedKlassesClosure closure(&klasses_to_redefine, &affected_classes); ++ ++ // Trace affected classes ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ RC_TRACE(0x00000001, ("Klasses affected: %d", ++ affected_classes.length())); ++ for (int i=0; i<affected_classes.length(); i++) { ++ RC_TRACE(0x00000001, ("%s", ++ affected_classes.at(i)->name()->as_C_string())); ++ } ++ } ++ ++ // Add the array of affected classes and the array of redefined classes to get a list of all classes that need a redefinition ++ all_affected_klasses->appendAll(&klasses_to_redefine); ++ all_affected_klasses->appendAll(&affected_classes); ++ ++ // Sort the affected klasses such that a supertype is always on a smaller array index than its subtype. ++ jvmtiError result = do_topological_class_sorting(_class_defs, _class_count, &affected_classes, all_affected_klasses, Thread::current()); ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ RC_TRACE(0x00000001, ("Redefine order: ")); ++ for (int i=0; i<all_affected_klasses->length(); i++) { ++ RC_TRACE(0x00000001, ("%s", ++ all_affected_klasses->at(i)->name()->as_C_string())); ++ } ++ } ++ ++ return result; ++} ++ ++// Searches for the class bytes of the given class and returns them as a byte array. ++jvmtiError VM_RedefineClasses::find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed) { ++ ++ *not_changed = false; ++ ++ // Search for the index in the redefinition array that corresponds to the current class ++ int j; ++ for (j=0; j<_class_count; j++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[j].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ if (the_class_oop == the_class()) { ++ break; ++ } ++ } ++ ++ if (j == _class_count) { ++ ++ *not_changed = true; ++ ++ // Redefine with same bytecodes. This is a class that is only indirectly affected by redefinition, ++ // so the user did not specify a different bytecode for that class. 
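// A stand-alone restatement of the sourcing decision that find_class_bytes
// (above) implements: explicitly redefined classes use the agent-supplied
// bytes; indirectly affected classes reuse the cached class file if present,
// and are otherwise reconstituted from the VM representation. ClassDef/Klass
// are toy stand-ins, and reconstitute_class_file is a stub for
// JvmtiClassFileReconstituter; the real code also locks the constant pool
// while reconstituting.
#include <cstddef>
#include <vector>

struct ClassDef { const unsigned char* bytes; size_t count; };

struct Klass {
  const ClassDef* user_def;           // non-null if the agent redefined this class directly
  const unsigned char* cached_bytes;  // class file bytes cached at load time, if any
  size_t cached_count;
};

std::vector<unsigned char> reconstitute_class_file(const Klass&) {
  return std::vector<unsigned char>(); // stub: rebuild the class file from the VM representation
}

std::vector<unsigned char> find_class_bytes(const Klass& k, bool* not_changed) {
  if (k.user_def != NULL) {            // bytes supplied with the redefinition request
    *not_changed = false;
    return std::vector<unsigned char>(k.user_def->bytes, k.user_def->bytes + k.user_def->count);
  }
  *not_changed = true;                 // class is only indirectly affected
  if (k.cached_bytes != NULL) {        // reuse the cached class file
    return std::vector<unsigned char>(k.cached_bytes, k.cached_bytes + k.cached_count);
  }
  return reconstitute_class_file(k);   // no cache: reconstitute
}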
++
++  if (the_class->get_cached_class_file_bytes() == NULL) {
++    // not cached, we need to reconstitute the class file from VM representation
++    constantPoolHandle  constants(Thread::current(), the_class->constants());
++    ObjectLocker ol(constants, Thread::current());    // lock constant pool while we query it
++
++    JvmtiClassFileReconstituter reconstituter(the_class);
++    if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
++      return reconstituter.get_error();
++    }
++
++    *class_byte_count = (jint)reconstituter.class_file_size();
++    *class_bytes      = (unsigned char*)reconstituter.class_file_bytes();
++
++  } else {
++
++    // it is cached, get it from the cache
++    *class_byte_count = the_class->get_cached_class_file_len();
++    *class_bytes      = the_class->get_cached_class_file_bytes();
++  }
++
++  } else {
++
++    // Redefine with bytecodes at index j
++    *class_bytes = _class_defs[j].class_bytes;
++    *class_byte_count = _class_defs[j].class_byte_count;
++  }
++
++  return JVMTI_ERROR_NONE;
++}
++
++// Prologue of the VM operation, called on the Java thread in parallel to normal program execution
+ bool VM_RedefineClasses::doit_prologue() {
+-  if (_class_count == 0) {
+-    _res = JVMTI_ERROR_NONE;
++
++  _revision_number++;
++  RC_TRACE(0x00000001, ("Redefinition with revision number %d started!", _revision_number));
++
++  assert(Thread::current()->is_Java_thread(), "must be Java thread");
++  RC_TIMER_START(_timer_prologue);
++
++  if (!check_arguments()) {
++    RC_TIMER_STOP(_timer_prologue);
+     return false;
+   }
+-  if (_class_defs == NULL) {
+-    _res = JVMTI_ERROR_NULL_POINTER;
++
++  // We first load new class versions in the prologue, because somewhere down the
++  // call chain it is required that the current thread is a Java thread.
++  _new_classes = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceKlassHandle>(5, true);
++  _result = load_new_class_versions(Thread::current());
++
++  RC_TRACE(0x00000001, ("Loaded new class versions!"));
++  if (_result != JVMTI_ERROR_NONE) {
++    RC_TRACE(0x00000001, ("error occurred: %d!", _result));
++    delete _new_classes;
++    _new_classes = NULL;
++    RC_TIMER_STOP(_timer_prologue);
+     return false;
+   }
++
++  RC_TRACE(0x00000001, ("nearly finished"));
++  VM_GC_Operation::doit_prologue();
++  RC_TIMER_STOP(_timer_prologue);
++  RC_TRACE(0x00000001, ("doit_prologue finished!"));
++  return true;
++}
++
++// Checks basic properties of the arguments of the redefinition command.
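// check_arguments() below relies on an RC_ABORT macro that is not shown in
// this hunk. Judging from the call sites, a plausible shape -- stated here as
// an assumption, not the actual definition -- is: record the error code and
// bail out of the enclosing bool-returning function.
#define RC_ABORT(error) { _result = (error); return false; }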
++bool VM_RedefineClasses::check_arguments() { ++ ++ if (_class_count == 0) RC_ABORT(JVMTI_ERROR_NONE); ++ if (_class_defs == NULL) RC_ABORT(JVMTI_ERROR_NULL_POINTER); + for (int i = 0; i < _class_count; i++) { +- if (_class_defs[i].klass == NULL) { +- _res = JVMTI_ERROR_INVALID_CLASS; +- return false; ++ if (_class_defs[i].klass == NULL) RC_ABORT(JVMTI_ERROR_INVALID_CLASS); ++ if (_class_defs[i].class_byte_count == 0) RC_ABORT(JVMTI_ERROR_INVALID_CLASS_FORMAT); ++ if (_class_defs[i].class_bytes == NULL) RC_ABORT(JVMTI_ERROR_NULL_POINTER); ++ } ++ ++ return true; ++} ++ ++jvmtiError VM_RedefineClasses::check_exception() const { ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ RC_TRACE(0x00000001, ("parse_stream exception: '%s'", ++ ex_name->as_C_string())); ++ if (TraceRedefineClasses >= 1) { ++ java_lang_Throwable::print(PENDING_EXCEPTION, tty); ++ tty->print_cr(""); + } +- if (_class_defs[i].class_byte_count == 0) { +- _res = JVMTI_ERROR_INVALID_CLASS_FORMAT; +- return false; +- } +- if (_class_defs[i].class_bytes == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++ CLEAR_PENDING_EXCEPTION; ++ ++ if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { ++ return JVMTI_ERROR_UNSUPPORTED_VERSION; ++ } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { ++ return JVMTI_ERROR_INVALID_CLASS_FORMAT; ++ } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; ++ } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { ++ // The message will be "XXX (wrong name: YYY)" ++ return JVMTI_ERROR_NAMES_DONT_MATCH; ++ } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { ++ return JVMTI_ERROR_OUT_OF_MEMORY; ++ } else { ++ // Just in case more exceptions can be thrown.. ++ return JVMTI_ERROR_FAILS_VERIFICATION; + } + } + +- // Start timer after all the sanity checks; not quite accurate, but +- // better than adding a bunch of stop() calls. +- RC_TIMER_START(_timer_vm_op_prologue); +- +- // We first load new class versions in the prologue, because somewhere down the +- // call chain it is required that the current thread is a Java thread. +- _res = load_new_class_versions(Thread::current()); +- if (_res != JVMTI_ERROR_NONE) { +- // Free os::malloc allocated memory in load_new_class_version. +- os::free(_scratch_classes); +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return false; ++ return JVMTI_ERROR_NONE; ++} ++ ++// Loads all new class versions and stores the instanceKlass handles in an array. 
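// A stand-alone distillation of check_exception() above: the pending
// exception's class name selects the JVMTI error code to report. The enum
// mirrors the constant names used in the patch; enumerator values here are
// arbitrary (trailing underscores avoid the real jvmti.h macros), only the
// names and the mapping are taken from the source.
#include <string>

enum RedefineError {
  JVMTI_ERROR_UNSUPPORTED_VERSION_, JVMTI_ERROR_INVALID_CLASS_FORMAT_,
  JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION_, JVMTI_ERROR_NAMES_DONT_MATCH_,
  JVMTI_ERROR_OUT_OF_MEMORY_, JVMTI_ERROR_FAILS_VERIFICATION_
};

RedefineError error_for_exception(const std::string& ex_name) {
  if (ex_name == "java.lang.UnsupportedClassVersionError") return JVMTI_ERROR_UNSUPPORTED_VERSION_;
  if (ex_name == "java.lang.ClassFormatError")             return JVMTI_ERROR_INVALID_CLASS_FORMAT_;
  if (ex_name == "java.lang.ClassCircularityError")        return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION_;
  if (ex_name == "java.lang.NoClassDefFoundError")         return JVMTI_ERROR_NAMES_DONT_MATCH_; // "XXX (wrong name: YYY)"
  if (ex_name == "java.lang.OutOfMemoryError")             return JVMTI_ERROR_OUT_OF_MEMORY_;
  return JVMTI_ERROR_FAILS_VERIFICATION_;                  // catch-all, as in the patch
}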
++jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { ++ ++ ResourceMark rm(THREAD); ++ ++ RC_TRACE(0x00000001, ("===================================================================")); ++ RC_TRACE(0x00000001, ("load new class versions (%d)", ++ _class_count)); ++ ++ // Retrieve an array of all classes that need to be redefined ++ GrowableArray<instanceKlassHandle> all_affected_klasses; ++ jvmtiError err = find_sorted_affected_classes(&all_affected_klasses); ++ if (err != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error finding sorted affected classes: %d", ++ (int)err)); ++ return err; + } + +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return true; +-} +- +-void VM_RedefineClasses::doit() { +- Thread *thread = Thread::current(); +- +- if (UseSharedSpaces) { +- // Sharing is enabled so we remap the shared readonly space to +- // shared readwrite, private just in case we need to redefine +- // a shared class. We do the remap during the doit() phase of +- // the safepoint to be safer. +- if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { +- RC_TRACE_WITH_THREAD(0x00000001, thread, +- ("failed to remap shared readonly space to readwrite, private")); +- _res = JVMTI_ERROR_INTERNAL; +- return; ++ ++ JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); ++ ++ _max_redefinition_flags = Klass::NoRedefinition; ++ jvmtiError result = JVMTI_ERROR_NONE; ++ ++ for (int i=0; i<all_affected_klasses.length(); i++) { ++ RC_TRACE(0x00000002, ("Processing affected class %d of %d", ++ i+1, all_affected_klasses.length())); ++ ++ instanceKlassHandle the_class = all_affected_klasses.at(i); ++ RC_TRACE(0x00000002, ("name=%s", ++ the_class->name()->as_C_string())); ++ ++ the_class->link_class(THREAD); ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ // Find new class bytes ++ const unsigned char* class_bytes; ++ jint class_byte_count; ++ jvmtiError error; ++ jboolean not_changed; ++ if ((error = find_class_bytes(the_class, &class_bytes, &class_byte_count, ¬_changed)) != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error finding class bytes: %d", ++ (int)error)); ++ result = error; ++ break; ++ } ++ assert(class_bytes != NULL && class_byte_count != 0, "Class bytes defined at this point!"); ++ ++ ++ // Set redefined class handle in JvmtiThreadState class. ++ // This redefined class is sent to agent event handler for class file ++ // load hook event. ++ state->set_class_being_redefined(&the_class, _class_load_kind); ++ ++ RC_TRACE(0x00000002, ("Before resolving from stream")); ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_loading); ++ ++ ++ // Parse the stream. ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ Symbol* the_class_sym = the_class->name(); ++ ClassFileStream st((u1*) class_bytes, class_byte_count, (char *)"__VM_RedefineClasses__"); ++ instanceKlassHandle new_class(THREAD, SystemDictionary::resolve_from_stream(the_class_sym, ++ the_class_loader, ++ protection_domain, ++ &st, ++ true, ++ the_class, ++ THREAD)); ++ ++ not_changed = false; ++ ++ RC_TIMER_STOP(_timer_class_loading); ++ RC_TIMER_START(_timer_prologue); ++ ++ RC_TRACE(0x00000002, ("After resolving class from stream!")); ++ // Clear class_being_redefined just to be sure. 
++ state->clear_class_being_redefined(); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++#ifdef ASSERT ++ ++ assert(new_class() != NULL, "Class could not be loaded!"); ++ assert(new_class() != the_class(), "must be different"); ++ assert(new_class->new_version() == NULL && new_class->old_version() != NULL, ""); ++ ++ ++ objArrayOop k_interfaces = new_class->local_interfaces(); ++ for (int j=0; j<k_interfaces->length(); j++) { ++ assert(((klassOop)k_interfaces->obj_at(j))->klass_part()->is_newest_version(), "just checking"); ++ } ++ ++ if (!THREAD->is_Compiler_thread()) { ++ ++ RC_TRACE(0x00000002, ("name=%s loader="INTPTR_FORMAT" protection_domain="INTPTR_FORMAT" ", ++ the_class->name()->as_C_string(), ++ (address)(the_class->class_loader()), ++ (address)(the_class->protection_domain()))); ++ // If we are on the compiler thread, we must not try to resolve a class. ++ klassOop systemLookup = SystemDictionary::resolve_or_null(the_class->name(), the_class->class_loader(), the_class->protection_domain(), THREAD); ++ ++ if (systemLookup != NULL) { ++ assert(systemLookup == new_class->old_version(), "Old class must be in system dictionary!"); ++ ++ ++ Klass *subklass = new_class()->klass_part()->subklass(); ++ while (subklass != NULL) { ++ assert(subklass->new_version() == NULL, "Most recent version of class!"); ++ subklass = subklass->next_sibling(); ++ } ++ } else { ++ // This can happen for reflection generated classes.. ? ++ CLEAR_PENDING_EXCEPTION; ++ } ++ } ++ ++#endif ++ ++ if (RC_TRACE_ENABLED(0x00000001)) { ++ if (new_class->layout_helper() != the_class->layout_helper()) { ++ RC_TRACE(0x00000001, ("Instance size change for class %s: new=%d old=%d", ++ new_class->name()->as_C_string(), ++ new_class->layout_helper(), ++ the_class->layout_helper())); ++ } ++ } ++ ++ // Set the new version of the class ++ new_class->set_revision_number(_revision_number); ++ new_class->set_redefinition_index(i); ++ the_class->set_new_version(new_class()); ++ _new_classes->append(new_class); ++ ++ assert(new_class->new_version() == NULL, ""); ++ ++ int redefinition_flags = Klass::NoRedefinition; ++ ++ if (not_changed) { ++ redefinition_flags = Klass::NoRedefinition; ++ } else if (AllowAdvancedClassRedefinition) { ++ redefinition_flags = calculate_redefinition_flags(new_class); ++ } else { ++ jvmtiError allowed = check_redefinition_allowed(new_class); ++ if (allowed != JVMTI_ERROR_NONE) { ++ RC_TRACE(0x00000001, ("Error redefinition not allowed!")); ++ result = allowed; ++ break; ++ } ++ redefinition_flags = Klass::ModifyClass; ++ } ++ ++ if (new_class->super() != NULL) { ++ redefinition_flags = redefinition_flags | new_class->super()->klass_part()->redefinition_flags(); ++ } ++ ++ for (int j=0; j<new_class->local_interfaces()->length(); j++) { ++ redefinition_flags = redefinition_flags | ((klassOop)new_class->local_interfaces()->obj_at(j))->klass_part()->redefinition_flags(); ++ } ++ ++ new_class->set_redefinition_flags(redefinition_flags); ++ ++ _max_redefinition_flags = _max_redefinition_flags | redefinition_flags; ++ ++ if ((redefinition_flags & Klass::ModifyInstances) != 0) { ++ // TODO: Check if watch access flags of static fields are updated correctly. 
++ calculate_instance_update_information(_new_classes->at(i)()); ++ } else { ++ assert(new_class->layout_helper() >> 1 == new_class->old_version()->klass_part()->layout_helper() >> 1, "must be equal"); ++ assert(new_class->fields()->length() == ((instanceKlass*)new_class->old_version()->klass_part())->fields()->length(), "must be equal"); ++ ++ fieldDescriptor fd_new; ++ fieldDescriptor fd_old; ++ for (JavaFieldStream fs(new_class); !fs.done(); fs.next()) { ++ fd_new.initialize(new_class(), fs.index()); ++ fd_old.initialize(new_class->old_version(), fs.index()); ++ transfer_special_access_flags(&fd_old, &fd_new); ++ } ++ } ++ ++ if (RC_TRACE_ENABLED(0x00000008)) { ++ if (new_class->super() != NULL) { ++ RC_TRACE(0x00000008, ("Super class is %s", ++ new_class->super()->klass_part()->name()->as_C_string())); ++ } ++ } ++ ++#ifdef ASSERT ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->new_version() == NULL, "Super klass must be newest version!"); ++ ++ the_class->vtable()->verify(tty); ++ new_class->vtable()->verify(tty); ++#endif ++ ++ RC_TRACE(0x00000002, ("Verification done!")); ++ ++ if (i == all_affected_klasses.length() - 1) { ++ ++ // This was the last class processed => check if additional classes have been loaded in the meantime ++ ++ RC_TIMER_STOP(_timer_prologue); ++ lock_threads(); ++ RC_TIMER_START(_timer_prologue); ++ ++ for (int j=0; j<all_affected_klasses.length(); j++) { ++ ++ klassOop initial_klass = all_affected_klasses.at(j)(); ++ Klass *initial_subklass = initial_klass->klass_part()->subklass(); ++ Klass *cur_klass = initial_subklass; ++ while(cur_klass != NULL) { ++ ++ if(cur_klass->oop_is_instance() && cur_klass->is_newest_version()) { ++ instanceKlassHandle handle(THREAD, cur_klass->as_klassOop()); ++ if (!all_affected_klasses.contains(handle)) { ++ ++ int k = i + 1; ++ for (; k<all_affected_klasses.length(); k++) { ++ if (all_affected_klasses.at(k)->is_subtype_of(cur_klass->as_klassOop())) { ++ break; ++ } ++ } ++ all_affected_klasses.insert_before(k, handle); ++ RC_TRACE(0x00000002, ("Adding newly loaded class to affected classes: %s", ++ cur_klass->name()->as_C_string())); ++ } ++ } ++ ++ cur_klass = cur_klass->next_sibling(); ++ } ++ } ++ ++ int new_count = all_affected_klasses.length() - 1 - i; ++ if (new_count != 0) { ++ ++ unlock_threads(); ++ RC_TRACE(0x00000001, ("Found new number of affected classes: %d", ++ new_count)); ++ } + } + } + +- for (int i = 0; i < _class_count; i++) { +- redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread); ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; + } +- // Disable any dependent concurrent compilations +- SystemDictionary::notice_modification(); +- +- // Set flag indicating that some invariants are no longer true. +- // See jvmtiExport.hpp for detailed explanation. +- JvmtiExport::set_has_redefined_a_class(); +- +-// check_class() is optionally called for product bits, but is +-// always called for non-product bits. +-#ifdef PRODUCT +- if (RC_TRACE_ENABLED(0x00004000)) { ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_linking); ++ // Link and verify new classes _after_ all classes have been updated in the system dictionary! 
++  for (int i=0; i<all_affected_klasses.length(); i++) {
++    instanceKlassHandle the_class = all_affected_klasses.at(i);
++    instanceKlassHandle new_class(the_class->new_version());
++
++    RC_TRACE(0x00000002, ("Linking class %d/%d %s",
++      i,
++      all_affected_klasses.length(),
++      the_class->name()->as_C_string()));
++    new_class->link_class(THREAD);
++
++    result = check_exception();
++    if (result != JVMTI_ERROR_NONE) break;
++  }
++  RC_TIMER_STOP(_timer_class_linking);
++  RC_TIMER_START(_timer_prologue);
++
++  if (result != JVMTI_ERROR_NONE) {
++    rollback();
++    return result;
++  }
++
++  RC_TRACE(0x00000002, ("All classes loaded!"));
++
++#ifdef ASSERT
++  for (int i=0; i<all_affected_klasses.length(); i++) {
++    instanceKlassHandle the_class = all_affected_klasses.at(i);
++    assert(the_class->new_version() != NULL, "Must have been redefined");
++    instanceKlassHandle new_version = instanceKlassHandle(THREAD, the_class->new_version());
++    assert(new_version->new_version() == NULL, "Must be newest version");
++
++    if (!(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL)) {
++      new_version()->print();
++      new_version->super()->print();
++    }
++    assert(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL, "Super class must be newest version");
++  }
++
++  SystemDictionary::classes_do(check_class, THREAD);
++
+ #endif
+-    RC_TRACE_WITH_THREAD(0x00004000, thread, ("calling check_class"));
+-    SystemDictionary::classes_do(check_class, thread);
+-#ifdef PRODUCT
++
++  RC_TRACE(0x00000001, ("Finished verification!"));
++  return JVMTI_ERROR_NONE;
++}
++
++void VM_RedefineClasses::lock_threads() {
++
++  RC_TIMER_START(_timer_wait_for_locks);
++
++
++  JavaThread *javaThread = Threads::first();
++  while (javaThread != NULL) {
++    if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) {
++      CompilerThread *compilerThread = (CompilerThread *)javaThread;
++      compilerThread->set_should_bailout(true);
++    }
++    javaThread = javaThread->next();
+   }
+-#endif
++
++  int cnt = 0;
++  javaThread = Threads::first();
++  while (javaThread != NULL) {
++    if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) {
++      CompilerThread *compilerThread = (CompilerThread *)javaThread;
++      compilerThread->compilation_mutex()->lock();
++      cnt++;
++    }
++    javaThread = javaThread->next();
++  }
++
++  RC_TRACE(0x00000002, ("Locked %d compiler threads", cnt));
++
++  cnt = 0;
++  javaThread = Threads::first();
++  while (javaThread != NULL) {
++    if (javaThread != Thread::current()) {
++      javaThread->redefine_classes_mutex()->lock();
++      cnt++; // count the threads actually locked so the trace below reports a real number
++    }
++    javaThread = javaThread->next();
++  }
++
++
++  RC_TRACE(0x00000002, ("Locked %d threads", cnt));
++
++  RC_TIMER_STOP(_timer_wait_for_locks);
+ }
+
+-void VM_RedefineClasses::doit_epilogue() {
+-  // Free os::malloc allocated memory.
+-  // The memory allocated in redefine will be free'ed in next VM operation.
+-  os::free(_scratch_classes);
+-
+-  if (RC_TRACE_ENABLED(0x00000004)) {
+-    // Used to have separate timers for "doit" and "all", but the timer
+-    // overhead skewed the measurements.
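// lock_threads() above is two-phase: first every compiler thread is asked to
// bail out of its current compile, then the redefining thread acquires each
// compilation mutex, which succeeds once the bailout has taken effect. A
// stand-alone sketch of the pattern with standard C++ types (CompilerThread
// here is a stand-in); the locks are released later by a matching unlock pass,
// as in unlock_threads() below.
#include <atomic>
#include <mutex>
#include <vector>

struct CompilerThread {
  std::atomic<bool> should_bailout{false};
  std::mutex compilation_mutex;          // held while a compile is in flight
};

void lock_compiler_threads(std::vector<CompilerThread*>& threads) {
  for (CompilerThread* t : threads)      // phase 1: request bailout, no blocking
    t->should_bailout.store(true);
  for (CompilerThread* t : threads)      // phase 2: blocks until each compile has stopped
    t->compilation_mutex.lock();
}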
+-    jlong doit_time = _timer_rsc_phase1.milliseconds() +
+-                      _timer_rsc_phase2.milliseconds();
+-    jlong all_time = _timer_vm_op_prologue.milliseconds() + doit_time;
+-
+-    RC_TRACE(0x00000004, ("vm_op: all=" UINT64_FORMAT
+-      "  prologue=" UINT64_FORMAT "  doit=" UINT64_FORMAT, all_time,
+-      _timer_vm_op_prologue.milliseconds(), doit_time));
+-    RC_TRACE(0x00000004,
+-      ("redefine_single_class: phase1=" UINT64_FORMAT "  phase2=" UINT64_FORMAT,
+-       _timer_rsc_phase1.milliseconds(), _timer_rsc_phase2.milliseconds()));
++void VM_RedefineClasses::unlock_threads() {
++
++  int cnt = 0;
++  JavaThread *javaThread = Threads::first();
++  Thread *thread = Thread::current();
++  while (javaThread != NULL) {
++    if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) {
++      CompilerThread *compilerThread = (CompilerThread *)javaThread;
++      if (compilerThread->compilation_mutex()->owned_by_self()) {
++        compilerThread->compilation_mutex()->unlock();
++        cnt++;
++      }
++    }
++    javaThread = javaThread->next();
+   }
++
++  RC_TRACE(0x00000002, ("Unlocked %d compiler threads", cnt));
++
++  cnt = 0;
++  javaThread = Threads::first();
++  while (javaThread != NULL) {
++    if (javaThread != Thread::current()) {
++      if (javaThread->redefine_classes_mutex()->owned_by_self()) {
++        javaThread->redefine_classes_mutex()->unlock();
++        cnt++; // count actual unlocks so the trace below reports a real number
++      }
++    }
++    javaThread = javaThread->next();
++  }
++
++  RC_TRACE(0x00000002, ("Unlocked %d threads", cnt));
+ }
+
+-bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) {
+-  // classes for primitives cannot be redefined
+-  if (java_lang_Class::is_primitive(klass_mirror)) {
+-    return false;
+-  }
+-  klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror);
+-  // classes for arrays cannot be redefined
+-  if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) {
+-    return false;
+-  }
+-  return true;
+-}
+-
+-// Append the current entry at scratch_i in scratch_cp to *merge_cp_p
+-// where the end of *merge_cp_p is specified by *merge_cp_length_p. For
+-// direct CP entries, there is just the current entry to append. For
+-// indirect and double-indirect CP entries, there are zero or more
+-// referenced CP entries along with the current entry to append.
+-// Indirect and double-indirect CP entries are handled by recursive
+-// calls to append_entry() as needed. The referenced CP entries are
+-// always appended to *merge_cp_p before the referee CP entry. These
+-// referenced CP entries may already exist in *merge_cp_p in which case
+-// there is nothing extra to append and only the current entry is
+-// appended.
+-void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
+-       int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p,
+-       TRAPS) {
+-
+-  // append is different depending on entry tag type
+-  switch (scratch_cp->tag_at(scratch_i).value()) {
+-
+-    // The old verifier is implemented outside the VM. It loads classes,
+-    // but does not resolve constant pool entries directly so we never
+-    // see Class entries here with the old verifier. Similarly the old
+-    // verifier does not like Class entries in the input constant pool.
+-    // The split-verifier is implemented in the VM so it can optionally
+-    // and directly resolve constant pool entries to load classes. The
+-    // split-verifier can accept either Class entries or UnresolvedClass
+-    // entries in the input constant pool. We revert the appended copy
+-    // back to UnresolvedClass so that either verifier will be happy
+-    // with the constant pool entry.
+- case JVM_CONSTANT_Class: +- { +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- (*merge_cp_p)->unresolved_klass_at_put(*merge_cp_length_p, +- scratch_cp->klass_name_at(scratch_i)); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // these are direct CP entries so they can be directly appended, +- // but double and long take two constant pool entries +- case JVM_CONSTANT_Double: // fall through +- case JVM_CONSTANT_Long: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p) += 2; +- } break; +- +- // these are direct CP entries so they can be directly appended +- case JVM_CONSTANT_Float: // fall through +- case JVM_CONSTANT_Integer: // fall through +- case JVM_CONSTANT_Utf8: // fall through +- +- // This was an indirect CP entry, but it has been changed into +- // an interned string so this entry can be directly appended. +- case JVM_CONSTANT_String: // fall through +- +- // These were indirect CP entries, but they have been changed into +- // Symbol*s so these entries can be directly appended. +- case JVM_CONSTANT_UnresolvedClass: // fall through +- case JVM_CONSTANT_UnresolvedString: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // this is an indirect CP entry so it needs special handling +- case JVM_CONSTANT_NameAndType: +- { +- int name_ref_i = scratch_cp->name_ref_index_at(scratch_i); +- int new_name_ref_i = 0; +- bool match = (name_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_ref_i, *merge_cp_p, name_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_ref_i = found_i; +- map_index(scratch_cp, name_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. 
+- new_name_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- int signature_ref_i = scratch_cp->signature_ref_index_at(scratch_i); +- int new_signature_ref_i = 0; +- match = (signature_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(signature_ref_i, *merge_cp_p, +- signature_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(signature_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != signature_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_signature_ref_i = found_i; +- map_index(scratch_cp, signature_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, signature_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_signature_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_name_ref_i and new_signature_ref_i will both be 0. +- // In that case, all we are appending is the current entry. +- if (new_name_ref_i == 0) { +- new_name_ref_i = name_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d name_ref_index change: %d to %d", +- *merge_cp_length_p, name_ref_i, new_name_ref_i)); +- } +- if (new_signature_ref_i == 0) { +- new_signature_ref_i = signature_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d signature_ref_index change: %d to %d", +- *merge_cp_length_p, signature_ref_i, new_signature_ref_i)); +- } +- +- (*merge_cp_p)->name_and_type_at_put(*merge_cp_length_p, +- new_name_ref_i, new_signature_ref_i); +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // this is a double-indirect CP entry so it needs special handling +- case JVM_CONSTANT_Fieldref: // fall through +- case JVM_CONSTANT_InterfaceMethodref: // fall through +- case JVM_CONSTANT_Methodref: +- { +- int klass_ref_i = scratch_cp->uncached_klass_ref_index_at(scratch_i); +- int new_klass_ref_i = 0; +- bool match = (klass_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(klass_ref_i, *merge_cp_p, klass_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(klass_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != klass_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_klass_ref_i = found_i; +- map_index(scratch_cp, klass_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, klass_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. 
Without the optimization where we +- // use JVM_CONSTANT_UnresolvedClass, then up to two entries +- // could be appended. +- new_klass_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- int name_and_type_ref_i = +- scratch_cp->uncached_name_and_type_ref_index_at(scratch_i); +- int new_name_and_type_ref_i = 0; +- match = (name_and_type_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_and_type_ref_i, *merge_cp_p, +- name_and_type_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_and_type_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_and_type_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_and_type_ref_i = found_i; +- map_index(scratch_cp, name_and_type_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_and_type_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can append more than +- // one entry so the post call query of *merge_cp_length_p +- // is required in order to get the right index for the +- // JVM_CONSTANT_NameAndType entry. +- new_name_and_type_ref_i = *merge_cp_length_p - 1; +- } +- } +- +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_klass_ref_i and new_name_and_type_ref_i will both be +- // 0. In that case, all we are appending is the current entry. +- if (new_klass_ref_i == 0) { +- new_klass_ref_i = klass_ref_i; +- } +- if (new_name_and_type_ref_i == 0) { +- new_name_and_type_ref_i = name_and_type_ref_i; +- } +- +- const char *entry_name; +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Fieldref: +- entry_name = "Fieldref"; +- (*merge_cp_p)->field_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_InterfaceMethodref: +- entry_name = "IFMethodref"; +- (*merge_cp_p)->interface_method_at_put(*merge_cp_length_p, +- new_klass_ref_i, new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_Methodref: +- entry_name = "Methodref"; +- (*merge_cp_p)->method_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- default: +- guarantee(false, "bad switch"); +- break; +- } +- +- if (klass_ref_i != new_klass_ref_i) { +- RC_TRACE(0x00080000, ("%s entry@%d class_index changed: %d to %d", +- entry_name, *merge_cp_length_p, klass_ref_i, new_klass_ref_i)); +- } +- if (name_and_type_ref_i != new_name_and_type_ref_i) { +- RC_TRACE(0x00080000, +- ("%s entry@%d name_and_type_index changed: %d to %d", +- entry_name, *merge_cp_length_p, name_and_type_ref_i, +- new_name_and_type_ref_i)); +- } +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; +- +- // At this stage, Class or UnresolvedClass could be here, but not +- // ClassIndex +- case JVM_CONSTANT_ClassIndex: // fall through +- +- // Invalid is used as the tag for the second constant pool entry +- // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should +- // not be seen by itself. 
+- case JVM_CONSTANT_Invalid: // fall through +- +- // At this stage, String or UnresolvedString could be here, but not +- // StringIndex +- case JVM_CONSTANT_StringIndex: // fall through +- +- // At this stage JVM_CONSTANT_UnresolvedClassInError should not be +- // here +- case JVM_CONSTANT_UnresolvedClassInError: // fall through +- +- default: +- { +- // leave a breadcrumb +- jbyte bad_value = scratch_cp->tag_at(scratch_i).value(); +- ShouldNotReachHere(); +- } break; +- } // end switch tag value +-} // end append_entry() +- +- +-void VM_RedefineClasses::swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class) { +- typeArrayOop save; +- +- save = scratch_class->get_method_annotations_of(i); +- scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); +- scratch_class->set_method_annotations_of(j, save); +- +- save = scratch_class->get_method_parameter_annotations_of(i); +- scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); +- scratch_class->set_method_parameter_annotations_of(j, save); +- +- save = scratch_class->get_method_default_annotations_of(i); +- scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); +- scratch_class->set_method_default_annotations_of(j, save); +-} +- +- +-jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( +- instanceKlassHandle the_class, +- instanceKlassHandle scratch_class) { ++jvmtiError VM_RedefineClasses::check_redefinition_allowed(instanceKlassHandle scratch_class) { ++ ++ ++ ++ // Compatibility mode => check for unsupported modification ++ ++ ++ assert(scratch_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(scratch_class->old_version()); ++ + int i; + + // Check superclasses, or rather their names, since superclasses themselves can be + // requested to replace. + // Check for NULL superclass first since this might be java.lang.Object + if (the_class->super() != scratch_class->super() && +- (the_class->super() == NULL || scratch_class->super() == NULL || +- Klass::cast(the_class->super())->name() != +- Klass::cast(scratch_class->super())->name())) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ (the_class->super() == NULL || scratch_class->super() == NULL || ++ Klass::cast(the_class->super())->name() != ++ Klass::cast(scratch_class->super())->name())) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + + // Check if the number, names and order of directly implemented interfaces are the same. 
+@@ -539,8 +678,8 @@ + } + for (i = 0; i < n_intfs; i++) { + if (Klass::cast((klassOop) k_interfaces->obj_at(i))->name() != +- Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + } + +@@ -689,12 +828,8 @@ + idnum_owner->set_method_idnum(new_num); + } + k_new_method->set_method_idnum(old_num); +- swap_all_method_annotations(old_num, new_num, scratch_class); + } + } +- RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni, +- k_old_method->name_and_sig_as_C_string(), oi)); + // advance to next pair of methods + ++oi; + ++ni; +@@ -703,11 +838,11 @@ + // method added, see if it is OK + new_flags = (jushort) k_new_method->access_flags().get_flags(); + if ((new_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // new methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; + } + { + u2 num = the_class->next_method_idnum(); +@@ -722,24 +857,19 @@ + idnum_owner->set_method_idnum(new_num); + } + k_new_method->set_method_idnum(num); +- swap_all_method_annotations(new_num, num, scratch_class); + } +- RC_TRACE(0x00008000, ("Method added: new: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni)); + ++ni; // advance to next new method + break; + case deleted: + // method deleted, see if it is OK + old_flags = (jushort) k_old_method->access_flags().get_flags(); + if ((old_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // deleted methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; + } +- RC_TRACE(0x00008000, ("Method deleted: old: %s [%d]", +- k_old_method->name_and_sig_as_C_string(), oi)); + ++oi; // advance to next old method + break; + default: +@@ -750,2067 +880,2266 @@ + return JVMTI_ERROR_NONE; + } + +- +-// Find new constant pool index value for old constant pool index value +-// by seaching the index map. Returns zero (0) if there is no mapped +-// value for the old constant pool index. +-int VM_RedefineClasses::find_new_index(int old_index) { +- if (_index_map_count == 0) { +- // map is empty so nothing can be found +- return 0; ++int VM_RedefineClasses::calculate_redefinition_flags(instanceKlassHandle new_class) { ++ ++ int result = Klass::NoRedefinition; ++ ++ ++ ++ RC_TRACE(0x00000002, ("Comparing different class versions of class %s", ++ new_class->name()->as_C_string())); ++ ++ assert(new_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(new_class->old_version()); ++ ++ // Check whether class is in the error init state. 
++ if (the_class->is_in_error_state()) { ++ // TBD #5057930: special error code is needed in 1.6 ++ //result = Klass::union_redefinition_level(result, Klass::Invalid); + } + +- if (old_index < 1 || old_index >= _index_map_p->length()) { +- // The old_index is out of range so it is not mapped. This should +- // not happen in regular constant pool merging use, but it can +- // happen if a corrupt annotation is processed. +- return 0; ++ int i; ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check superclasses ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->is_newest_version(), ""); ++ if (the_class->super() != new_class->super()) { ++ // Super class changed ++ ++ klassOop cur_klass = the_class->super(); ++ while (cur_klass != NULL) { ++ if (!new_class->is_subclass_of(cur_klass->klass_part()->newest_version())) { ++ RC_TRACE(0x00000002, ("Removed super class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ result = result | Klass::RemoveSuperType | Klass::ModifyInstances | Klass::ModifyClass; ++ ++ if (!cur_klass->klass_part()->has_subtype_changed()) { ++ RC_TRACE(0x00000002, ("Subtype changed of class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ cur_klass->klass_part()->set_subtype_changed(true); ++ } ++ } ++ ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ ++ cur_klass = new_class->super(); ++ while (cur_klass != NULL) { ++ if (!the_class->is_subclass_of(cur_klass->klass_part()->old_version())) { ++ RC_TRACE(0x00000002, ("Added super class %s", ++ cur_klass->klass_part()->name()->as_C_string())); ++ result = result | Klass::ModifyClass | Klass::ModifyInstances; ++ } ++ cur_klass = cur_klass->klass_part()->super(); ++ } + } + +- int value = _index_map_p->at(old_index); +- if (value == -1) { +- // the old_index is not mapped +- return 0; +- } +- +- return value; +-} // end find_new_index() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// class pair. Otherwise, returns false. +-bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_Class && t1 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_Class && t2 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->klass_name_at(index1)->as_C_string(); +- char *s2 = cp2->klass_name_at(index2)->as_C_string(); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_class_mismatch() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// string pair. Otherwise, returns false. 
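// calculate_redefinition_flags above accumulates effects as bit flags: walking
// the old superclass chain, any type the new version no longer derives from
// adds RemoveSuperType (plus ModifyClass/ModifyInstances), and a class also
// inherits the flags of its supertypes. A stand-alone sketch with toy types;
// the old/new version indirection (newest_version()/old_version()) is omitted
// and the bit values are illustrative, only the names follow Klass.
enum RedefinitionFlags {
  NoRedefinition  = 0,
  ModifyClass     = 1 << 0,
  ModifyInstances = 1 << 1,
  RemoveSuperType = 1 << 2
};

struct Klass {
  Klass* super;
  int flags;
  bool is_subclass_of(const Klass* k) const {
    for (const Klass* c = this; c != 0; c = c->super)
      if (c == k) return true;
    return false;
  }
};

int calculate_flags(const Klass* old_version, Klass* new_version) {
  int result = NoRedefinition;
  // Any supertype dropped between the versions is the invasive case.
  for (const Klass* cur = old_version->super; cur != 0; cur = cur->super) {
    if (!new_version->is_subclass_of(cur))
      result |= RemoveSuperType | ModifyInstances | ModifyClass;
  }
  // Effects of a redefined supertype propagate down to the subtype.
  if (new_version->super != 0)
    result |= new_version->super->flags;
  new_version->flags = result;
  return result;                         // caller ORs this into _max_redefinition_flags
}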
+-bool VM_RedefineClasses::is_unresolved_string_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_String && t1 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_String && t2 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->string_at_noresolve(index1); +- char *s2 = cp2->string_at_noresolve(index2); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_string_mismatch() +- +- +-jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { +- // For consistency allocate memory using os::malloc wrapper. +- _scratch_classes = (instanceKlassHandle *) +- os::malloc(sizeof(instanceKlassHandle) * _class_count, mtInternal); +- if (_scratch_classes == NULL) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } +- +- ResourceMark rm(THREAD); +- +- JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); +- // state can only be NULL if the current thread is exiting which +- // should not happen since we're trying to do a RedefineClasses +- guarantee(state != NULL, "exiting thread calling load_new_class_versions"); +- for (int i = 0; i < _class_count; i++) { +- oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); +- // classes for primitives cannot be redefined +- if (!is_modifiable_class(mirror)) { +- return JVMTI_ERROR_UNMODIFIABLE_CLASS; +- } +- klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- Symbol* the_class_sym = the_class->name(); +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loading name=%s kind=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), _class_load_kind, +- os::available_memory() >> 10)); +- +- ClassFileStream st((u1*) _class_defs[i].class_bytes, +- _class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); +- +- // Parse the stream. +- Handle the_class_loader(THREAD, the_class->class_loader()); +- Handle protection_domain(THREAD, the_class->protection_domain()); +- // Set redefined class handle in JvmtiThreadState class. +- // This redefined class is sent to agent event handler for class file +- // load hook event. +- state->set_class_being_redefined(&the_class, _class_load_kind); +- +- klassOop k = SystemDictionary::parse_stream(the_class_sym, +- the_class_loader, +- protection_domain, +- &st, +- THREAD); +- // Clear class_being_redefined just to be sure. 
+- state->clear_class_being_redefined(); +- +- // TODO: if this is retransform, and nothing changed we can skip it +- +- instanceKlassHandle scratch_class (THREAD, k); +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("parse_stream exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- +- if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { +- return JVMTI_ERROR_UNSUPPORTED_VERSION; +- } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { +- return JVMTI_ERROR_INVALID_CLASS_FORMAT; +- } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { +- return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; +- } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { +- // The message will be "XXX (wrong name: YYY)" +- return JVMTI_ERROR_NAMES_DONT_MATCH; +- } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { // Just in case more exceptions can be thrown.. +- return JVMTI_ERROR_FAILS_VERIFICATION; ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check interfaces ++ ++ // Interfaces removed? ++ objArrayOop old_interfaces = the_class->transitive_interfaces(); ++ for (i = 0; i<old_interfaces->length(); i++) { ++ instanceKlassHandle old_interface((klassOop)old_interfaces->obj_at(i)); ++ if (!new_class->implements_interface_any_version(old_interface())) { ++ result = result | Klass::RemoveSuperType | Klass::ModifyClass; ++ RC_TRACE(0x00000002, ("Removed interface %s", ++ old_interface->name()->as_C_string())); ++ ++ if (!old_interface->has_subtype_changed()) { ++ RC_TRACE(0x00000002, ("Subtype changed of interface %s", ++ old_interface->name()->as_C_string())); ++ old_interface->set_subtype_changed(true); + } + } +- +- // Ensure class is linked before redefine +- if (!the_class->is_linked()) { +- the_class->link_class(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("link_class exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; ++ } ++ ++ // Interfaces added? ++ objArrayOop new_interfaces = new_class->transitive_interfaces(); ++ for (i = 0; i<new_interfaces->length(); i++) { ++ if (!the_class->implements_interface_any_version((klassOop)new_interfaces->obj_at(i))) { ++ result = result | Klass::ModifyClass; ++ RC_TRACE(0x00000002, ("Added interface %s", ++ ((klassOop)new_interfaces->obj_at(i))->klass_part()->name()->as_C_string())); ++ } ++ } ++ ++ ++ // Check whether class modifiers are the same. ++ jushort old_flags = (jushort) the_class->access_flags().get_flags(); ++ jushort new_flags = (jushort) new_class->access_flags().get_flags(); ++ if (old_flags != new_flags) { ++ // TODO (tw): Can this have any effects? ++ } ++ ++ // Check if the number, names, types and order of fields declared in these classes ++ // are the same. 
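// The field check below walks the old and new JavaFieldStreams in lockstep;
// any difference in count, offset, name, or signature marks the class as
// ModifyInstances. A stand-alone sketch of that comparison with toy types
// (Field and the vectors are stand-ins for the field streams):
#include <string>
#include <vector>

struct Field { int offset; std::string name, signature; };

bool instances_need_update(const std::vector<Field>& old_fields,
                           const std::vector<Field>& new_fields) {
  if (old_fields.size() != new_fields.size()) return true;  // field added or removed
  for (size_t i = 0; i < old_fields.size(); i++) {
    if (old_fields[i].offset    != new_fields[i].offset ||
        old_fields[i].name      != new_fields[i].name   ||
        old_fields[i].signature != new_fields[i].signature)
      return true;                                          // instance layout changed
  }
  return false;
}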
++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ // access ++ old_flags = old_fs.access_flags().as_short(); ++ new_flags = new_fs.access_flags().as_short(); ++ if ((old_flags ^ new_flags) & JVM_RECOGNIZED_FIELD_MODIFIERS) { ++ // (tw) Can this have any effects? ++ } ++ // offset ++ if (old_fs.offset() != new_fs.offset()) { ++ result = result | Klass::ModifyInstances; ++ } ++ // name and signature ++ Symbol* name_sym1 = the_class->constants()->symbol_at(old_fs.name_index()); ++ Symbol* sig_sym1 = the_class->constants()->symbol_at(old_fs.signature_index()); ++ Symbol* name_sym2 = new_class->constants()->symbol_at(new_fs.name_index()); ++ Symbol* sig_sym2 = new_class->constants()->symbol_at(new_fs.signature_index()); ++ if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { ++ result = result | Klass::ModifyInstances; ++ } ++ } ++ ++ if (!old_fs.done() || !new_fs.done()) { ++ result = result | Klass::ModifyInstances; ++ } ++ ++ // Do a parallel walk through the old and new methods. Detect ++ // cases where they match (exist in both), have been added in ++ // the new methods, or have been deleted (exist only in the ++ // old methods). The class file parser places methods in order ++ // by method name, but does not order overloaded methods by ++ // signature. In order to determine what fate befell the methods, ++ // this code places the overloaded new methods that have matching ++ // old methods in the same order as the old methods and places ++ // new overloaded methods at the end of overloaded methods of ++ // that name. The code for this order normalization is adapted ++ // from the algorithm used in instanceKlass::find_method(). ++ // Since we are swapping out of order entries as we find them, ++ // we only have to search forward through the overloaded methods. ++ // Methods which are added and have the same name as an existing ++ // method (but different signature) will be put at the end of ++ // the methods with that name, and the name mismatch code will ++ // handle them. 
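// The long comment above describes a two-pointer walk over the old and new
// method arrays, both sorted by name. A stand-alone sketch of the
// classification step (matched / added / deleted) follows; it ignores the
// in-place overload reordering the real loop performs, so two same-name
// methods with different signatures degrade to deleted+added here, where the
// patch pairs them up by searching forward through the overloads.
#include <cstdio>
#include <string>
#include <vector>

struct Method { std::string name, signature; };

void classify(const std::vector<Method>& olds, const std::vector<Method>& news) {
  size_t oi = 0, ni = 0;
  while (oi < olds.size() || ni < news.size()) {
    if (oi == olds.size()) {
      std::printf("added: %s\n", news[ni++].name.c_str());
    } else if (ni == news.size()) {
      std::printf("deleted: %s\n", olds[oi++].name.c_str());
    } else if (olds[oi].name == news[ni].name &&
               olds[oi].signature == news[ni].signature) {
      std::printf("matched: %s\n", olds[oi].name.c_str());
      ++oi; ++ni;
    } else if (olds[oi].name > news[ni].name) {
      std::printf("added: %s\n", news[ni++].name.c_str());   // new name sorts first => added
    } else {
      std::printf("deleted: %s\n", olds[oi++].name.c_str()); // old name absent => deleted
    }
  }
}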
++ objArrayHandle k_old_methods(the_class->methods()); ++ objArrayHandle k_new_methods(new_class->methods()); ++ int n_old_methods = k_old_methods->length(); ++ int n_new_methods = k_new_methods->length(); ++ ++ int ni = 0; ++ int oi = 0; ++ while (true) { ++ methodOop k_old_method; ++ methodOop k_new_method; ++ enum { matched, added, deleted, undetermined } method_was = undetermined; ++ ++ if (oi >= n_old_methods) { ++ if (ni >= n_new_methods) { ++ break; // we've looked at everything, done ++ } ++ // New method at the end ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ method_was = added; ++ } else if (ni >= n_new_methods) { ++ // Old method, at the end, is deleted ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ method_was = deleted; ++ } else { ++ // There are more methods in both the old and new lists ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ if (k_old_method->name() != k_new_method->name()) { ++ // Methods are sorted by method name, so a mismatch means added ++ // or deleted ++ if (k_old_method->name()->fast_compare(k_new_method->name()) > 0) { ++ method_was = added; + } else { +- return JVMTI_ERROR_INTERNAL; ++ method_was = deleted; ++ } ++ } else if (k_old_method->signature() == k_new_method->signature()) { ++ // Both the name and signature match ++ method_was = matched; ++ } else { ++ // The name matches, but the signature doesn't, which means we have to ++ // search forward through the new overloaded methods. ++ int nj; // outside the loop for post-loop check ++ for (nj = ni + 1; nj < n_new_methods; nj++) { ++ methodOop m = (methodOop)k_new_methods->obj_at(nj); ++ if (k_old_method->name() != m->name()) { ++ // reached another method name so no more overloaded methods ++ method_was = deleted; ++ break; ++ } ++ if (k_old_method->signature() == m->signature()) { ++ // found a match so swap the methods ++ k_new_methods->obj_at_put(ni, m); ++ k_new_methods->obj_at_put(nj, k_new_method); ++ k_new_method = m; ++ method_was = matched; ++ break; ++ } ++ } ++ ++ if (nj >= n_new_methods) { ++ // reached the end without a match; so method was deleted ++ method_was = deleted; + } + } + } + +- // Do the validity checks in compare_and_normalize_class_versions() +- // before verifying the byte codes. By doing these checks first, we +- // limit the number of functions that require redirection from +- // the_class to scratch_class. In particular, we don't have to +- // modify JNI GetSuperclass() and thus won't change its performance. +- jvmtiError res = compare_and_normalize_class_versions(the_class, +- scratch_class); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ switch (method_was) { ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ // (tw) Can this have any effects? Probably yes on vtables? ++ result = result | Klass::ModifyClass; + } +- +- // verify what the caller passed us + { +- // The bug 6214132 caused the verification to fail. +- // Information about the_class and scratch_class is temporarily +- // recorded into jvmtiThreadState. This data is used to redirect +- // the_class to scratch_class in the JVM_* functions called by the +- // verifier. Please, refer to jvmtiThreadState.hpp for the detailed +- // description. 
+- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify( +- scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes exception: '%s'", ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller the bytecodes are bad +- return JVMTI_ERROR_FAILS_VERIFICATION; ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = new_class->method_with_idnum(old_num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(old_num); ++ RC_TRACE(0x00000002, ("swapping idnum of new and old method %d / %d!", ++ new_num, ++ old_num)); ++ // swap_all_method_annotations(old_num, new_num, new_class); + } + } +- +- res = merge_cp_and_rewrite(the_class, scratch_class, THREAD); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni, ++ k_old_method->name_and_sig_as_C_string(), oi)); ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ result = result | Klass::ModifyClass; + } +- +- if (VerifyMergedCPBytecodes) { +- // verify what we have done during constant pool merging +- { +- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify(scratch_class, Verifier::ThrowException, true, THREAD); ++ { ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ result = result | Klass::ModifyClass; + } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes post merge-CP exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller that constant pool merging screwed up +- return JVMTI_ERROR_INTERNAL; ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = new_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ //swap_all_method_annotations(new_num, num, new_class); ++ } ++ RC_TRACE(0x00000001, ("Method added: new: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni)); ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private 
should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ result = result | Klass::ModifyClass; ++ } ++ RC_TRACE(0x00000001, ("Method deleted: old: %s [%d]", ++ k_old_method->name_and_sig_as_C_string(), oi)); ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ if (new_class()->size() != new_class->old_version()->size()) { ++ result |= Klass::ModifyClassSize; ++ } ++ ++ if (new_class->size_helper() != ((instanceKlass*)(new_class->old_version()->klass_part()))->size_helper()) { ++ result |= Klass::ModifyInstanceSize; ++ } ++ ++ methodHandle instanceTransformerMethod(new_class->find_method(vmSymbols::transformer_name(), vmSymbols::void_method_signature())); ++ if (!instanceTransformerMethod.is_null() && !instanceTransformerMethod->is_static()) { ++ result |= Klass::HasInstanceTransformer; ++ } ++ ++ // (tw) Check method bodies to be able to return NoChange? ++ return result; ++} ++ ++void VM_RedefineClasses::calculate_instance_update_information(klassOop new_version) { ++ ++ class UpdateFieldsEvolutionClosure : public FieldEvolutionClosure { ++ ++ private: ++ ++ GrowableArray<int> info; ++ int curPosition; ++ bool copy_backwards; ++ ++ public: ++ ++ bool does_copy_backwards() { ++ return copy_backwards; ++ } ++ ++ UpdateFieldsEvolutionClosure(klassOop klass) { ++ ++ int base_offset = instanceOopDesc::base_offset_in_bytes(); ++ ++ if (klass->klass_part()->newest_version() == SystemDictionary::Reference_klass()->klass_part()->newest_version()) { ++ base_offset += java_lang_ref_Reference::number_of_fake_oop_fields*size_of_type(T_OBJECT); ++ } ++ ++ info.append(base_offset); ++ info.append(0); ++ curPosition = base_offset; ++ copy_backwards = false; ++ } ++ ++ GrowableArray<int> &finish() { ++ info.append(0); ++ return info; ++ } ++ ++ virtual void do_new_field(fieldDescriptor* fd){ ++ int alignment = fd->offset() - curPosition; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } ++ ++ int size = size_of_type(fd->field_type()); ++ fill(size); ++ } ++ ++ private: ++ ++ void fill(int size) { ++ if (info.length() > 0 && info.at(info.length() - 1) < 0) { ++ (*info.adr_at(info.length() - 1)) -= size; ++ } else { ++ info.append(-size); ++ } ++ ++ curPosition += size; ++ } ++ ++ int size_of_type(BasicType type) { ++ int size = 0; ++ switch(type) { ++ case T_BOOLEAN: ++ size = sizeof(jboolean); ++ break; ++ ++ case T_CHAR: ++ size = (sizeof(jchar)); ++ break; ++ ++ case T_FLOAT: ++ size = (sizeof(jfloat)); ++ break; ++ ++ case T_DOUBLE: ++ size = (sizeof(jdouble)); ++ break; ++ ++ case T_BYTE: ++ size = (sizeof(jbyte)); ++ break; ++ ++ case T_SHORT: ++ size = (sizeof(jshort)); ++ break; ++ ++ case T_INT: ++ size = (sizeof(jint)); ++ break; ++ ++ case T_LONG: ++ size = (sizeof(jlong)); ++ break; ++ ++ case T_OBJECT: ++ case T_ARRAY: ++ if (UseCompressedOops) { ++ size = sizeof(narrowOop); ++ } else { ++ size = (sizeof(oop)); ++ } ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ assert(size > 0, ""); ++ return size; ++ ++ } ++ ++ public: ++ ++ virtual void do_old_field(fieldDescriptor* fd){} ++ ++ virtual void do_changed_field(fieldDescriptor* old_fd, fieldDescriptor *new_fd){ ++ ++ int alignment = new_fd->offset() - curPosition; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } ++ ++ assert(old_fd->field_type() == 
new_fd->field_type(), ""); ++ assert(curPosition == new_fd->offset(), "must be correct offset!"); ++ ++ int offset = old_fd->offset(); ++ int size = size_of_type(old_fd->field_type()); ++ ++ int prevEnd = -1; ++ if (info.length() > 0 && info.at(info.length() - 1) > 0) { ++ prevEnd = info.at(info.length() - 2) + info.at(info.length() - 1); ++ } ++ ++ if (prevEnd == offset) { ++ info.at_put(info.length() - 2, info.at(info.length() - 2) + size); ++ } else { ++ info.append(size); ++ info.append(offset); ++ } ++ ++ if (old_fd->offset() < new_fd->offset()) { ++ copy_backwards = true; ++ } ++ ++ transfer_special_access_flags(old_fd, new_fd); ++ ++ curPosition += size; ++ } ++ }; ++ ++ UpdateFieldsEvolutionClosure cl(new_version); ++ ((instanceKlass*)new_version->klass_part())->do_fields_evolution(&cl); ++ ++ GrowableArray<int> result = cl.finish(); ++ ((instanceKlass*)new_version->klass_part())->store_update_information(result); ++ ((instanceKlass*)new_version->klass_part())->set_copying_backwards(cl.does_copy_backwards()); ++ ++ if (RC_TRACE_ENABLED(0x00000002)) { ++ RC_TRACE(0x00000002, ("Instance update information for %s:", ++ new_version->klass_part()->name()->as_C_string())); ++ if (cl.does_copy_backwards()) { ++ RC_TRACE(0x00000002, ("\tDoes copy backwards!")); ++ } ++ for (int i=0; i<result.length(); i++) { ++ int curNum = result.at(i); ++ if (curNum < 0) { ++ RC_TRACE(0x00000002, ("\t%d CLEAN", curNum)); ++ } else if (curNum > 0) { ++ RC_TRACE(0x00000002, ("\t%d COPY from %d", curNum, result.at(i + 1))); ++ i++; ++ } else { ++ RC_TRACE(0x00000002, ("\tEND")); ++ } ++ } ++ } ++} ++ ++Symbol* VM_RedefineClasses::signature_to_class_name(Symbol* signature) { ++ assert(FieldType::is_obj(signature), ""); ++ return SymbolTable::new_symbol(signature->as_C_string() + 1, signature->utf8_length() - 2, Thread::current()); ++} ++ ++void VM_RedefineClasses::calculate_type_check_information(klassOop klass) { ++ if (klass->klass_part()->is_redefining()) { ++ klass = klass->klass_part()->old_version(); ++ } ++ ++ // We found an instance klass! ++ instanceKlass *cur_instance_klass = instanceKlass::cast(klass); ++ GrowableArray< Pair<int, klassOop> > type_check_information; ++ ++ class MyFieldClosure : public FieldClosure { ++ ++ public: ++ ++ GrowableArray< Pair<int, klassOop> > *_arr; ++ ++ MyFieldClosure(GrowableArray< Pair<int, klassOop> > *arr) { ++ _arr = arr; ++ } ++ ++ virtual void do_field(fieldDescriptor* fd) { ++ if (fd->field_type() == T_OBJECT) { ++ Symbol* signature = fd->signature(); ++ if (FieldType::is_obj(signature)) { ++ Symbol* name = signature_to_class_name(signature); ++ klassOop field_klass; ++ if (is_field_dangerous(name, fd, field_klass)) { ++ RC_TRACE(0x00000002, ("Found dangerous field %s in klass %s of type %s", ++ fd->name()->as_C_string(), ++ fd->field_holder()->klass_part()->name()->as_C_string(), ++ name->as_C_string())); ++ _arr->append(Pair<int, klassOop>(fd->offset(), field_klass->klass_part()->newest_version())); ++ } ++ } ++ ++ // Array fields can never be a problem! 
++ } ++ } ++ ++ bool is_field_dangerous(Symbol* klass_name, fieldDescriptor *fd, klassOop &field_klass) { ++ field_klass = SystemDictionary::find(klass_name, fd->field_holder()->klass_part()->class_loader(), ++ fd->field_holder()->klass_part()->protection_domain(), Thread::current()); ++ if(field_klass != NULL) { ++ if (field_klass->klass_part()->is_redefining()) { ++ field_klass = field_klass->klass_part()->old_version(); ++ } ++ if (field_klass->klass_part()->has_subtype_changed()) { ++ return true; ++ } ++ } ++ return false; ++ } ++ }; ++ ++ MyFieldClosure fieldClosure(&type_check_information); ++ cur_instance_klass->do_nonstatic_fields(&fieldClosure); ++ ++ if (type_check_information.length() > 0) { ++ type_check_information.append(Pair<int, klassOop>(-1, NULL)); ++ cur_instance_klass->store_type_check_information(type_check_information); ++ } ++} ++ ++bool VM_RedefineClasses::check_field_value_types() { ++ ++ Thread *THREAD = Thread::current(); ++ class CheckFieldTypesClosure : public ObjectClosure { ++ ++ private: ++ ++ bool _result; ++ ++ public: ++ ++ CheckFieldTypesClosure() { ++ _result = true; ++ } ++ ++ bool result() { return _result; } ++ ++ virtual void do_object(oop obj) { ++ ++ if (!_result) { ++ return; ++ } ++ ++ if (obj->is_objArray()) { ++ ++ objArrayOop array = objArrayOop(obj); ++ ++ klassOop element_klass = objArrayKlass::cast(array->klass())->element_klass(); ++ ++ if (element_klass->klass_part()->has_subtype_changed()) { ++ int length = array->length(); ++ for (int i=0; i<length; i++) { ++ oop element = array->obj_at(i); ++ if (element != NULL && element->blueprint()->newest_version()->klass_part()->is_redefining()) { ++ // Check subtype relationship to static type of array ++ if (!element->blueprint()->newest_version()->klass_part()->is_subtype_of(element_klass->klass_part()->newest_version())) { ++ RC_TRACE(0x00000001, ("Array value is INVALID - abort redefinition (static_type=%s, index=%d, dynamic_type=%s)", ++ element_klass->klass_part()->name()->as_C_string(), ++ i, ++ element->blueprint()->name()->as_C_string())); ++ _result = false; ++ break; ++ } ++ } ++ } ++ } ++ ++ } else { ++ Pair<int, klassOop> *cur = obj->klass()->klass_part()->type_check_information(); ++ if (cur != NULL) { ++ // Type check information exists for this oop ++ while ((*cur).left() != -1) { ++ check_field(obj, (*cur).left(), (*cur).right()); ++ cur++; ++ } + } + } + } + +- Rewriter::rewrite(scratch_class, THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- Rewriter::relocate_and_link(scratch_class, THREAD); +- } +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- return JVMTI_ERROR_INTERNAL; +- } +- } +- +- _scratch_classes[i] = scratch_class; +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loaded name=%s (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), os::available_memory() >> 10)); +- } +- +- return JVMTI_ERROR_NONE; +-} +- +- +-// Map old_index to new_index as needed. scratch_cp is only needed +-// for RC_TRACE() calls. 
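A note on the update information built by calculate_instance_update_information() further up: the result is a flat int array in which a positive entry n followed by an offset means "copy n bytes of the old instance starting at that offset", a negative entry means "zero-fill |n| bytes" (a newly added field), and 0 terminates the stream; the trace loop above prints exactly these three cases. A minimal interpreter for the encoding could look like this (illustration only; the actual consumer is MarkSweep::update_fields, which additionally has to honor the copy_backwards flag when source and destination ranges overlap in place):

#include <cstring>
#include <vector>

// Applies an encoded update-information stream, copying an old instance's
// contents into the new field layout. The destination cursor only moves
// forward; sources are explicit offsets into the old instance.
static void apply_update_info(const std::vector<int>& info,
                              const char* old_obj, char* new_obj) {
  int dst = 0;
  for (size_t i = 0; info[i] != 0; i++) {
    if (info[i] > 0) {
      int n = info[i];
      int src = info[++i];                      // a size is followed by its offset
      std::memcpy(new_obj + dst, old_obj + src, n);
      dst += n;
    } else {
      std::memset(new_obj + dst, 0, -info[i]);  // new field: zero-fill
      dst += -info[i];
    }
  }
}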
+-void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp, +- int old_index, int new_index) { +- if (find_new_index(old_index) != 0) { +- // old_index is already mapped +- return; +- } +- +- if (old_index == new_index) { +- // no mapping is needed +- return; +- } +- +- _index_map_p->at_put(old_index, new_index); +- _index_map_count++; +- +- RC_TRACE(0x00040000, ("mapped tag %d at index %d to %d", +- scratch_cp->tag_at(old_index).value(), old_index, new_index)); +-} // end map_index() +- +- +-// Merge old_cp and scratch_cp and return the results of the merge via +-// merge_cp_p. The number of entries in *merge_cp_p is returned via +-// merge_cp_length_p. The entries in old_cp occupy the same locations +-// in *merge_cp_p. Also creates a map of indices from entries in +-// scratch_cp to the corresponding entry in *merge_cp_p. Index map +-// entries are only created for entries in scratch_cp that occupy a +-// different location in *merged_cp_p. +-bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS) { +- +- if (merge_cp_p == NULL) { +- assert(false, "caller must provide scatch constantPool"); +- return false; // robustness +- } +- if (merge_cp_length_p == NULL) { +- assert(false, "caller must provide scatch CP length"); +- return false; // robustness +- } +- // Worst case we need old_cp->length() + scratch_cp()->length(), +- // but the caller might be smart so make sure we have at least +- // the minimum. +- if ((*merge_cp_p)->length() < old_cp->length()) { +- assert(false, "merge area too small"); +- return false; // robustness +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("old_cp_len=%d, scratch_cp_len=%d", old_cp->length(), +- scratch_cp->length())); +- +- { +- // Pass 0: +- // The old_cp is copied to *merge_cp_p; this means that any code +- // using old_cp does not have to change. This work looks like a +- // perfect fit for constantPoolOop::copy_cp_to(), but we need to +- // handle one special case: +- // - revert JVM_CONSTANT_Class to JVM_CONSTANT_UnresolvedClass +- // This will make verification happy. +- +- int old_i; // index into old_cp +- +- // index zero (0) is not used in constantPools +- for (old_i = 1; old_i < old_cp->length(); old_i++) { +- // leave debugging crumb +- jbyte old_tag = old_cp->tag_at(old_i).value(); +- switch (old_tag) { +- case JVM_CONSTANT_Class: +- case JVM_CONSTANT_UnresolvedClass: +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- // May be resolving while calling this so do the same for +- // JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition) +- (*merge_cp_p)->unresolved_klass_at_put(old_i, +- old_cp->klass_name_at(old_i)); +- break; +- +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // just copy the entry to *merge_cp_p, but double and long take +- // two constant pool entries +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- old_i++; +- break; +- +- default: +- // just copy the entry to *merge_cp_p +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- break; +- } +- } // end for each old_cp entry +- +- // We don't need to sanity check that *merge_cp_length_p is within +- // *merge_cp_p bounds since we have the minimum on-entry check above. 
+- (*merge_cp_length_p) = old_i; +- } +- +- // merge_cp_len should be the same as old_cp->length() at this point +- // so this trace message is really a "warm-and-breathing" message. +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 0: merge_cp_len=%d", *merge_cp_length_p)); +- +- int scratch_i; // index into scratch_cp +- { +- // Pass 1a: +- // Compare scratch_cp entries to the old_cp entries that we have +- // already copied to *merge_cp_p. In this pass, we are eliminating +- // exact duplicates (matching entry at same index) so we only +- // compare entries in the common indice range. +- int increment = 1; +- int pass1a_length = MIN2(old_cp->length(), scratch_cp->length()); +- for (scratch_i = 1; scratch_i < pass1a_length; scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- bool match = scratch_cp->compare_entry_to(scratch_i, *merge_cp_p, +- scratch_i, CHECK_0); +- if (match) { +- // found a match at the same index so nothing more to do +- continue; +- } else if (is_unresolved_class_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved class entry at the same index +- // with the same string value. Since Pass 0 reverted any +- // class entries to unresolved class entries in *merge_cp_p, +- // we go with the unresolved class entry. +- continue; +- } else if (is_unresolved_string_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved string entry at the same index +- // with the same string value. We can live with whichever +- // happens to be at scratch_i in *merge_cp_p. +- continue; +- } +- +- int found_i = scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, +- CHECK_0); +- if (found_i != 0) { +- guarantee(found_i != scratch_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // The find_matching_entry() call above could fail to find a match +- // due to a resolved versus unresolved class or string entry situation +- // like we solved above with the is_unresolved_*_mismatch() calls. +- // However, we would have to call is_unresolved_*_mismatch() over +- // all of *merge_cp_p (potentially) and that doesn't seem to be +- // worth the time. +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1a: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- +- if (scratch_i < scratch_cp->length()) { +- // Pass 1b: +- // old_cp is smaller than scratch_cp so there are entries in +- // scratch_cp that we have not yet processed. We take care of +- // those now. 
+- int increment = 1; +- for (; scratch_i < scratch_cp->length(); scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- int found_i = +- scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, CHECK_0); +- if (found_i != 0) { +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- } +- +- return true; +-} // end merge_constant_pools() +- +- +-// Merge constant pools between the_class and scratch_class and +-// potentially rewrite bytecodes in scratch_class to use the merged +-// constant pool. +-jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class, +- TRAPS) { +- // worst case merged constant pool length is old and new combined +- int merge_cp_length = the_class->constants()->length() +- + scratch_class->constants()->length(); +- +- constantPoolHandle old_cp(THREAD, the_class->constants()); +- constantPoolHandle scratch_cp(THREAD, scratch_class->constants()); +- +- // Constant pools are not easily reused so we allocate a new one +- // each time. +- // merge_cp is created unsafe for concurrent GC processing. It +- // should be marked safe before discarding it. Even though +- // garbage, if it crosses a card boundary, it may be scanned +- // in order to find the start of the first complete object on the card. +- constantPoolHandle merge_cp(THREAD, +- oopFactory::new_constantPool(merge_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- int orig_length = old_cp->orig_length(); +- if (orig_length == 0) { +- // This old_cp is an actual original constant pool. We save +- // the original length in the merged constant pool so that +- // merge_constant_pools() can be more efficient. If a constant +- // pool has a non-zero orig_length() value, then that constant +- // pool was created by a merge operation in RedefineClasses. +- merge_cp->set_orig_length(old_cp->length()); +- } else { +- // This old_cp is a merged constant pool from a previous +- // RedefineClasses() calls so just copy the orig_length() +- // value. +- merge_cp->set_orig_length(old_cp->orig_length()); +- } +- +- ResourceMark rm(THREAD); +- _index_map_count = 0; +- _index_map_p = new intArray(scratch_cp->length(), -1); +- +- bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp, +- &merge_cp_length, THREAD); +- if (!result) { +- // The merge can fail due to memory allocation failure or due +- // to robustness checks. +- return JVMTI_ERROR_INTERNAL; +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("merge_cp_len=%d, index_map_len=%d", merge_cp_length, _index_map_count)); +- +- if (_index_map_count == 0) { +- // there is nothing to map between the new and merged constant pools +- +- if (old_cp->length() == scratch_cp->length()) { +- // The old and new constant pools are the same length and the +- // index map is empty. 
This means that the three constant pools +- // are equivalent (but not the same). Unfortunately, the new +- // constant pool has not gone through link resolution nor have +- // the new class bytecodes gone through constant pool cache +- // rewriting so we can't use the old constant pool with the new +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else if (old_cp->length() < scratch_cp->length()) { +- // The old constant pool has fewer entries than the new constant +- // pool and the index map is empty. This means the new constant +- // pool is a superset of the old constant pool. However, the old +- // class bytecodes have already gone through constant pool cache +- // rewriting so we can't use the new constant pool with the old +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else { +- // The old constant pool has more entries than the new constant +- // pool and the index map is empty. This means that both the old +- // and merged constant pools are supersets of the new constant +- // pool. +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool; the previous new constant pool will +- // get GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- // drop local ref to the merged constant pool +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); +- } +- } else { +- if (RC_TRACE_ENABLED(0x00040000)) { +- // don't want to loop unless we are tracing +- int count = 0; +- for (int i = 1; i < _index_map_p->length(); i++) { +- int value = _index_map_p->at(i); +- +- if (value != -1) { +- RC_TRACE_WITH_THREAD(0x00040000, THREAD, +- ("index_map[%d]: old=%d new=%d", count, i, value)); +- count++; ++ void check_field(oop obj, int offset, klassOop static_type) { ++ oop field_value = obj->obj_field(offset); ++ if (field_value != NULL) { ++ // Field is not null ++ if (field_value->klass()->klass_part()->newest_version()->klass_part()->is_subtype_of(static_type)) { ++ // We are OK ++ RC_TRACE(0x00008000, ("Field value is OK (klass=%s, static_type=%s, offset=%d, dynamic_type=%s)", ++ obj->klass()->klass_part()->name()->as_C_string(), ++ static_type->klass_part()->name()->as_C_string(), ++ offset, ++ field_value->klass()->klass_part()->name()->as_C_string())); ++ } else { ++ // Failure! ++ RC_TRACE(0x00000001, ("Field value is INVALID - abort redefinition (klass=%s, static_type=%s, offset=%d, dynamic_type=%s)", ++ obj->klass()->klass_part()->name()->as_C_string(), ++ static_type->klass_part()->name()->as_C_string(), ++ offset, ++ field_value->klass()->klass_part()->name()->as_C_string())); ++ _result = false; + } + } + } +- +- // We have entries mapped between the new and merged constant pools +- // so we have to rewrite some constant pool references. +- if (!rewrite_cp_refs(scratch_class, THREAD)) { +- return JVMTI_ERROR_INTERNAL; ++ }; ++ ++ CheckFieldTypesClosure myObjectClosure; ++ ++ // make sure that heap is parsable (fills TLABs with filler objects) ++ Universe::heap()->ensure_parsability(false); // no need to retire TLABs ++ ++ // do the iteration ++ // If this operation encounters a bad object when using CMS, ++ // consider using safe_object_iterate() which avoids perm gen ++ // objects that may contain bad references. 
++ Universe::heap()->object_iterate(&myObjectClosure);
++
++ // when sharing is enabled we must iterate over the shared spaces
++ if (UseSharedSpaces) {
++ GenCollectedHeap* gch = GenCollectedHeap::heap();
++ CompactingPermGenGen* gen = (CompactingPermGenGen*)gch->perm_gen();
++ gen->ro_space()->object_iterate(&myObjectClosure);
++ gen->rw_space()->object_iterate(&myObjectClosure);
++ }
++
++ return myObjectClosure.result();
++}
++
++void VM_RedefineClasses::clear_type_check_information(klassOop k) {
++ if (k->klass_part()->is_redefining()) {
++ k = k->klass_part()->old_version();
++ }
++
++ // We found an instance klass!
++ instanceKlass *cur_instance_klass = instanceKlass::cast(k);
++ cur_instance_klass->clear_type_check_information();
++}
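The type check information consumed by check_field_value_types() above is a per-class array of (field offset, required static type) pairs terminated by (-1, NULL), as stored by calculate_type_check_information(). Stripped of the VM types, the consuming loop reduces to the following sketch (illustrative only):

// A sentinel-terminated walk over recorded dangerous fields, mirroring the
// while ((*cur).left() != -1) loop in CheckFieldTypesClosure::do_object.
struct FieldCheck { int offset; const void* required_type; };

static bool verify_fields(const FieldCheck* cur,
                          bool (*check_one)(int offset, const void* type)) {
  for (; cur->offset != -1; cur++) {
    if (!check_one(cur->offset, cur->required_type)) {
      return false;  // a field holds a value outside its static type
    }
  }
  return true;
}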
++
++void VM_RedefineClasses::update_active_methods() {
++
++ RC_TRACE(0x00000002, ("Updating active methods"));
++ JavaThread *java_thread = Threads::first();
++ while (java_thread != NULL) {
++
++ int stack_depth = 0;
++ if (java_thread->has_last_Java_frame()) {
++
++ RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name()));
++
++ // vframes are resource allocated
++ Thread* current_thread = Thread::current();
++ ResourceMark rm(current_thread);
++ HandleMark hm(current_thread);
++
++ RegisterMap reg_map(java_thread);
++ frame f = java_thread->last_frame();
++ vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++ frame* last_entry_frame = NULL;
++
++ while (vf != NULL) {
++ if (vf->is_java_frame()) {
++ // java frame (interpreted, compiled, ...)
++ javaVFrame *jvf = javaVFrame::cast(vf);
++
++ if (!(jvf->method()->is_native())) {
++ int bci = jvf->bci();
++ RC_TRACE(0x00000400, ("found method: %s / bci=%d", jvf->method()->name()->as_C_string(), bci));
++ ResourceMark rm(Thread::current());
++ HandleMark hm;
++ instanceKlassHandle klass(jvf->method()->method_holder());
++
++ if (jvf->method()->new_version() != NULL && jvf->is_interpreted_frame()) {
++
++
++ RC_TRACE(0x00000002, ("Found method that should just be updated to the newest version %s",
++ jvf->method()->name_and_sig_as_C_string()));
++
++ if (RC_TRACE_ENABLED(0x01000000)) {
++ int code_size = jvf->method()->code_size();
++ char *code_base_old = (char*)jvf->method()->code_base();
++ char *code_base_new = (char*)jvf->method()->new_version()->code_base();
++ for (int i=0; i<code_size; i++) {
++ tty->print_cr("old=%d new=%d", *code_base_old++, *code_base_new++);
++ }
++ jvf->method()->print_codes_on(tty);
++ jvf->method()->new_version()->print_codes_on(tty);
++ }
++
++ assert(jvf->is_interpreted_frame(), "Every frame must be interpreted!");
++ interpretedVFrame *iframe = (interpretedVFrame *)jvf;
++
++
++ if (RC_TRACE_ENABLED(0x01000000)) {
++ constantPoolCacheOop cp_old = jvf->method()->constants()->cache();
++ tty->print_cr("old cp");
++ for (int i=0; i<cp_old->length(); i++) {
++ cp_old->entry_at(i)->print(tty, i);
++ }
++ constantPoolCacheOop cp_new = jvf->method()->new_version()->constants()->cache();
++ tty->print_cr("new cp");
++ for (int i=0; i<cp_new->length(); i++) {
++ cp_new->entry_at(i)->print(tty, i);
++ }
++ }
++
++ iframe->set_method(jvf->method()->new_version(), bci);
++ RC_TRACE(0x00000002, ("Updated method to newer version"));
++ assert(jvf->method()->new_version() == NULL, "must be latest version");
++
++ }
++ }
++ }
++ vf = vf->sender();
++ }
+ }
+
+- // Replace the new constant pool with a shrunken copy of the
+- // merged constant pool so now the rewritten bytecodes have
+- // valid references; the previous new constant pool will get GCed.
+- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
+- THREAD);
+- merge_cp()->set_is_conc_safe(true);
++ // Advance to next thread
++ java_thread = java_thread->next();
+ }
+- assert(old_cp()->is_conc_safe(), "Just checking");
+- assert(scratch_cp()->is_conc_safe(), "Just checking");
+-
+- return JVMTI_ERROR_NONE;
+-} // end merge_cp_and_rewrite()
+-
+-
+-// Rewrite constant pool references in klass scratch_class.
+-bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class,
+- TRAPS) {
+-
+- // rewrite constant pool references in the methods:
+- if (!rewrite_cp_refs_in_methods(scratch_class, THREAD)) {
+- // propagate failure back to caller
++}
++
++void VM_RedefineClasses::method_forwarding() {
++
++ int forwarding_count = 0;
++ JavaThread *java_thread = Threads::first();
++ while (java_thread != NULL) {
++
++ int stack_depth = 0;
++ if (java_thread->has_last_Java_frame()) {
++
++ RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name()));
++
++ // vframes are resource allocated
++ Thread* current_thread = Thread::current();
++ ResourceMark rm(current_thread);
++ HandleMark hm(current_thread);
++
++ RegisterMap reg_map(java_thread);
++ frame f = java_thread->last_frame();
++ vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++ frame* last_entry_frame = NULL;
++
++ while (vf != NULL) {
++ if (vf->is_java_frame()) {
++ // java frame (interpreted, compiled, ...)
++ javaVFrame *jvf = javaVFrame::cast(vf);
++
++ if (!(jvf->method()->is_native())) {
++ RC_TRACE(0x00008000, ("found method: %s",
++ jvf->method()->name()->as_C_string()));
++ ResourceMark rm(Thread::current());
++ HandleMark hm;
++ instanceKlassHandle klass(jvf->method()->method_holder());
++ methodOop m = jvf->method();
++ int bci = jvf->bci();
++ RC_TRACE(0x00008000, ("klass redef %d",
++ klass->is_redefining()));
++
++ if (klass->new_version() != NULL && m->new_version() == NULL) {
++ RC_TRACE(0x00008000, ("found potential forwarding method: %s",
++ m->name()->as_C_string()));
++
++ klassOop new_klass = klass->newest_version();
++ methodOop new_method = new_klass->klass_part()->lookup_method(m->name(), m->signature());
++ RC_TRACE(0x00000002, ("%d %d",
++ new_method,
++ new_method != NULL && new_method->constMethod()->has_code_section_table()));
++
++ if (new_method != NULL && new_method->constMethod()->has_code_section_table()) {
++ RC_TRACE(0x00008000, ("found code section table for method: %s",
++ new_method->name()->as_C_string()));
++ m->set_forward_method(new_method);
++ if (new_method->max_locals() != m->max_locals()) {
++ tty->print_cr("new_m max locals: %d old_m max locals: %d", new_method->max_locals(), m->max_locals());
++ }
++ assert(new_method->max_locals() == m->max_locals(), "number of locals must match");
++ assert(new_method->max_stack() == m->max_stack(), "number of stack values must match");
++ if (jvf->is_interpreted_frame()) {
++ if (m->is_in_code_section(bci)) {
++ // We must transfer now and cannot delay until next NOP.
++ int new_bci = m->calculate_forward_bci(bci, new_method);
++ interpretedVFrame* iframe = interpretedVFrame::cast(jvf);
++ RC_TRACE(0x00000002, ("Transferring execution of %s to new method old_bci=%d new_bci=%d",
++ new_method->name()->as_C_string(),
++ bci,
++ new_bci));
++ iframe->set_method(new_method, new_bci);
++ } else {
++ RC_TRACE(0x00000002, ("Delaying method forwarding of %s because %d is not in a code section",
++ new_method->name()->as_C_string(),
++ bci));
++ }
++ } else {
++ RC_TRACE(0x00000002, ("Delaying method forwarding of %s because method is compiled",
++ new_method->name()->as_C_string()));
++ }
++ }
++ }
++ }
++ }
++ }
++ vf = vf->sender();
++ }
++ }
++
++ // Advance to next thread
++ java_thread = java_thread->next();
++ }
++
++ RC_TRACE(0x00000001, ("Method forwarding applied to %d methods",
++ forwarding_count));
++}
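The code section table behind is_in_code_section() and calculate_forward_bci() is defined elsewhere in the patch series; purely as an illustration of the bci mapping, one plausible shape is sketched below (the names and layout here are hypothetical, not the patch's actual data structure):

#include <vector>

// Hypothetical entry: a bci range in the old method that maps onto a range
// beginning at new_start in the forwarded method.
struct CodeSection { int old_start, old_end, new_start; };

// Returns the forwarded bci, or -1 when execution is not inside any section;
// in that case forwarding is delayed, as the traces above show.
static int forward_bci(const std::vector<CodeSection>& table, int bci) {
  for (const CodeSection& s : table) {
    if (bci >= s.old_start && bci < s.old_end) {
      return s.new_start + (bci - s.old_start);
    }
  }
  return -1;
}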
++
++bool VM_RedefineClasses::check_method_stacks() {
++
++ JavaThread *java_thread = Threads::first();
++ while (java_thread != NULL) {
++
++ int stack_depth = 0;
++ if (java_thread->has_last_Java_frame()) {
++
++ RC_TRACE(0x00000400, ("checking stack of Java thread %s", java_thread->name()));
++
++ // vframes are resource allocated
++ Thread* current_thread = Thread::current();
++ ResourceMark rm(current_thread);
++ HandleMark hm(current_thread);
++
++ RegisterMap reg_map(java_thread);
++ frame f = java_thread->last_frame();
++ vframe* vf = vframe::new_vframe(&f, &reg_map, java_thread);
++ frame* last_entry_frame = NULL;
++
++ while (vf != NULL) {
++ if (vf->is_java_frame()) {
++ // java frame (interpreted, compiled, ...)
++ javaVFrame *jvf = javaVFrame::cast(vf);
++
++ if (!(jvf->method()->is_native())) {
++ RC_TRACE(0x00000400, ("found method: %s", jvf->method()->name()->as_C_string()));
++ ResourceMark rm(Thread::current());
++ HandleMark hm;
++ instanceKlassHandle klass(jvf->method()->method_holder());
++
++ StackValueCollection *locals = jvf->locals();
++ const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
++ char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
++
++ for (int i=0; i<locals->size(); i++) {
++ StackValue *stack_value = locals->at(i);
++ if (stack_value->type() == T_OBJECT) {
++ Handle obj = stack_value->get_obj();
++ if (!obj.is_null() && obj->klass()->klass_part()->newest_version()->klass_part()->check_redefinition_flag(Klass::RemoveSuperType)) {
++
++ // OK, so this is a possible failure => check local variable table, if it could be OK.
++ bool result = false;
++ methodOop method = jvf->method();
++ if (method->has_localvariable_table()) {
++ LocalVariableTableElement *elem = jvf->method()->localvariable_table_start();
++ for (int j=0; j<method->localvariable_table_length(); j++) {
++
++ if (elem->slot == i) {
++
++ // Matching index found
++
++ if (elem->start_bci <= jvf->bci() && elem->start_bci + elem->length > jvf->bci()) {
++
++ // Also in range!!
++ Symbol* signature = jvf->method()->constants()->symbol_at(elem->descriptor_cp_index); ++ Symbol* klass_name = signature_to_class_name(signature); ++ ++ klassOop local_klass = SystemDictionary::find(klass_name, jvf->method()->method_holder()->klass_part()->class_loader(), jvf->method()->method_holder()->klass_part()->protection_domain(), Thread::current())->klass_part()->newest_version(); ++ klassOop cur = obj->klass()->klass_part()->newest_version(); ++ ++ // Field is not null ++ if (cur->klass_part()->newest_version()->klass_part()->is_subtype_of(local_klass)) { ++ // We are OK ++ RC_TRACE(0x00008000, ("Local variable value is OK (local_klass=%s, cur_klass=%s)", ++ local_klass->klass_part()->name()->as_C_string(), cur->klass_part()->name()->as_C_string())); ++ result = true; ++ } else { ++ // Failure! ++ RC_TRACE(0x00000001, ("Local variable value is INVALID - abort redefinition (local_klass=%s, cur_klass=%s)", ++ local_klass->klass_part()->name()->as_C_string(), ++ cur->klass_part()->name()->as_C_string())); ++ return false; ++ } ++ } ++ } ++ ++ elem++; ++ } ++ } else { ++ RC_TRACE(0x00000002, ("Method %s does not have a local variable table => abort", ++ method->name_and_sig_as_C_string())); ++ } ++ ++ if (!result) { ++ return false; ++ } ++ ++ RC_TRACE(0x00008000, ("Verifying class %s", ++ jvf->method()->method_holder()->klass_part()->name()->as_C_string())); ++ ++ Symbol* exception_name; ++ const size_t message_buffer_len = klass->name()->utf8_length() + 1024; ++ char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len); ++ ++ Thread::current()->set_pretend_new_universe(true); ++ ClassVerifier split_verifier(klass, Thread::current()); ++ split_verifier.verify_method(jvf->method(), Thread::current()); ++ exception_name = split_verifier.result(); ++ Thread::current()->set_pretend_new_universe(false); ++ ++ if (exception_name != NULL) { ++ ++ RC_TRACE(0x00000001, ("Verification of class %s failed", ++ jvf->method()->method_holder()->klass_part()->name()->as_C_string())); ++ RC_TRACE(0x00000001, ("Exception: %s", ++ exception_name->as_C_string())); ++ RC_TRACE(0x00000001, ("Message: %s", ++ message_buffer)); ++ Thread::current()->clear_pending_exception(); ++ return false; ++ } ++ ++ } ++ } ++ } ++ } ++ } ++ vf = vf->sender(); ++ } ++ } ++ ++ // Advance to next thread ++ java_thread = java_thread->next(); ++ } ++ ++ return true; ++} ++ ++bool VM_RedefineClasses::check_method(methodOop method) { ++ ++ ++ return true; ++} ++ ++// Warning: destroys redefinition level values of klasses. 
++bool VM_RedefineClasses::check_loaded_methods() { ++ ++ class CheckLoadedMethodsClosure : public ObjectClosure { ++ ++ private: ++ ++ bool _result; ++ GrowableArray<klassOop> *_dangerous_klasses; ++ ++ public: ++ CheckLoadedMethodsClosure(GrowableArray<klassOop> *dangerous_klasses) { ++ _result = true; ++ _dangerous_klasses = dangerous_klasses; ++ } ++ ++ bool result() { ++ return _result; ++ } ++ ++ bool is_class_dangerous(klassOop k) { ++ return k->klass_part()->newest_version()->klass_part()->check_redefinition_flag(Klass::RemoveSuperType); ++ } ++ ++ bool can_be_affected(instanceKlass *klass) { ++ ++ constantPoolOop cp = klass->constants(); ++ ++ Thread *THREAD = Thread::current(); ++ klassOop k; ++ Symbol* symbol; ++ ++ for (int i=1; i<cp->length(); i++) { ++ jbyte tag = cp->tag_at(i).value(); ++ switch(tag) { ++ case JVM_CONSTANT_Long: ++ case JVM_CONSTANT_Double: ++ i++; ++ break; ++ ++ case JVM_CONSTANT_Utf8: ++ case JVM_CONSTANT_Unicode: ++ case JVM_CONSTANT_Integer: ++ case JVM_CONSTANT_Float: ++ case JVM_CONSTANT_String: ++ case JVM_CONSTANT_Fieldref: ++ case JVM_CONSTANT_Methodref: ++ case JVM_CONSTANT_InterfaceMethodref: ++ case JVM_CONSTANT_ClassIndex: ++ case JVM_CONSTANT_UnresolvedString: ++ case JVM_CONSTANT_StringIndex: ++ case JVM_CONSTANT_UnresolvedClassInError: ++ case JVM_CONSTANT_Object: ++ // do nothing ++ break; ++ ++ case JVM_CONSTANT_Class: ++ k = cp->klass_at(i, CHECK_(true)); ++ if (is_class_dangerous(k)) { ++ RC_TRACE(0x00000002, ("Class %s is potentially affected, because at cp[%d] references class %s", ++ klass->name()->as_C_string(), ++ i, ++ k->klass_part()->name()->as_C_string())); ++ return true; ++ } ++ break; ++ ++ case JVM_CONSTANT_NameAndType: ++ symbol = cp->symbol_at(cp->signature_ref_index_at(i)); ++ if (symbol->byte_at(0) == '(') { ++ // This must be a method ++ SignatureStream signatureStream(symbol); ++ while (true) { ++ ++ if (signatureStream.is_array()) { ++ Symbol* cur_signature = signatureStream.as_symbol(Thread::current()); ++ if (is_type_signature_dangerous(cur_signature)) { ++ return true; ++ } ++ } else if (signatureStream.is_object()) { ++ if (is_symbol_dangerous(signatureStream.as_symbol(Thread::current()))) { ++ return true; ++ } ++ } ++ ++ if (signatureStream.at_return_type()) { ++ break; ++ } ++ ++ signatureStream.next(); ++ } ++ ++ } else if (is_type_signature_dangerous(symbol)) { ++ return true; ++ } ++ break; ++ ++ case JVM_CONSTANT_UnresolvedClass: ++ symbol = cp->unresolved_klass_at(i); ++ if (is_symbol_dangerous(symbol)) { ++ return true; ++ } ++ break; ++ ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ return false; ++ } ++ ++ bool is_type_signature_dangerous(Symbol* signature) { ++ // This must be a field type ++ if (FieldType::is_obj(signature)) { ++ Symbol* name = signature_to_class_name(signature); ++ if (is_symbol_dangerous(name)) { ++ return true; ++ } ++ } else if (FieldType::is_array(signature)) { ++ //jint dimension; ++ //Symbol* object_key; ++ FieldArrayInfo fd; ++ FieldType::get_array_info(signature, fd, Thread::current()); ++ if (is_symbol_dangerous(fd.object_key())) { ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ bool is_symbol_dangerous(Symbol* symbol) { ++ for (int i=0; i<_dangerous_klasses->length(); i++) { ++ if(_dangerous_klasses->at(i)->klass_part()->name() == symbol) { ++ RC_TRACE(0x00000002, ("Found constant pool index %d references class %s", ++ i, ++ symbol->as_C_string())); ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ virtual void do_object(oop obj) { ++ ++ if (!_result) 
return; ++ ++ klassOop klassObj = (klassOop)obj; ++ Thread *THREAD = Thread::current(); ++ ++ // We found an instance klass! ++ instanceKlass *klass = instanceKlass::cast(klassObj); ++ instanceKlassHandle handle(klassObj); ++ ++ RC_TRACE(0x00000400, ("Check if verification is necessary for class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ ++ if (!can_be_affected(klass)) { ++ RC_TRACE(0x00000400, ("Skipping verification of class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ return; ++ } ++ ++ if (handle->major_version() < Verifier::STACKMAP_ATTRIBUTE_MAJOR_VERSION) { ++ RC_TRACE(0x00000001, ("Failing because cannot verify class %s major_version=%d", handle->name()->as_C_string(), handle->major_version())); ++ _result = false; ++ return; ++ } ++ ++ RC_TRACE(0x00000001, ("Verifying class %s", handle->name()->as_C_string())); ++ ++ if (!Verifier::verify(handle, Verifier::NoException, true, false, Thread::current())) { ++ ++ RC_TRACE(0x00000001, ("Verification of class %s failed", handle->name()->as_C_string())); ++ //Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ //RC_TRACE(0x00000002, ("exception when verifying class: '%s'", ex_name->as_C_string()); ++ //PENDING_EXCEPTION->print(); ++ CLEAR_PENDING_EXCEPTION; ++ _result = false; ++ } ++ ++ /*int method_count = klass->methods()->length(); ++ for (int i=0; i<method_count; i++) { ++ methodOop cur_method = (methodOop)klass->methods()->obj_at(i); ++ if (!check_method(cur_method)) { ++ RC_TRACE(0x00000001, ("Failed to verify consistency of method %s of klass %s", cur_method->name()->as_C_string(), klass->name()->as_C_string()); ++ } ++ }*/ ++ } ++ }; ++ ++ // TODO: Check bytecodes in case of interface => class or class => interface etc.. 
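can_be_affected() above leans on a class file format rule: CONSTANT_Long and CONSTANT_Double occupy two constant pool slots and slot 0 is unused, so the walk must advance an extra index for them (JVMS §4.4.5). Reduced to a sketch with simplified tags (illustrative only):

#include <vector>

enum Tag { TAG_UTF8, TAG_INTEGER, TAG_FLOAT, TAG_LONG, TAG_DOUBLE, TAG_CLASS };

// Counts CONSTANT_Class entries in a simplified tag array, advancing twice
// for 8-byte constants exactly as the switch in can_be_affected() does.
static int count_class_entries(const std::vector<Tag>& tags) {
  int count = 0;
  for (int i = 1; i < (int)tags.size(); i++) {  // slot 0 is never used
    if (tags[i] == TAG_LONG || tags[i] == TAG_DOUBLE) {
      i++;  // these constants take two slots
      continue;
    }
    if (tags[i] == TAG_CLASS) {
      count++;
    }
  }
  return count;
}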
++ ++ GrowableArray<klassOop> dangerous_klasses; ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle handle = _new_classes->at(i); ++ if (handle->check_redefinition_flag(Klass::RemoveSuperType)) { ++ dangerous_klasses.append(handle()); ++ } ++ } ++ ++ CheckLoadedMethodsClosure checkLoadedMethodsClosure(&dangerous_klasses); ++ Thread::current()->set_pretend_new_universe(true); ++ SystemDictionary::classes_do(&checkLoadedMethodsClosure); ++ Thread::current()->set_pretend_new_universe(false); ++ ++ ++ return checkLoadedMethodsClosure.result(); ++} ++ ++bool VM_RedefineClasses::check_type_consistency() { ++ ++ Universe::set_verify_in_progress(true); ++ ++ SystemDictionary::classes_do(calculate_type_check_information); ++ bool result = check_field_value_types(); ++ SystemDictionary::classes_do(clear_type_check_information); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong field or array element value!")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- // rewrite constant pool references in the class_annotations: +- if (!rewrite_cp_refs_in_class_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller ++ result = check_method_stacks(); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong value on the stack")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- // rewrite constant pool references in the fields_annotations: +- if (!rewrite_cp_refs_in_fields_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller ++ result = check_loaded_methods(); ++ if (!result) { ++ RC_TRACE(0x00000001, ("Aborting redefinition because of wrong loaded method")); ++ Universe::set_verify_in_progress(false); + return false; + } + +- // rewrite constant pool references in the methods_annotations: +- if (!rewrite_cp_refs_in_methods_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller ++ RC_TRACE(0x00000001, ("Verification passed => hierarchy change is valid!")); ++ Universe::set_verify_in_progress(false); ++ return true; ++} ++ ++void VM_RedefineClasses::rollback() { ++ RC_TRACE(0x00000001, ("Rolling back redefinition!")); ++ SystemDictionary::rollback_redefinition(); ++ ++ RC_TRACE(0x00000001, ("After rolling back system dictionary!")); ++ for (int i=0; i<_new_classes->length(); i++) { ++ SystemDictionary::remove_from_hierarchy(_new_classes->at(i)); ++ } ++ ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle new_class = _new_classes->at(i); ++ new_class->set_redefining(false); ++ new_class->old_version()->klass_part()->set_new_version(NULL); ++ new_class->set_old_version(NULL); ++ } ++ ++} ++ ++template <class T> void VM_RedefineClasses::do_oop_work(T* p) { ++ T heap_oop = oopDesc::load_heap_oop(p); ++ if (!oopDesc::is_null(heap_oop)) { ++ oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); ++ if (obj->is_instanceKlass()) { ++ klassOop klass = (klassOop)obj; ++ // DCEVM: note: can overwrite owner of old_klass constants pool with new_klass, so we need to fix it back later ++ if (klass->new_version() != NULL && klass->new_version()->klass_part()->is_redefining()) { ++ obj = klass->klass_part()->new_version(); ++ oopDesc::encode_store_heap_oop_not_null(p, obj); ++ } ++ } else if (obj->blueprint()->newest_version() == SystemDictionary::Class_klass()->klass_part()->newest_version()) { ++ // update references to java.lang.Class to point to newest version. 
Only update references to non-primitive ++ // java.lang.Class instances. ++ klassOop klass_oop = java_lang_Class::as_klassOop(obj); ++ if (klass_oop != NULL) { ++ if (klass_oop->new_version() != NULL && klass_oop->new_version()->klass_part()->is_redefining()) { ++ obj = klass_oop->new_version()->java_mirror(); ++ } else if (klass_oop->klass_part()->is_redefining()) { ++ obj = klass_oop->java_mirror(); ++ } ++ oopDesc::encode_store_heap_oop_not_null(p, obj); ++ ++ ++ // FIXME: DCEVM: better implementation? ++ // Starting from JDK 7 java_mirror can be kept in the regular heap. Therefore, it is possible ++ // that new java_mirror is in the young generation whereas p is in tenured generation. In that ++ // case we need to run write barrier to make sure card table is properly updated. This will ++ // allow JVM to detect reference in tenured generation properly during young generation GC. ++ if (Universe::heap()->is_in_reserved(p)) { ++ if (GenCollectedHeap::heap()->is_in_young(obj)) { ++ GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); ++ assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); ++ CardTableRS* _rs = (CardTableRS*)rs; ++ _rs->inline_write_ref_field_gc(p, obj); ++ } ++ } ++ } ++ } ++ } ++} ++ ++void VM_RedefineClasses::swap_marks(oop first, oop second) { ++ markOop first_mark = first->mark(); ++ markOop second_mark = second->mark(); ++ first->set_mark(second_mark); ++ second->set_mark(first_mark); ++} ++ ++void VM_RedefineClasses::doit() { ++ Thread *thread = Thread::current(); ++ ++ RC_TRACE(0x00000001, ("Entering doit!")); ++ ++ ++ if ((_max_redefinition_flags & Klass::RemoveSuperType) != 0) { ++ ++ RC_TIMER_START(_timer_check_type); ++ ++ if (!check_type_consistency()) { ++ // (tw) TODO: Rollback the class redefinition ++ rollback(); ++ RC_TRACE(0x00000001, ("Detected type inconsistency!")); ++ _result = JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ RC_TIMER_STOP(_timer_check_type); ++ return; ++ } ++ ++ RC_TIMER_STOP(_timer_check_type); ++ ++ } else { ++ RC_TRACE(0x00000001, ("No type narrowing => skipping check for type inconsistency")); ++ } ++ ++ if (UseMethodForwardPoints) { ++ RC_TRACE(0x00000001, ("Check stack for forwarding methods to new version")); ++ method_forwarding(); ++ } ++ ++ if (UseSharedSpaces) { ++ // Sharing is enabled so we remap the shared readonly space to ++ // shared readwrite, private just in case we need to redefine ++ // a shared class. We do the remap during the doit() phase of ++ // the safepoint to be safer. ++ if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { ++ RC_TRACE(0x00000001, ("failed to remap shared readonly space to readwrite, private")); ++ _result = JVMTI_ERROR_INTERNAL; ++ return; ++ } ++ } ++ ++ RC_TIMER_START(_timer_prepare_redefinition); ++ for (int i = 0; i < _new_classes->length(); i++) { ++ redefine_single_class(_new_classes->at(i), thread); ++ } ++ ++ // Deoptimize all compiled code that depends on this class ++ flush_dependent_code(instanceKlassHandle(Thread::current(), (klassOop)NULL), Thread::current()); ++ ++ // Adjust constantpool caches and vtables for all classes ++ // that reference methods of the evolved class. 
++ SystemDictionary::classes_do(adjust_cpool_cache, Thread::current());
++
++ RC_TIMER_STOP(_timer_prepare_redefinition);
++ RC_TIMER_START(_timer_redefinition);
++
++ class ChangePointersOopClosure : public OopClosure {
++ virtual void do_oop(oop* o) {
++ do_oop_work(o);
++ }
++
++ virtual void do_oop(narrowOop* o) {
++ do_oop_work(o);
++ }
++ };
++
++ class ChangePointersObjectClosure : public ObjectClosure {
++
++ private:
++
++ OopClosure *_closure;
++ bool _needs_instance_update;
++ GrowableArray<oop> *_updated_oops;
++
++ public:
++ ChangePointersObjectClosure(OopClosure *closure) : _closure(closure), _needs_instance_update(false), _updated_oops(NULL) {}
++
++ bool needs_instance_update() {
++ return _needs_instance_update;
++ }
++
++ GrowableArray<oop> *updated_oops() { return _updated_oops; }
++
++ virtual void do_object(oop obj) {
++ if (!obj->is_instanceKlass()) {
++ obj->oop_iterate(_closure);
++
++ if (obj->blueprint()->is_redefining()) {
++
++ if (obj->blueprint()->check_redefinition_flag(Klass::HasInstanceTransformer)) {
++ if (_updated_oops == NULL) {
++ _updated_oops = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(100, true);
++ }
++ _updated_oops->append(obj);
++ }
++
++ if (obj->blueprint()->update_information() != NULL || obj->is_perm()) {
++
++ assert(obj->blueprint()->old_version() != NULL, "must have old version");
++ obj->set_klass_no_check(obj->blueprint()->old_version());
++
++ if (obj->size() != obj->size_given_klass(obj->blueprint()->new_version()->klass_part()) || obj->is_perm()) {
++ // We need an instance update => set back to old klass
++ _needs_instance_update = true;
++
++ } else {
++ MarkSweep::update_fields(obj, obj);
++ assert(obj->blueprint()->is_redefining(), "update fields resets the klass");
++ }
++ }
++ }
++
++ } else {
++ instanceKlass *klass = instanceKlass::cast((klassOop)obj);
++ if (klass->is_redefining()) {
++ // DCEVM: We need to restore the constant pool owner which was updated by do_oop_work
++ instanceKlass* old_klass = instanceKlass::cast(klass->old_version());
++ old_klass->constants()->set_pool_holder(klass->old_version());
++
++ // Initialize the new class! Special static initialization that does not execute the
++ // static constructor but copies static field values from the old class if name
++ // and signature of a static field match.
++ klass->initialize_redefined_class();
++ }
++ // idubrov: FIXME: we probably don't need that since oops will be visited in a regular way...
++        // idubrov: need to check if there is a test to verify that fields referencing the class being updated
++        // idubrov: will get the new version of that class
++        //klass->iterate_static_fields(_closure);
++      }
++    }
++  };
++
++  ChangePointersOopClosure oopClosure;
++  ChangePointersObjectClosure objectClosure(&oopClosure);
++
++  {
++    SharedHeap::heap()->gc_prologue(true);
++    Universe::root_oops_do(&oopClosure);
++    Universe::heap()->object_iterate(&objectClosure);
++    SharedHeap::heap()->gc_epilogue(false);
++  }
++
++  // Swap marks so that the old and new class versions keep the same identity hash codes
++  for (int i=0; i<_new_classes->length(); i++) {
++    swap_marks(_new_classes->at(i)(), _new_classes->at(i)->old_version());
++    swap_marks(_new_classes->at(i)->java_mirror(), _new_classes->at(i)->old_version()->java_mirror());
++  }
++
++  _updated_oops = objectClosure.updated_oops();
++
++  if (objectClosure.needs_instance_update()) {
++
++    // Do a full garbage collection to update the instance sizes accordingly
++    RC_TRACE(0x00000001, ("Before performing full GC!"));
++    Universe::set_redefining_gc_run(true);
++    JvmtiGCMarker jgcm;
++    notify_gc_begin(true);
++    Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
++    notify_gc_end();
++    Universe::set_redefining_gc_run(false);
++    RC_TRACE(0x00000001, ("GC done!"));
++  }
++
++
++  if (RC_TRACE_ENABLED(0x00000001)) {
++    if (_updated_oops != NULL) {
++      RC_TRACE(0x00000001, ("%d object(s) updated!", _updated_oops->length()));
++    } else {
++      RC_TRACE(0x00000001, ("No objects updated!"));
++    }
++  }
++
++  // Unmark klassOops as "redefining"
++  for (int i=0; i<_new_classes->length(); i++) {
++    klassOop cur = _new_classes->at(i)();
++    _new_classes->at(i)->set_redefining(false);
++    _new_classes->at(i)->clear_update_information();
++    _new_classes->at(i)->update_supers_to_newest_version();
++
++    if (((instanceKlass *)cur->klass_part()->old_version()->klass_part())->array_klasses() != NULL) {
++      update_array_classes_to_newest_version(((instanceKlass *)cur->klass_part()->old_version()->klass_part())->array_klasses());
++
++      // Transfer the array classes, otherwise we might get cast exceptions when casting array types.
++      ((instanceKlass*)cur->klass_part())->set_array_klasses(((instanceKlass*)cur->klass_part()->old_version()->klass_part())->array_klasses());
++
++      oop new_mirror = _new_classes->at(i)->java_mirror();
++      oop old_mirror = _new_classes->at(i)->old_version()->java_mirror();
++      java_lang_Class::set_array_klass(new_mirror, java_lang_Class::array_klass(old_mirror));
++    }
++  }
++
++  for (int i=T_BOOLEAN; i<=T_LONG; i++) {
++    update_array_classes_to_newest_version(Universe::typeArrayKlassObj((BasicType)i));
++  }
++
++  // Disable any dependent concurrent compilations
++  SystemDictionary::notice_modification();
++
++  // Set flag indicating that some invariants are no longer true.
++  // See jvmtiExport.hpp for detailed explanation.
++  JvmtiExport::set_has_redefined_a_class();
++
++  // Clean up caches in the compiler interface and compiler threads
++  CompileBroker::cleanup_after_redefinition();
++
++#ifdef ASSERT
++
++  // Universe::verify();
++  // JNIHandles::verify();
++
++  SystemDictionary::classes_do(check_class, thread);
++#endif
++
++  update_active_methods();
++  RC_TIMER_STOP(_timer_redefinition);
++
++}
++
++void VM_RedefineClasses::update_array_classes_to_newest_version(klassOop smallest_dimension) {
++
++  arrayKlass *curArrayKlass = arrayKlass::cast(smallest_dimension);
++  assert(curArrayKlass->lower_dimension() == NULL, "argument must be smallest dimension");
++
++
++  while (curArrayKlass != NULL) {
++    klassOop higher_dimension = curArrayKlass->higher_dimension();
++    klassOop lower_dimension = curArrayKlass->lower_dimension();
++    curArrayKlass->update_supers_to_newest_version();
++
++    curArrayKlass = NULL;
++    if (higher_dimension != NULL) {
++      curArrayKlass = arrayKlass::cast(higher_dimension);
++    }
++  }
++
++}
++
++void VM_RedefineClasses::doit_epilogue() {
++
++  RC_TIMER_START(_timer_vm_op_epilogue);
++
++  unlock_threads();
++
++  ResourceMark mark;
++
++  VM_GC_Operation::doit_epilogue();
++  RC_TRACE(0x00000001, ("GC Operation epilogue finished!"));
++
++  GrowableArray<methodHandle> instanceTransformerMethods;
++
++  // Call static transformers
++  for (int i=0; i<_new_classes->length(); i++) {
++
++    instanceKlassHandle klass = _new_classes->at(i);
++
++    // Transfer init state
++    if (klass->old_version() != NULL) {
++      instanceKlass::ClassState state = instanceKlass::cast(klass->old_version())->init_state();
++      if (state > instanceKlass::linked) {
++        klass->initialize(Thread::current());
++      }
++    }
++
++    // Find instance transformer method
++
++    if (klass->check_redefinition_flag(Klass::HasInstanceTransformer)) {
++
++      RC_TRACE(0x00008000, ("Call instance transformer of %s instance", klass->name()->as_C_string()));
++      klassOop cur_klass = klass();
++      while (cur_klass != NULL) {
++        methodOop method = ((instanceKlass*)cur_klass->klass_part())->find_method(vmSymbols::transformer_name(), vmSymbols::void_method_signature());
++        if (method != NULL) {
++          methodHandle instanceTransformerMethod(method);
++          instanceTransformerMethods.append(instanceTransformerMethod);
++          break;
++        } else {
++          cur_klass = cur_klass->klass_part()->super();
++        }
++      }
++      assert(cur_klass != NULL, "must have instance transformer method");
++    } else {
++      instanceTransformerMethods.append(methodHandle(Thread::current(), NULL));
++    }
++  }
++
++
++  // Call instance transformers
++  if (_updated_oops != NULL) {
++
++    for (int i=0; i<_updated_oops->length(); i++) {
++      assert(_updated_oops->at(i) != NULL, "must not be null!");
++      Handle cur(_updated_oops->at(i));
++      instanceKlassHandle klass(cur->klass());
++
++      if (klass->check_redefinition_flag(Klass::HasInstanceTransformer)) {
++
++        methodHandle method = instanceTransformerMethods.at(klass->redefinition_index());
++
++        RC_TRACE(0x00008000, ("executing transformer method"));
++
++        Thread *__the_thread__ = Thread::current();
++        JavaValue result(T_VOID);
++        JavaCallArguments args(cur);
++        JavaCalls::call(&result,
++                        method,
++                        &args,
++                        THREAD);
++
++        // TODO: What to do with an exception here?
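// (Editor's sketch; not from the patch.) The block below uses HotSpot's standard
// pending-exception idiom: a Java upcall never throws a C++ exception but parks the
// throwable on the current thread, and the C++ caller must clear or propagate it.
// With a hypothetical helper:
//
//   static void call_transformer(methodHandle m, JavaCallArguments* args, TRAPS) {
//     JavaValue result(T_VOID);
//     JavaCalls::call(&result, m, args, THREAD);  // may leave a pending exception
//     if (HAS_PENDING_EXCEPTION) {
//       CLEAR_PENDING_EXCEPTION;                  // swallow it, as the code below does
//     }
//   }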
++        if (HAS_PENDING_EXCEPTION) {
++          Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
++          RC_TRACE(0x00000002, ("exception when executing transformer: '%s'",
++                                ex_name->as_C_string()));
++          CLEAR_PENDING_EXCEPTION;
++        }
++      }
++    }
++
++    delete _updated_oops;
++    _updated_oops = NULL;
++  }
++
++  // Free the array of scratch classes
++  delete _new_classes;
++  _new_classes = NULL;
++  RC_TRACE(0x00000001, ("Redefinition finished!"));
++
++  RC_TIMER_STOP(_timer_vm_op_epilogue);
++}
++
++bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) {
++  // classes for primitives cannot be redefined
++  if (java_lang_Class::is_primitive(klass_mirror)) {
+     return false;
+   }
+-
+-  // rewrite constant pool references in the methods_parameter_annotations:
+-  if (!rewrite_cp_refs_in_methods_parameter_annotations(scratch_class,
+-         THREAD)) {
+-    // propagate failure back to caller
++  klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror);
++  // classes for arrays cannot be redefined
++  if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) {
+     return false;
+   }
+-
+-  // rewrite constant pool references in the methods_default_annotations:
+-  if (!rewrite_cp_refs_in_methods_default_annotations(scratch_class,
+-         THREAD)) {
+-    // propagate failure back to caller
+-    return false;
++  return true;
++}
++
++#ifdef ASSERT
++
++void VM_RedefineClasses::verify_classes(klassOop k_oop_latest, oop initiating_loader, TRAPS) {
++  klassOop k_oop = k_oop_latest;
++  while (k_oop != NULL) {
++
++    instanceKlassHandle k_handle(THREAD, k_oop);
++    Verifier::verify(k_handle, Verifier::ThrowException, true, true, THREAD);
++    k_oop = k_oop->klass_part()->old_version();
+   }
+-
+-  return true;
+-} // end rewrite_cp_refs()
+-
+-
+-// Rewrite constant pool references in the methods.
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods(
+-       instanceKlassHandle scratch_class, TRAPS) {
+-
+-  objArrayHandle methods(THREAD, scratch_class->methods());
+-
+-  if (methods.is_null() || methods->length() == 0) {
+-    // no methods so nothing to do
+-    return true;
+-  }
+-
+-  // rewrite constant pool references in the methods:
+-  for (int i = methods->length() - 1; i >= 0; i--) {
+-    methodHandle method(THREAD, (methodOop)methods->obj_at(i));
+-    methodHandle new_method;
+-    rewrite_cp_refs_in_method(method, &new_method, CHECK_false);
+-    if (!new_method.is_null()) {
+-      // the method has been replaced so save the new method version
+-      methods->obj_at_put(i, new_method());
++}
++
++#endif
++
++// Rewrite fast bytecodes back to their slower equivalents. Undoes the rewriting done in templateTable_xxx.cpp.
++// The reason is that once we zero the cpool caches, all entries need to be re-resolved. Fast bytecodes do not
++// do that; they assume the cache entry is already resolved.
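// (Editor's note, illustrative.) Bytecodes::java_code maps a rewritten "fast" opcode
// back to the portable opcode it was derived from, for example:
//
//   assert(Bytecodes::java_code(Bytecodes::_fast_igetfield) == Bytecodes::_getfield,
//          "fast form of getfield");
//
// so storing java_code(*bcp) over *bcp, as unpatch_bytecode below does, sends the
// interpreter back through the slow path that re-resolves the zeroed cache entry.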
++static void unpatch_bytecode(methodOop method) { ++ RawBytecodeStream bcs(method); ++ Bytecodes::Code code; ++ Bytecodes::Code java_code; ++ while (!bcs.is_last_bytecode()) { ++ code = bcs.raw_next(); ++ address bcp = bcs.bcp(); ++ ++ if (code == Bytecodes::_breakpoint) { ++ int bci = method->bci_from(bcp); ++ code = method->orig_bytecode_at(bci); ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ // Let breakpoint table handling unpatch bytecode ++ method->set_orig_bytecode_at(bci, java_code); ++ } ++ } else { ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ *bcp = java_code; ++ } ++ } ++ ++ // Additionally, we need to unpatch bytecode at bcp+1 for fast_xaccess (which would be fast field access) ++ if (code == Bytecodes::_fast_iaccess_0 || code == Bytecodes::_fast_aaccess_0 || code == Bytecodes::_fast_faccess_0) { ++ Bytecodes::Code code2 = Bytecodes::code_or_bp_at(bcp + 1); ++ assert(code2 == Bytecodes::_fast_igetfield || ++ code2 == Bytecodes::_fast_agetfield || ++ code2 == Bytecodes::_fast_fgetfield, ""); ++ *(bcp + 1) = Bytecodes::java_code(code2); + } + } +- +- return true; + } + +- +-// Rewrite constant pool references in the specific method. This code +-// was adapted from Rewriter::rewrite_method(). +-void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method, +- methodHandle *new_method_p, TRAPS) { +- +- *new_method_p = methodHandle(); // default is no new method +- +- // We cache a pointer to the bytecodes here in code_base. If GC +- // moves the methodOop, then the bytecodes will also move which +- // will likely cause a crash. We create a No_Safepoint_Verifier +- // object to detect whether we pass a possible safepoint in this +- // code block. +- No_Safepoint_Verifier nsv; +- +- // Bytecodes and their length +- address code_base = method->code_base(); +- int code_length = method->code_size(); +- +- int bc_length; +- for (int bci = 0; bci < code_length; bci += bc_length) { +- address bcp = code_base + bci; +- Bytecodes::Code c = (Bytecodes::Code)(*bcp); +- +- bc_length = Bytecodes::length_for(c); +- if (bc_length == 0) { +- // More complicated bytecodes report a length of zero so +- // we have to try again a slightly different way. +- bc_length = Bytecodes::length_at(method(), bcp); +- } +- +- assert(bc_length != 0, "impossible bytecode length"); +- +- switch (c) { +- case Bytecodes::_ldc: +- { +- int cp_index = *(bcp + 1); +- int new_index = find_new_index(cp_index); +- +- if (StressLdcRewrite && new_index == 0) { +- // If we are stressing ldc -> ldc_w rewriting, then we +- // always need a new_index value. 
+- new_index = cp_index; +- } +- if (new_index != 0) { +- // the original index is mapped so we have more work to do +- if (!StressLdcRewrite && new_index <= max_jubyte) { +- // The new value can still use ldc instead of ldc_w +- // unless we are trying to stress ldc -> ldc_w rewriting +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- *(bcp + 1) = new_index; +- } else { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s->ldc_w@" INTPTR_FORMAT " old=%d, new=%d", +- Bytecodes::name(c), bcp, cp_index, new_index)); +- // the new value needs ldc_w instead of ldc +- u_char inst_buffer[4]; // max instruction size is 4 bytes +- bcp = (address)inst_buffer; +- // construct new instruction sequence +- *bcp = Bytecodes::_ldc_w; +- bcp++; +- // Rewriter::rewrite_method() does not rewrite ldc -> ldc_w. +- // See comment below for difference between put_Java_u2() +- // and put_native_u2(). +- Bytes::put_Java_u2(bcp, new_index); +- +- Relocator rc(method, NULL /* no RelocatorListener needed */); +- methodHandle m; +- { +- Pause_No_Safepoint_Verifier pnsv(&nsv); +- +- // ldc is 2 bytes and ldc_w is 3 bytes +- m = rc.insert_space_at(bci, 3, inst_buffer, THREAD); +- if (m.is_null() || HAS_PENDING_EXCEPTION) { +- guarantee(false, "insert_space_at() failed"); +- } +- } +- +- // return the new method so that the caller can update +- // the containing class +- *new_method_p = method = m; +- // switch our bytecode processing loop from the old method +- // to the new method +- code_base = method->code_base(); +- code_length = method->code_size(); +- bcp = code_base + bci; +- c = (Bytecodes::Code)(*bcp); +- bc_length = Bytecodes::length_for(c); +- assert(bc_length != 0, "sanity check"); +- } // end we need ldc_w instead of ldc +- } // end if there is a mapped index +- } break; +- +- // these bytecodes have a two-byte constant pool index +- case Bytecodes::_anewarray : // fall through +- case Bytecodes::_checkcast : // fall through +- case Bytecodes::_getfield : // fall through +- case Bytecodes::_getstatic : // fall through +- case Bytecodes::_instanceof : // fall through +- case Bytecodes::_invokeinterface: // fall through +- case Bytecodes::_invokespecial : // fall through +- case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokevirtual : // fall through +- case Bytecodes::_ldc_w : // fall through +- case Bytecodes::_ldc2_w : // fall through +- case Bytecodes::_multianewarray : // fall through +- case Bytecodes::_new : // fall through +- case Bytecodes::_putfield : // fall through +- case Bytecodes::_putstatic : +- { +- address p = bcp + 1; +- int cp_index = Bytes::get_Java_u2(p); +- int new_index = find_new_index(cp_index); +- if (new_index != 0) { +- // the original index is mapped so update w/ new value +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- // Rewriter::rewrite_method() uses put_native_u2() in this +- // situation because it is reusing the constant pool index +- // location for a native index into the constantPoolCache. +- // Since we are updating the constant pool index prior to +- // verification and constantPoolCache initialization, we +- // need to keep the new index in Java byte order. +- Bytes::put_Java_u2(p, new_index); +- } +- } break; +- } +- } // end for each bytecode +-} // end rewrite_cp_refs_in_method() +- +- +-// Rewrite constant pool references in the class_annotations field. 
+-bool VM_RedefineClasses::rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- typeArrayHandle class_annotations(THREAD, +- scratch_class->class_annotations()); +- if (class_annotations.is_null() || class_annotations->length() == 0) { +- // no class_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_annotations length=%d", class_annotations->length())); +- +- int byte_i = 0; // byte index into class_annotations +- return rewrite_cp_refs_in_annotations_typeArray(class_annotations, byte_i, +- THREAD); +-} +- +- +-// Rewrite constant pool references in an annotations typeArray. This +-// "structure" is adapted from the RuntimeVisibleAnnotations_attribute +-// that is described in section 4.8.15 of the 2nd-edition of the VM spec: +-// +-// annotations_typeArray { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for num_annotations field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for num_annotations field")); +- return false; +- } +- +- u2 num_annotations = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_annotations=%d", num_annotations)); +- +- int calc_num_annotations = 0; +- for (; calc_num_annotations < num_annotations; calc_num_annotations++) { +- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad annotation_struct at %d", calc_num_annotations)); +- // propagate failure back to caller +- return false; +- } +- } +- assert(num_annotations == calc_num_annotations, "sanity check"); +- +- return true; +-} // end rewrite_cp_refs_in_annotations_typeArray() +- +- +-// Rewrite constant pool references in the annotation struct portion of +-// an annotations_typeArray. 
This "structure" is from section 4.8.15 of +-// the 2nd-edition of the VM spec: +-// +-// struct annotation { +-// u2 type_index; +-// u2 num_element_value_pairs; +-// { +-// u2 element_name_index; +-// element_value value; +-// } element_value_pairs[num_element_value_pairs]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- if ((byte_i_ref + 2 + 2) > annotations_typeArray->length()) { +- // not enough room for smallest annotation_struct +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for annotation_struct")); +- return false; +- } +- +- u2 type_index = rewrite_cp_ref_in_annotation_data(annotations_typeArray, +- byte_i_ref, "mapped old type_index=%d", THREAD); +- +- u2 num_element_value_pairs = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr( +- byte_i_ref)); +- byte_i_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_index=%d num_element_value_pairs=%d", type_index, +- num_element_value_pairs)); +- +- int calc_num_element_value_pairs = 0; +- for (; calc_num_element_value_pairs < num_element_value_pairs; +- calc_num_element_value_pairs++) { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for another element_name_index, let alone +- // the rest of another component +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for element_name_index")); +- return false; +- } +- +- u2 element_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old element_name_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("element_name_index=%d", element_name_index)); +- +- if (!rewrite_cp_refs_in_element_value(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad element_value at %d", calc_num_element_value_pairs)); +- // propagate failure back to caller +- return false; +- } +- } // end for each component +- assert(num_element_value_pairs == calc_num_element_value_pairs, +- "sanity check"); +- +- return true; +-} // end rewrite_cp_refs_in_annotation_struct() +- +- +-// Rewrite a constant pool reference at the current position in +-// annotations_typeArray if needed. Returns the original constant +-// pool reference if a rewrite was not needed or the new constant +-// pool reference if a rewrite was needed. +-u2 VM_RedefineClasses::rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS) { +- +- address cp_index_addr = (address) +- annotations_typeArray->byte_at_addr(byte_i_ref); +- u2 old_cp_index = Bytes::get_Java_u2(cp_index_addr); +- u2 new_cp_index = find_new_index(old_cp_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, (trace_mesg, old_cp_index)); +- Bytes::put_Java_u2(cp_index_addr, new_cp_index); +- old_cp_index = new_cp_index; +- } +- byte_i_ref += 2; +- return old_cp_index; +-} +- +- +-// Rewrite constant pool references in the element_value portion of an +-// annotations_typeArray. 
This "structure" is from section 4.8.15.1 of +-// the 2nd-edition of the VM spec: +-// +-// struct element_value { +-// u1 tag; +-// union { +-// u2 const_value_index; +-// { +-// u2 type_name_index; +-// u2 const_name_index; +-// } enum_const_value; +-// u2 class_info_index; +-// annotation annotation_value; +-// struct { +-// u2 num_values; +-// element_value values[num_values]; +-// } array_value; +-// } value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_element_value( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- +- if ((byte_i_ref + 1) > annotations_typeArray->length()) { +- // not enough room for a tag let alone the rest of an element_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a tag")); +- return false; +- } +- +- u1 tag = annotations_typeArray->byte_at(byte_i_ref); +- byte_i_ref++; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("tag='%c'", tag)); +- +- switch (tag) { +- // These BaseType tag values are from Table 4.2 in VM spec: +- case 'B': // byte +- case 'C': // char +- case 'D': // double +- case 'F': // float +- case 'I': // int +- case 'J': // long +- case 'S': // short +- case 'Z': // boolean +- +- // The remaining tag values are from Table 4.8 in the 2nd-edition of +- // the VM spec: +- case 's': +- { +- // For the above tag values (including the BaseType values), +- // value.const_value_index is right union field. +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a const_value_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a const_value_index")); +- return false; +- } +- +- u2 const_value_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_value_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("const_value_index=%d", const_value_index)); +- } break; +- +- case 'e': +- { +- // for the above tag value, value.enum_const_value is right union field +- +- if ((byte_i_ref + 4) > annotations_typeArray->length()) { +- // not enough room for a enum_const_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a enum_const_value")); +- return false; +- } +- +- u2 type_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old type_name_index=%d", THREAD); +- +- u2 const_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_name_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_name_index=%d const_name_index=%d", type_name_index, +- const_name_index)); +- } break; +- +- case 'c': +- { +- // for the above tag value, value.class_info_index is right union field +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a class_info_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a class_info_index")); +- return false; +- } +- +- u2 class_info_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old class_info_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_info_index=%d", class_info_index)); +- } break; +- +- case '@': +- // For the above tag value, value.attr_value is the right union +- // field. This is a nested annotation. 
+- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- break; +- +- case '[': +- { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a num_values field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_values field")); +- return false; +- } +- +- // For the above tag value, value.array_value is the right union +- // field. This is an array of nested element_value. +- u2 num_values = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("num_values=%d", num_values)); +- +- int calc_num_values = 0; +- for (; calc_num_values < num_values; calc_num_values++) { +- if (!rewrite_cp_refs_in_element_value( +- annotations_typeArray, byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad nested element_value at %d", calc_num_values)); +- // propagate failure back to caller +- return false; +- } +- } +- assert(num_values == calc_num_values, "sanity check"); +- } break; +- +- default: +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("bad tag=0x%x", tag)); +- return false; +- } // end decode tag field +- +- return true; +-} // end rewrite_cp_refs_in_element_value() +- +- +-// Rewrite constant pool references in a fields_annotations field. +-bool VM_RedefineClasses::rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle fields_annotations(THREAD, +- scratch_class->fields_annotations()); +- +- if (fields_annotations.is_null() || fields_annotations->length() == 0) { +- // no fields_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("fields_annotations length=%d", fields_annotations->length())); +- +- for (int i = 0; i < fields_annotations->length(); i++) { +- typeArrayHandle field_annotations(THREAD, +- (typeArrayOop)fields_annotations->obj_at(i)); +- if (field_annotations.is_null() || field_annotations->length() == 0) { +- // this field does not have any annotations so skip it +- continue; +- } +- +- int byte_i = 0; // byte index into field_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(field_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad field_annotations at %d", i)); +- // propagate failure back to caller +- return false; +- } +- } +- +- return true; +-} // end rewrite_cp_refs_in_fields_annotations() +- +- +-// Rewrite constant pool references in a methods_annotations field. 
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_annotations(THREAD, +- scratch_class->methods_annotations()); +- +- if (methods_annotations.is_null() || methods_annotations->length() == 0) { +- // no methods_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_annotations length=%d", methods_annotations->length())); +- +- for (int i = 0; i < methods_annotations->length(); i++) { +- typeArrayHandle method_annotations(THREAD, +- (typeArrayOop)methods_annotations->obj_at(i)); +- if (method_annotations.is_null() || method_annotations->length() == 0) { +- // this method does not have any annotations so skip it +- continue; +- } +- +- int byte_i = 0; // byte index into method_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(method_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_annotations at %d", i)); +- // propagate failure back to caller +- return false; +- } +- } +- +- return true; +-} // end rewrite_cp_refs_in_methods_annotations() +- +- +-// Rewrite constant pool references in a methods_parameter_annotations +-// field. This "structure" is adapted from the +-// RuntimeVisibleParameterAnnotations_attribute described in section +-// 4.8.17 of the 2nd-edition of the VM spec: +-// +-// methods_parameter_annotations_typeArray { +-// u1 num_parameters; +-// { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } parameter_annotations[num_parameters]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_parameter_annotations(THREAD, +- scratch_class->methods_parameter_annotations()); +- +- if (methods_parameter_annotations.is_null() +- || methods_parameter_annotations->length() == 0) { +- // no methods_parameter_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_parameter_annotations length=%d", +- methods_parameter_annotations->length())); +- +- for (int i = 0; i < methods_parameter_annotations->length(); i++) { +- typeArrayHandle method_parameter_annotations(THREAD, +- (typeArrayOop)methods_parameter_annotations->obj_at(i)); +- if (method_parameter_annotations.is_null() +- || method_parameter_annotations->length() == 0) { +- // this method does not have any parameter annotations so skip it +- continue; +- } +- +- if (method_parameter_annotations->length() < 1) { +- // not enough room for a num_parameters field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_parameters field at %d", i)); +- return false; +- } +- +- int byte_i = 0; // byte index into method_parameter_annotations +- +- u1 num_parameters = method_parameter_annotations->byte_at(byte_i); +- byte_i++; +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_parameters=%d", num_parameters)); +- +- int calc_num_parameters = 0; +- for (; calc_num_parameters < num_parameters; calc_num_parameters++) { +- if (!rewrite_cp_refs_in_annotations_typeArray( +- method_parameter_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_parameter_annotations at %d", calc_num_parameters)); +- // propagate failure back to caller +- return false; +- } +- } +- assert(num_parameters == calc_num_parameters, "sanity check"); +- } +- +- return true; +-} // end 
rewrite_cp_refs_in_methods_parameter_annotations() +- +- +-// Rewrite constant pool references in a methods_default_annotations +-// field. This "structure" is adapted from the AnnotationDefault_attribute +-// that is described in section 4.8.19 of the 2nd-edition of the VM spec: +-// +-// methods_default_annotations_typeArray { +-// element_value default_value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_default_annotations(THREAD, +- scratch_class->methods_default_annotations()); +- +- if (methods_default_annotations.is_null() +- || methods_default_annotations->length() == 0) { +- // no methods_default_annotations so nothing to do +- return true; +- } +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_default_annotations length=%d", +- methods_default_annotations->length())); +- +- for (int i = 0; i < methods_default_annotations->length(); i++) { +- typeArrayHandle method_default_annotations(THREAD, +- (typeArrayOop)methods_default_annotations->obj_at(i)); +- if (method_default_annotations.is_null() +- || method_default_annotations->length() == 0) { +- // this method does not have any default annotations so skip it +- continue; +- } +- +- int byte_i = 0; // byte index into method_default_annotations +- +- if (!rewrite_cp_refs_in_element_value( +- method_default_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad default element_value at %d", i)); +- // propagate failure back to caller +- return false; +- } +- } +- +- return true; +-} // end rewrite_cp_refs_in_methods_default_annotations() +- +- +-// Rewrite constant pool references in the method's stackmap table. +-// These "structures" are adapted from the StackMapTable_attribute that +-// is described in section 4.8.4 of the 6.0 version of the VM spec +-// (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// stack_map { +-// u2 number_of_entries; +-// stack_map_frame entries[number_of_entries]; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table( +- methodHandle method, TRAPS) { +- +- if (!method->has_stackmap_table()) { +- return; +- } +- +- typeArrayOop stackmap_data = method->stackmap_data(); +- address stackmap_p = (address)stackmap_data->byte_at_addr(0); +- address stackmap_end = stackmap_p + stackmap_data->length(); +- +- assert(stackmap_p + 2 <= stackmap_end, "no room for number_of_entries"); +- u2 number_of_entries = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("number_of_entries=%u", number_of_entries)); +- +- // walk through each stack_map_frame +- u2 calc_number_of_entries = 0; +- for (; calc_number_of_entries < number_of_entries; calc_number_of_entries++) { +- // The stack_map_frame structure is a u1 frame_type followed by +- // 0 or more bytes of data: +- // +- // union stack_map_frame { +- // same_frame; +- // same_locals_1_stack_item_frame; +- // same_locals_1_stack_item_frame_extended; +- // chop_frame; +- // same_frame_extended; +- // append_frame; +- // full_frame; +- // } +- +- assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type"); +- // The Linux compiler does not like frame_type to be u1 or u2. 
It +- // issues the following warning for the first if-statement below: +- // +- // "warning: comparison is always true due to limited range of data type" +- // +- u4 frame_type = *stackmap_p; +- stackmap_p++; +- +- // same_frame { +- // u1 frame_type = SAME; /* 0-63 */ +- // } +- if (frame_type >= 0 && frame_type <= 63) { +- // nothing more to do for same_frame +- } +- +- // same_locals_1_stack_item_frame { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM; /* 64-127 */ +- // verification_type_info stack[1]; +- // } +- else if (frame_type >= 64 && frame_type <= 127) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // reserved for future use +- else if (frame_type >= 128 && frame_type <= 246) { +- // nothing more to do for reserved frame_types +- } +- +- // same_locals_1_stack_item_frame_extended { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */ +- // u2 offset_delta; +- // verification_type_info stack[1]; +- // } +- else if (frame_type == 247) { +- stackmap_p += 2; +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // chop_frame { +- // u1 frame_type = CHOP; /* 248-250 */ +- // u2 offset_delta; +- // } +- else if (frame_type >= 248 && frame_type <= 250) { +- stackmap_p += 2; +- } +- +- // same_frame_extended { +- // u1 frame_type = SAME_FRAME_EXTENDED; /* 251*/ +- // u2 offset_delta; +- // } +- else if (frame_type == 251) { +- stackmap_p += 2; +- } +- +- // append_frame { +- // u1 frame_type = APPEND; /* 252-254 */ +- // u2 offset_delta; +- // verification_type_info locals[frame_type - 251]; +- // } +- else if (frame_type >= 252 && frame_type <= 254) { +- assert(stackmap_p + 2 <= stackmap_end, +- "no room for offset_delta"); +- stackmap_p += 2; +- u1 len = frame_type - 251; +- for (u1 i = 0; i < len; i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- +- // full_frame { +- // u1 frame_type = FULL_FRAME; /* 255 */ +- // u2 offset_delta; +- // u2 number_of_locals; +- // verification_type_info locals[number_of_locals]; +- // u2 number_of_stack_items; +- // verification_type_info stack[number_of_stack_items]; +- // } +- else if (frame_type == 255) { +- assert(stackmap_p + 2 + 2 <= stackmap_end, +- "no room for smallest full_frame"); +- stackmap_p += 2; +- +- u2 number_of_locals = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 locals_i = 0; locals_i < number_of_locals; locals_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // Use the largest size for the number_of_stack_items, but only get +- // the right number of bytes. +- u2 number_of_stack_items = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 stack_i = 0; stack_i < number_of_stack_items; stack_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- } // end while there is a stack_map_frame +- assert(number_of_entries == calc_number_of_entries, "sanity check"); +-} // end rewrite_cp_refs_in_stack_map_table() +- +- +-// Rewrite constant pool references in the verification type info +-// portion of the method's stackmap table. 
These "structures" are +-// adapted from the StackMapTable_attribute that is described in +-// section 4.8.4 of the 6.0 version of the VM spec (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// The verification_type_info structure is a u1 tag followed by 0 or +-// more bytes of data: +-// +-// union verification_type_info { +-// Top_variable_info; +-// Integer_variable_info; +-// Float_variable_info; +-// Long_variable_info; +-// Double_variable_info; +-// Null_variable_info; +-// UninitializedThis_variable_info; +-// Object_variable_info; +-// Uninitialized_variable_info; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_verification_type_info( +- address& stackmap_p_ref, address stackmap_end, u2 frame_i, +- u1 frame_type, TRAPS) { +- +- assert(stackmap_p_ref + 1 <= stackmap_end, "no room for tag"); +- u1 tag = *stackmap_p_ref; +- stackmap_p_ref++; +- +- switch (tag) { +- // Top_variable_info { +- // u1 tag = ITEM_Top; /* 0 */ +- // } +- // verificationType.hpp has zero as ITEM_Bogus instead of ITEM_Top +- case 0: // fall through +- +- // Integer_variable_info { +- // u1 tag = ITEM_Integer; /* 1 */ +- // } +- case ITEM_Integer: // fall through +- +- // Float_variable_info { +- // u1 tag = ITEM_Float; /* 2 */ +- // } +- case ITEM_Float: // fall through +- +- // Double_variable_info { +- // u1 tag = ITEM_Double; /* 3 */ +- // } +- case ITEM_Double: // fall through +- +- // Long_variable_info { +- // u1 tag = ITEM_Long; /* 4 */ +- // } +- case ITEM_Long: // fall through +- +- // Null_variable_info { +- // u1 tag = ITEM_Null; /* 5 */ +- // } +- case ITEM_Null: // fall through +- +- // UninitializedThis_variable_info { +- // u1 tag = ITEM_UninitializedThis; /* 6 */ +- // } +- case ITEM_UninitializedThis: +- // nothing more to do for the above tag types +- break; +- +- // Object_variable_info { +- // u1 tag = ITEM_Object; /* 7 */ +- // u2 cpool_index; +- // } +- case ITEM_Object: +- { +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for cpool_index"); +- u2 cpool_index = Bytes::get_Java_u2(stackmap_p_ref); +- u2 new_cp_index = find_new_index(cpool_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("mapped old cpool_index=%d", cpool_index)); +- Bytes::put_Java_u2(stackmap_p_ref, new_cp_index); +- cpool_index = new_cp_index; +- } +- stackmap_p_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, cpool_index=%d", frame_i, +- frame_type, cpool_index)); +- } break; +- +- // Uninitialized_variable_info { +- // u1 tag = ITEM_Uninitialized; /* 8 */ +- // u2 offset; +- // } +- case ITEM_Uninitialized: +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for offset"); +- stackmap_p_ref += 2; +- break; +- +- default: +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, bad tag=0x%x", frame_i, frame_type, tag)); +- ShouldNotReachHere(); +- break; +- } // end switch (tag) +-} // end rewrite_cp_refs_in_verification_type_info() +- +- +-// Change the constant pool associated with klass scratch_class to +-// scratch_cp. If shrink is true, then scratch_cp_length elements +-// are copied from scratch_cp to a smaller constant pool and the +-// smaller constant pool is associated with scratch_class. 
+-void VM_RedefineClasses::set_new_constant_pool( +- instanceKlassHandle scratch_class, constantPoolHandle scratch_cp, +- int scratch_cp_length, bool shrink, TRAPS) { +- assert(!shrink || scratch_cp->length() >= scratch_cp_length, "sanity check"); +- +- if (shrink) { +- // scratch_cp is a merged constant pool and has enough space for a +- // worst case merge situation. We want to associate the minimum +- // sized constant pool with the klass to save space. +- constantPoolHandle smaller_cp(THREAD, +- oopFactory::new_constantPool(scratch_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- // preserve orig_length() value in the smaller copy +- int orig_length = scratch_cp->orig_length(); +- assert(orig_length != 0, "sanity check"); +- smaller_cp->set_orig_length(orig_length); +- scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD); +- scratch_cp = smaller_cp; +- smaller_cp()->set_is_conc_safe(true); +- } +- +- // attach new constant pool to klass +- scratch_cp->set_pool_holder(scratch_class()); +- +- // attach klass to new constant pool +- scratch_class->set_constants(scratch_cp()); +- +- int i; // for portability +- +- // update each field in klass to use new constant pool indices as needed +- for (JavaFieldStream fs(scratch_class); !fs.done(); fs.next()) { +- jshort cur_index = fs.name_index(); +- jshort new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-name_index change: %d to %d", cur_index, new_index)); +- fs.set_name_index(new_index); +- } +- cur_index = fs.signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-signature_index change: %d to %d", cur_index, new_index)); +- fs.set_signature_index(new_index); +- } +- cur_index = fs.initval_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-initval_index change: %d to %d", cur_index, new_index)); +- fs.set_initval_index(new_index); +- } +- cur_index = fs.generic_signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-generic_signature change: %d to %d", cur_index, new_index)); +- fs.set_generic_signature_index(new_index); +- } +- } // end for each field +- +- // Update constant pool indices in the inner classes info to use +- // new constant indices as needed. The inner classes info is a +- // quadruple: +- // (inner_class_info, outer_class_info, inner_name, inner_access_flags) +- InnerClassesIterator iter(scratch_class); +- for (; !iter.done(); iter.next()) { +- int cur_index = iter.inner_class_info_index(); +- if (cur_index == 0) { +- continue; // JVM spec. 
allows null inner class refs so skip it +- } +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_class_info change: %d to %d", cur_index, new_index)); +- iter.set_inner_class_info_index(new_index); +- } +- cur_index = iter.outer_class_info_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("outer_class_info change: %d to %d", cur_index, new_index)); +- iter.set_outer_class_info_index(new_index); +- } +- cur_index = iter.inner_name_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_name change: %d to %d", cur_index, new_index)); +- iter.set_inner_name_index(new_index); +- } +- } // end for each inner class +- +- // Attach each method in klass to the new constant pool and update +- // to use new constant pool indices as needed: +- objArrayHandle methods(THREAD, scratch_class->methods()); +- for (i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- method->set_constants(scratch_cp()); +- +- int new_index = find_new_index(method->name_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-name_index change: %d to %d", method->name_index(), +- new_index)); +- method->set_name_index(new_index); +- } +- new_index = find_new_index(method->signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-signature_index change: %d to %d", +- method->signature_index(), new_index)); +- method->set_signature_index(new_index); +- } +- new_index = find_new_index(method->generic_signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-generic_signature_index change: %d to %d", +- method->generic_signature_index(), new_index)); +- method->set_generic_signature_index(new_index); +- } +- +- // Update constant pool indices in the method's checked exception +- // table to use new constant indices as needed. +- int cext_length = method->checked_exceptions_length(); +- if (cext_length > 0) { +- CheckedExceptionElement * cext_table = +- method->checked_exceptions_start(); +- for (int j = 0; j < cext_length; j++) { +- int cur_index = cext_table[j].class_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("cext-class_cp_index change: %d to %d", cur_index, new_index)); +- cext_table[j].class_cp_index = (u2)new_index; +- } +- } // end for each checked exception table entry +- } // end if there are checked exception table entries +- +- // Update each catch type index in the method's exception table +- // to use new constant pool indices as needed. The exception table +- // holds quadruple entries of the form: +- // (beg_bci, end_bci, handler_bci, klass_index) +- +- ExceptionTable ex_table(method()); +- int ext_length = ex_table.length(); +- +- for (int j = 0; j < ext_length; j ++) { +- int cur_index = ex_table.catch_type_index(j); +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("ext-klass_index change: %d to %d", cur_index, new_index)); +- ex_table.set_catch_type_index(j, new_index); +- } +- } // end for each exception table entry +- +- // Update constant pool indices in the method's local variable +- // table to use new constant indices as needed. 
The local variable +- // table hold sextuple entries of the form: +- // (start_pc, length, name_index, descriptor_index, signature_index, slot) +- int lvt_length = method->localvariable_table_length(); +- if (lvt_length > 0) { +- LocalVariableTableElement * lv_table = +- method->localvariable_table_start(); +- for (int j = 0; j < lvt_length; j++) { +- int cur_index = lv_table[j].name_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-name_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].name_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].descriptor_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-descriptor_cp_index change: %d to %d", cur_index, +- new_index)); +- lv_table[j].descriptor_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].signature_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-signature_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].signature_cp_index = (u2)new_index; +- } +- } // end for each local variable table entry +- } // end if there are local variable table entries +- +- rewrite_cp_refs_in_stack_map_table(method, THREAD); +- } // end for each method +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +-} // end set_new_constant_pool() +- +- +-// Unevolving classes may point to methods of the_class directly ++// Unevolving classes may point to old methods directly + // from their constant pool caches, itables, and/or vtables. We + // use the SystemDictionary::classes_do() facility and this helper +-// to fix up these pointers. ++// to fix up these pointers. Additional field offsets and vtable indices ++// in the constant pool cache entries are fixed. + // + // Note: We currently don't support updating the vtable in + // arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. 
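// (Editor's sketch of the version chain this patch introduces.) Redefinition links
// class versions through old_version()/new_version(), so visiting every version of a
// class, as adjust_cpool_cache below does, is a plain linked-list walk from the newest:
//
//   for (klassOop k = k_oop_latest; k != NULL; k = k->klass_part()->old_version()) {
//     // visit one class version, newest first
//   }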
+-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(klassOop k_oop,
+-       oop initiating_loader, TRAPS) {
++void VM_RedefineClasses::adjust_cpool_cache(klassOop k_oop_latest, oop initiating_loader, TRAPS) {
++  klassOop k_oop = k_oop_latest;
++  while (k_oop != NULL) {
++    //tty->print_cr("name=%s", k_oop->klass_part()->name()->as_C_string());
++/*
++    methodOop *matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length());
++    methodOop *matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length());
++
++    for (int i=0; i<_matching_methods_length; i++) {
++      matching_old_methods[i] = (methodOop)_old_methods->obj_at(_matching_old_methods[i]);
++      matching_new_methods[i] = (methodOop)_new_methods->obj_at(_matching_new_methods[i]);
++    }*/
++
++    Klass *k = k_oop->klass_part();
++    if (k->oop_is_instance()) {
++      HandleMark hm(THREAD);
++      instanceKlass *ik = (instanceKlass *) k;
++
++      constantPoolHandle other_cp;
++      constantPoolCacheOop cp_cache;
++
++      other_cp = constantPoolHandle(ik->constants());
++
++      for (int i=0; i<other_cp->length(); i++) {
++        if (other_cp->tag_at(i).is_klass()) {
++          klassOop klass = other_cp->klass_at(i, THREAD);
++          if (klass->klass_part()->new_version() != NULL) {
++
++            // (tw) TODO: check why/if this is necessary
++            other_cp->klass_at_put(i, klass->klass_part()->new_version());
++          }
++          klass = other_cp->klass_at(i, THREAD);
++          assert(klass->klass_part()->new_version() == NULL, "Must be new klass!");
++        }
++      }
++
++      cp_cache = other_cp->cache();
++
++      if (cp_cache != NULL) {
++        cp_cache->adjust_entries(NULL,
++                                 NULL,
++                                 0);
++      }
++
++      // If bytecode rewriting is enabled, we also need to unpatch the bytecodes to force re-resolution of the zeroed entries
++      if (RewriteBytecodes) {
++        ik->methods_do(unpatch_bytecode);
++      }
++    }
++    k_oop = k_oop->klass_part()->old_version();
++  }
++}
++
++void VM_RedefineClasses::update_jmethod_ids() {
++  for (int j = 0; j < _matching_methods_length; ++j) {
++    methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]);
++    RC_TRACE(0x00008000, ("matching method %s", old_method->name_and_sig_as_C_string()));
++
++    jmethodID jmid = old_method->find_jmethod_id_or_null();
++    if (old_method->new_version() != NULL && jmid == NULL) {
++      // (tw) Have to create jmethodID in this case
++      jmid = old_method->jmethod_id();
++    }
++
++    if (jmid != NULL) {
++      // There is a jmethodID, change it to point to the new method
++      methodHandle new_method_h((methodOop)_new_methods->obj_at(_matching_new_methods[j]));
++      if (old_method->new_version() == NULL) {
++        methodHandle old_method_h((methodOop)_old_methods->obj_at(_matching_old_methods[j]));
++        jmethodID new_jmethod_id = JNIHandles::make_jmethod_id(old_method_h);
++        bool result = instanceKlass::cast(old_method_h->method_holder())->update_jmethod_id(old_method_h(), new_jmethod_id);
++        //RC_TRACE(0x00008000, ("Changed jmethodID for old method assigned to %d / result=%d", new_jmethod_id, result);
++        //RC_TRACE(0x00008000, ("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id());
++      } else {
++        jmethodID mid = new_method_h->jmethod_id();
++        bool result = instanceKlass::cast(new_method_h->method_holder())->update_jmethod_id(new_method_h(), jmid);
++        //RC_TRACE(0x00008000, ("Changed jmethodID for new method assigned to %d / result=%d", jmid, result);
++
++      }
++      JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h);
++      //RC_TRACE(0x00008000, ("changing method associated with jmethod id %d to %s", (int)jmid, new_method_h->name()->as_C_string());
++      assert(JNIHandles::resolve_jmethod_id(jmid) == (methodOop)_new_methods->obj_at(_matching_new_methods[j]), "should be replaced");
++      jmethodID mid = ((methodOop)_new_methods->obj_at(_matching_new_methods[j]))->jmethod_id();
++      assert(JNIHandles::resolve_non_null((jobject)mid) == new_method_h(), "must match!");
++
++      //RC_TRACE(0x00008000, ("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id());
++    }
++  }
++}
++
++
++// Deoptimize all compiled code that depends on this class.
++//
++// If the can_redefine_classes capability is obtained in the onload
++// phase then the compiler has recorded all dependencies from startup.
++// In that case we need only deoptimize and throw away all compiled code
++// that depends on the class.
++//
++// If can_redefine_classes is obtained sometime after the onload
++// phase then the dependency information may be incomplete. In that case
++// the first call to RedefineClasses causes all compiled code to be
++// thrown away. As can_redefine_classes has been obtained then
++// all future compilations will record dependencies so second and
++// subsequent calls to RedefineClasses need only throw away code
++// that depends on the class.
++//
++void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) {
++  assert_locked_or_safepoint(Compile_lock);
++
++  // All dependencies have been recorded from startup or this is a second or
++  // subsequent use of RedefineClasses
++
++  // For now deopt all
++  // (tw) TODO: Improve the dependency system such that we can safely deopt only a subset of the methods
++  if (0 && JvmtiExport::all_dependencies_are_recorded()) {
++    Universe::flush_evol_dependents_on(k_h);
++  } else {
++    CodeCache::mark_all_nmethods_for_deoptimization();
++
++    ResourceMark rm(THREAD);
++    DeoptimizationMarker dm;
++
++    // Deoptimize all activations depending on marked nmethods
++    Deoptimization::deoptimize_dependents();
++
++    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
++    CodeCache::make_marked_nmethods_not_entrant();
++
++    // From now on we know that the dependency information is complete
++    JvmtiExport::set_all_dependencies_are_recorded(true);
++  }
++}
++
++void VM_RedefineClasses::compute_added_deleted_matching_methods() {
++  methodOop old_method;
++  methodOop new_method;
++
++  _matching_old_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length());
++  _matching_new_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length());
++  _added_methods = NEW_RESOURCE_ARRAY(int, _new_methods->length());
++  _deleted_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length());
++
++  _matching_methods_length = 0;
++  _deleted_methods_length = 0;
++  _added_methods_length = 0;
++
++  int nj = 0;
++  int oj = 0;
++  while (true) {
++    if (oj >= _old_methods->length()) {
++      if (nj >= _new_methods->length()) {
++        break; // we've looked at everything, done
++      }
++      // New method at the end
++      new_method = (methodOop) _new_methods->obj_at(nj);
++      _added_methods[_added_methods_length++] = nj;
++      ++nj;
++    } else if (nj >= _new_methods->length()) {
++      // Old method, at the end, is deleted
++      old_method = (methodOop) _old_methods->obj_at(oj);
++      _deleted_methods[_deleted_methods_length++] = oj;
++      ++oj;
++    } else {
++      old_method = (methodOop) _old_methods->obj_at(oj);
++      new_method = (methodOop) _new_methods->obj_at(nj);
++      if (old_method->name() == new_method->name()) {
++        if (old_method->signature() == new_method->signature()) {
++          _matching_old_methods[_matching_methods_length ] = oj;//old_method;
++          _matching_new_methods[_matching_methods_length++] = nj;//new_method;
++          ++nj;
++          ++oj;
++        } else {
++          // added overloaded methods have already been moved to the end,
++          // so this is a deleted overloaded method
++          _deleted_methods[_deleted_methods_length++] = oj;//old_method;
++          ++oj;
++        }
++      } else { // names don't match
++        if (old_method->name()->fast_compare(new_method->name()) > 0) {
++          // new method
++          _added_methods[_added_methods_length++] = nj;//new_method;
++          ++nj;
++        } else {
++          // deleted method
++          _deleted_methods[_deleted_methods_length++] = oj;//old_method;
++          ++oj;
++        }
++      }
++    }
++  }
++  assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity");
++  assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity");
++  RC_TRACE(0x00008000, ("Matching methods = %d / deleted methods = %d / added methods = %d",
++                        _matching_methods_length, _deleted_methods_length, _added_methods_length));
++}
++
++
++
++// Install the redefinition of a class:
++// - housekeeping (flushing breakpoints and caches, deoptimizing
++//   dependent compiled code)
++// - adjusting constant pool caches and vtables in other classes
++void VM_RedefineClasses::redefine_single_class(instanceKlassHandle the_new_class, TRAPS) {
++
++  ResourceMark rm(THREAD);
++
++  assert(the_new_class->old_version() != NULL, "Must not be null");
++  assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal");
++
++  instanceKlassHandle the_old_class = instanceKlassHandle(THREAD, the_new_class->old_version());
++
++#ifndef JVMTI_KERNEL
++  // Remove all breakpoints in methods of this class
++  JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
++  jvmti_breakpoints.clearall_in_class_at_safepoint(the_old_class());
++#endif // !JVMTI_KERNEL
++
++  if (the_old_class() == Universe::reflect_invoke_cache()->klass()) {
++    // We are redefining java.lang.reflect.Method. Method.invoke() is
++    // cached and users of the cache care about each active version of
++    // the method so we have to track this previous version.
++    // Do this before methods get switched
++    Universe::reflect_invoke_cache()->add_previous_version(
++      the_old_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum()));
++  }
++
++  _old_methods = the_old_class->methods();
++  _new_methods = the_new_class->methods();
++  _the_class_oop = the_old_class();
++  compute_added_deleted_matching_methods();
++
++  // track which methods are EMCP for add_previous_version() call below
++
++  // (tw) TODO: Check if we need the concept of EMCP?
++  BitMap emcp_methods(_old_methods->length());
++  int emcp_method_count = 0;
++  emcp_methods.clear(); // clears 0..(length() - 1)
++
++  // We need to mark methods as old!!
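// (Editor's note on the flags set by the call below; simplified.) "old" marks a
// method as belonging to a superseded class version; "obsolete" additionally means
// no bytecode-equivalent method exists in the new version. EMCP methods
// ("Equivalent Modulo Constant Pool") stay old but not obsolete, so after
// check_methods_and_mark_as_obsolete runs, for any method m it touched:
//
//   assert(!m->is_obsolete() || m->is_old(), "obsolete implies old");  // m hypothetical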
++ check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); ++ update_jmethod_ids(); ++ ++ // keep track of previous versions of this class ++ the_new_class->add_previous_version(the_old_class, &emcp_methods, ++ emcp_method_count); ++ ++ // TODO: ++ transfer_old_native_function_registrations(the_old_class); ++ ++ ++#ifdef ASSERT ++ ++// klassOop systemLookup1 = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++// assert(systemLookup1 == the_new_class(), "New class must be in system dictionary!"); ++ ++ //JNIHandles::verify(); ++ ++// klassOop systemLookup = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++ ++// assert(systemLookup == the_new_class(), "New class must be in system dictionary!"); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); ++ ++ for (int i=0; i<the_new_class->methods()->length(); i++) { ++ assert(((methodOop)the_new_class->methods()->obj_at(i))->method_holder() == the_new_class(), "method holder must match!"); ++ } ++ ++ _old_methods->verify(); ++ _new_methods->verify(); ++ ++ the_new_class->vtable()->verify(tty); ++ the_old_class->vtable()->verify(tty); ++ ++#endif ++ ++ // increment the classRedefinedCount field in the_class and in any ++ // direct and indirect subclasses of the_class ++ increment_class_counter((instanceKlass *)the_old_class()->klass_part(), THREAD); ++ ++} ++ ++ ++void VM_RedefineClasses::check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p) { ++ RC_TRACE(0x00008000, ("Checking matching methods for EMCP")); ++ *emcp_method_count_p = 0; ++ int obsolete_count = 0; ++ int old_index = 0; ++ for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ methodOop new_method = (methodOop)_new_methods->obj_at(_matching_new_methods[j]); ++ methodOop old_array_method; ++ ++ // Maintain an old_index into the _old_methods array by skipping ++ // deleted methods ++ while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) ++ != old_method) { ++ ++old_index; ++ } ++ ++ if (MethodComparator::methods_EMCP(old_method, new_method)) { ++ // The EMCP definition from JSR-163 requires the bytecodes to be ++ // the same with the exception of constant pool indices which may ++ // differ. However, the constants referred to by those indices ++ // must be the same. ++ // ++ // We use methods_EMCP() for comparison since constant pool ++ // merging can remove duplicate constant pool entries that were ++ // present in the old method and removed from the rewritten new ++ // method. A faster binary comparison function would consider the ++ // old and new methods to be different when they are actually ++ // EMCP. ++ ++ // track which methods are EMCP for add_previous_version() call ++ emcp_methods->set_bit(old_index); ++ (*emcp_method_count_p)++; ++ ++ // An EMCP method is _not_ obsolete. An obsolete method has a ++ // different jmethodID than the current method. An EMCP method ++ // has the same jmethodID as the current method. Having the ++ // same jmethodID for all EMCP versions of a method allows for ++ // a consistent view of the EMCP methods regardless of which ++ // EMCP method you happen to have in hand. 
For example, a
++ // breakpoint set in one EMCP method will work for all EMCP
++ // versions of the method including the current one.
++
++ old_method->set_new_version(new_method);
++ new_method->set_old_version(old_method);
++
++ RC_TRACE(0x00008000, ("Found EMCP method %s", old_method->name_and_sig_as_C_string()));
++
++ // Transfer breakpoints
++ instanceKlass *ik = instanceKlass::cast(old_method->method_holder());
++ for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = bp->next()) {
++ RC_TRACE(0x00000002, ("Checking breakpoint"));
++ RC_TRACE(0x00000002, ("%d / %d",
++ bp->match(old_method), bp->match(new_method)));
++ if (bp->match(old_method)) {
++ assert(bp->match(new_method), "if the old method matches, the new method must match too");
++ RC_TRACE(0x00000002, ("Found a breakpoint in an old EMCP method"));
++ new_method->set_breakpoint(bp->bci());
++ }
++ }
++
++
++
++ } else {
++ // mark obsolete methods as such
++ old_method->set_is_obsolete();
++ obsolete_count++;
++
++ // With tracing we try not to "yack" too much. The position of
++ // this trace assumes there are fewer obsolete methods than
++ // EMCP methods.
++ RC_TRACE(0x00008000, ("mark %s(%s) as obsolete",
++ old_method->name()->as_C_string(),
++ old_method->signature()->as_C_string()));
++ }
++ old_method->set_is_old();
++ }
++ for (int i = 0; i < _deleted_methods_length; ++i) {
++ methodOop old_method = (methodOop)_old_methods->obj_at(_deleted_methods[i]);
++
++ //assert(old_method->vtable_index() < 0,
++ // "cannot delete methods with vtable entries");;
++
++ // Mark all deleted methods as old and obsolete
++ old_method->set_is_old();
++ old_method->set_is_obsolete();
++ ++obsolete_count;
++ // With tracing we try not to "yack" too much. The position of
++ // this trace assumes there are fewer obsolete methods than
++ // EMCP methods.
++ RC_TRACE(0x00008000, ("mark deleted %s(%s) as obsolete",
++ old_method->name()->as_C_string(),
++ old_method->signature()->as_C_string()));
++ }
++ //assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), "sanity check");
++ RC_TRACE(0x00008000, ("EMCP_cnt=%d, obsolete_cnt=%d!", *emcp_method_count_p, obsolete_count));
++}
++
++// Increment the classRedefinedCount field in the specific instanceKlass
++// and in all direct and indirect subclasses.
++void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) {
++ oop class_mirror = ik->java_mirror();
++ klassOop class_oop = java_lang_Class::as_klassOop(class_mirror);
++ int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1;
++ java_lang_Class::set_classRedefinedCount(class_mirror, new_count);
++ RC_TRACE(0x00008000, ("updated count for class=%s to %d", ik->external_name(), new_count));
++}
++
++#ifndef PRODUCT
++void VM_RedefineClasses::check_class(klassOop k_oop, TRAPS) {
+ Klass *k = k_oop->klass_part();
+ if (k->oop_is_instance()) {
+ HandleMark hm(THREAD);
+ instanceKlass *ik = (instanceKlass *) k;
+-
+- // HotSpot specific optimization! HotSpot does not currently
+- // support delegation from the bootstrap class loader to a
+- // user-defined class loader. This means that if the bootstrap
+- // class loader is the initiating class loader, then it will also
+- // be the defining class loader. This also means that classes
+- // loaded by the bootstrap class loader cannot refer to classes
+- // loaded by a user-defined class loader. Note: a user-defined
+- // class loader can delegate to the bootstrap class loader.
+- // +- // If the current class being redefined has a user-defined class +- // loader as its defining class loader, then we can skip all +- // classes loaded by the bootstrap class loader. +- bool is_user_defined = +- instanceKlass::cast(_the_class_oop)->class_loader() != NULL; +- if (is_user_defined && ik->class_loader() == NULL) { +- return; +- } +- +- // This is a very busy routine. We don't want too much tracing +- // printed out. +- bool trace_name_printed = false; +- +- // Very noisy: only enable this call if you are trying to determine +- // that a specific class gets found by this routine. +- // RC_TRACE macro has an embedded ResourceMark +- // RC_TRACE_WITH_THREAD(0x00100000, THREAD, +- // ("adjust check: name=%s", ik->external_name())); +- // trace_name_printed = true; +- +- // Fix the vtable embedded in the_class and subclasses of the_class, +- // if one exists. We discard scratch_class and we don't keep an +- // instanceKlass around to hold obsolete methods so we don't have +- // any other instanceKlass embedded vtables to update. The vtable +- // holds the methodOops for virtual (but not final) methods. +- if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { +- // ik->vtable() creates a wrapper object; rm cleans it up ++ assert(ik->is_newest_version(), "must be latest version in system dictionary"); ++ ++ if (ik->vtable_length() > 0) { + ResourceMark rm(THREAD); +- ik->vtable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // If the current class has an itable and we are either redefining an +- // interface or if the current class is a subclass of the_class, then +- // we potentially have to fix the itable. If we are redefining an +- // interface, then we have to call adjust_method_entries() for +- // every instanceKlass that has an itable since there isn't a +- // subclass relationship between an interface and an instanceKlass. +- if (ik->itable_length() > 0 && (Klass::cast(_the_class_oop)->is_interface() +- || ik->is_subclass_of(_the_class_oop))) { +- // ik->itable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->itable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // The constant pools in other classes (other_cp) can refer to +- // methods in the_class. We have to update method information in +- // other_cp's cache. If other_cp has a previous version, then we +- // have to repeat the process for each previous version. The +- // constant pool cache holds the methodOops for non-virtual +- // methods and for virtual, final methods. +- // +- // Special case: if the current class is the_class, then new_cp +- // has already been attached to the_class and old_cp has already +- // been added as a previous version. The new_cp doesn't have any +- // cached references to old methods so it doesn't need to be +- // updated. We can simply start with the previous version(s) in +- // that case. 
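The adjust_method_entries() calls in the surrounding deleted code all follow one pattern: walk a table of cached method references and redirect every entry that still points at an old method to its matching new method. A minimal standalone sketch of that pattern, using placeholder types rather than the real HotSpot classes:

#include <cstddef>

struct Method;  // stand-in for HotSpot's methodOop (placeholder, not the real type)

// Sketch only: redirect every cached reference to an old method so that it
// points at the matching new method instead. This is the essence of what the
// klassVtable/klassItable/cpCache adjust_method_entries() calls above do.
static void adjust_method_entries_sketch(Method** cache, size_t cache_len,
                                         Method* const* old_methods,
                                         Method* const* new_methods,
                                         int methods_length) {
  for (size_t i = 0; i < cache_len; i++) {
    for (int j = 0; j < methods_length; j++) {
      if (cache[i] == old_methods[j]) {
        cache[i] = new_methods[j];  // entry now resolves to the redefined version
        break;
      }
    }
  }
}

The real implementations additionally emit a one-line trace the first time an entry is adjusted, which is what the trace_name_printed flag threaded through the deleted calls is for.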
+- constantPoolHandle other_cp; +- constantPoolCacheOop cp_cache; +- +- if (k_oop != _the_class_oop) { +- // this klass' constant pool cache may need adjustment +- other_cp = constantPoolHandle(ik->constants()); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); ++ if (!ik->vtable()->check_no_old_entries()) { ++ RC_TRACE(0x00000001, ("size of class: %d\n", ++ k_oop->size())); ++ RC_TRACE(0x00000001, ("klassVtable::check_no_old_entries failure -- OLD method found -- class: %s", ++ ik->signature_name())); ++ assert(false, "OLD method found"); + } +- } +- { +- ResourceMark rm(THREAD); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. +- { +- // the previous versions' constant pool caches may need adjustment +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- other_cp = pv_info->prev_constant_pool_handle(); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up +- } +-} +- +-void VM_RedefineClasses::update_jmethod_ids() { +- for (int j = 0; j < _matching_methods_length; ++j) { +- methodOop old_method = _matching_old_methods[j]; +- jmethodID jmid = old_method->find_jmethod_id_or_null(); +- if (jmid != NULL) { +- // There is a jmethodID, change it to point to the new method +- methodHandle new_method_h(_matching_new_methods[j]); +- JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); +- assert(JNIHandles::resolve_jmethod_id(jmid) == _matching_new_methods[j], +- "should be replaced"); ++ ++ ik->vtable()->verify(tty, true); + } + } + } + +-void VM_RedefineClasses::check_methods_and_mark_as_obsolete( +- BitMap *emcp_methods, int * emcp_method_count_p) { +- *emcp_method_count_p = 0; +- int obsolete_count = 0; +- int old_index = 0; +- for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { +- methodOop old_method = _matching_old_methods[j]; +- methodOop new_method = _matching_new_methods[j]; +- methodOop old_array_method; +- +- // Maintain an old_index into the _old_methods array by skipping +- // deleted methods +- while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) +- != old_method) { +- ++old_index; ++#endif ++ ++VM_RedefineClasses::FindAffectedKlassesClosure::FindAffectedKlassesClosure( GrowableArray<instanceKlassHandle> *original_klasses, GrowableArray<instanceKlassHandle> *result ) ++{ ++ assert(original_klasses != NULL && result != NULL, ""); ++ this->_original_klasses = original_klasses; ++ this->_result = result; ++ SystemDictionary::classes_do(this); ++} ++ ++void VM_RedefineClasses::FindAffectedKlassesClosure::do_object( oop obj ) ++{ ++ klassOop klass = (klassOop)obj; ++ assert(!_result->contains(klass), "must not occur more than once!"); ++ assert(klass->klass_part()->new_version() == NULL, "Only last version is valid entry in system dictionary"); ++ ++ for(int i=0; i<_original_klasses->length(); i++) { ++ instanceKlassHandle cur = _original_klasses->at(i); ++ if (cur() != klass && 
klass->klass_part()->is_subtype_of(cur()) && !_original_klasses->contains(klass)) { ++ RC_TRACE(0x00008000, ("Found affected class: %s", klass->klass_part()->name()->as_C_string())); ++ _result->append(klass); ++ break; + } +- +- if (MethodComparator::methods_EMCP(old_method, new_method)) { +- // The EMCP definition from JSR-163 requires the bytecodes to be +- // the same with the exception of constant pool indices which may +- // differ. However, the constants referred to by those indices +- // must be the same. +- // +- // We use methods_EMCP() for comparison since constant pool +- // merging can remove duplicate constant pool entries that were +- // present in the old method and removed from the rewritten new +- // method. A faster binary comparison function would consider the +- // old and new methods to be different when they are actually +- // EMCP. +- // +- // The old and new methods are EMCP and you would think that we +- // could get rid of one of them here and now and save some space. +- // However, the concept of EMCP only considers the bytecodes and +- // the constant pool entries in the comparison. Other things, +- // e.g., the line number table (LNT) or the local variable table +- // (LVT) don't count in the comparison. So the new (and EMCP) +- // method can have a new LNT that we need so we can't just +- // overwrite the new method with the old method. +- // +- // When this routine is called, we have already attached the new +- // methods to the_class so the old methods are effectively +- // overwritten. However, if an old method is still executing, +- // then the old method cannot be collected until sometime after +- // the old method call has returned. So the overwriting of old +- // methods by new methods will save us space except for those +- // (hopefully few) old methods that are still executing. +- // +- // A method refers to a constMethodOop and this presents another +- // possible avenue to space savings. The constMethodOop in the +- // new method contains possibly new attributes (LNT, LVT, etc). +- // At first glance, it seems possible to save space by replacing +- // the constMethodOop in the old method with the constMethodOop +- // from the new method. The old and new methods would share the +- // same constMethodOop and we would save the space occupied by +- // the old constMethodOop. However, the constMethodOop contains +- // a back reference to the containing method. Sharing the +- // constMethodOop between two methods could lead to confusion in +- // the code that uses the back reference. This would lead to +- // brittle code that could be broken in non-obvious ways now or +- // in the future. +- // +- // Another possibility is to copy the constMethodOop from the new +- // method to the old method and then overwrite the new method with +- // the old method. Since the constMethodOop contains the bytecodes +- // for the method embedded in the oop, this option would change +- // the bytecodes out from under any threads executing the old +- // method and make the thread's bcp invalid. Since EMCP requires +- // that the bytecodes be the same modulo constant pool indices, it +- // is straight forward to compute the correct new bcp in the new +- // constMethodOop from the old bcp in the old constMethodOop. The +- // time consuming part would be searching all the frames in all +- // of the threads to find all of the calls to the old method. 
+- // +- // It looks like we will have to live with the limited savings +- // that we get from effectively overwriting the old methods +- // when the new methods are attached to the_class. +- +- // track which methods are EMCP for add_previous_version() call +- emcp_methods->set_bit(old_index); +- (*emcp_method_count_p)++; +- +- // An EMCP method is _not_ obsolete. An obsolete method has a +- // different jmethodID than the current method. An EMCP method +- // has the same jmethodID as the current method. Having the +- // same jmethodID for all EMCP versions of a method allows for +- // a consistent view of the EMCP methods regardless of which +- // EMCP method you happen to have in hand. For example, a +- // breakpoint set in one EMCP method will work for all EMCP +- // versions of the method including the current one. +- } else { +- // mark obsolete methods as such +- old_method->set_is_obsolete(); +- obsolete_count++; +- +- // obsolete methods need a unique idnum +- u2 num = instanceKlass::cast(_the_class_oop)->next_method_idnum(); +- if (num != constMethodOopDesc::UNSET_IDNUM) { +-// u2 old_num = old_method->method_idnum(); +- old_method->set_method_idnum(num); +-// TO DO: attach obsolete annotations to obsolete method's new idnum ++ } ++} ++ ++jvmtiError VM_RedefineClasses::do_topological_class_sorting( const jvmtiClassDefinition *class_defs, int class_count, GrowableArray<instanceKlassHandle> *affected, GrowableArray<instanceKlassHandle> *arr, TRAPS) ++{ ++ GrowableArray< Pair<klassOop, klassOop> > *links = new GrowableArray< Pair<klassOop, klassOop> >(); ++ ++ for (int i=0; i<class_count; i++) { ++ ++ oop mirror = JNIHandles::resolve_non_null(class_defs[i].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ instanceKlassHandle the_class(THREAD, the_class_oop); ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ++ ClassFileStream st((u1*) class_defs[i].class_bytes, ++ class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); ++ ClassFileParser cfp(&st); ++ ++ GrowableArray<Symbol*> symbolArr; ++ RC_TRACE(0x00000002, ("Before find super symbols of class %s", ++ the_class->name()->as_C_string())); ++ cfp.findSuperSymbols(the_class->name(), the_class_loader, protection_domain, the_class, symbolArr, THREAD); ++ ++ for (int j=0; j<symbolArr.length(); j++) { ++ Symbol* sym = symbolArr.at(j); ++ ++ RC_TRACE(0x00008000, ("Before adding link to super class %s", sym->as_C_string())); ++ ++ for (int k=0; k<arr->length(); k++) { ++ klassOop curOop = arr->at(k)(); ++ // (tw) TODO: Check if we get aliasing problems with different class loaders? ++ if (curOop->klass_part()->name() == sym /*&& curOop->klass_part()->class_loader() == the_class_loader()*/) { ++ RC_TRACE(0x00000002, ("Found class to link")); ++ links->append(Pair<klassOop, klassOop>(curOop, the_class())); ++ break; ++ } + } +- // With tracing we try not to "yack" too much. The position of +- // this trace assumes there are fewer obsolete methods than +- // EMCP methods. 
+- RC_TRACE(0x00000100, ("mark %s(%s) as obsolete",
+- old_method->name()->as_C_string(),
+- old_method->signature()->as_C_string()));
+ }
+- old_method->set_is_old();
+ }
+- for (int i = 0; i < _deleted_methods_length; ++i) {
+- methodOop old_method = _deleted_methods[i];
+-
+- assert(old_method->vtable_index() < 0,
+- "cannot delete methods with vtable entries");;
+-
+- // Mark all deleted methods as old and obsolete
+- old_method->set_is_old();
+- old_method->set_is_obsolete();
+- ++obsolete_count;
+- // With tracing we try not to "yack" too much. The position of
+- // this trace assumes there are fewer obsolete methods than
+- // EMCP methods.
+- RC_TRACE(0x00000100, ("mark deleted %s(%s) as obsolete",
+- old_method->name()->as_C_string(),
+- old_method->signature()->as_C_string()));
++
++
++ RC_TRACE(0x00000001, ("Identified links between classes!"));
++
++ for (int i=0; i<affected->length(); i++) {
++
++ instanceKlassHandle klass = affected->at(i);
++
++ klassOop superKlass = klass->super();
++ if (affected->contains(superKlass)) {
++ links->append(Pair<klassOop, klassOop>(superKlass, klass()));
++ }
++
++ objArrayOop superInterfaces = klass->local_interfaces();
++ for (int j=0; j<superInterfaces->length(); j++) {
++ klassOop interfaceKlass = (klassOop)superInterfaces->obj_at(j);
++ if (arr->contains(interfaceKlass)) {
++ links->append(Pair<klassOop, klassOop>(interfaceKlass, klass()));
++ }
++ }
+ }
+- assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(),
+- "sanity check");
+- RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p,
+- obsolete_count));
++
++ if (RC_TRACE_ENABLED(0x00000002)) {
++ RC_TRACE(0x00000002, ("Identified links: "));
++ for (int i=0; i<links->length(); i++) {
++ RC_TRACE(0x00000002, ("%s to %s",
++ links->at(i).left()->klass_part()->name()->as_C_string(),
++ links->at(i).right()->klass_part()->name()->as_C_string()));
++ }
++ }
++
++ for (int i=0; i<arr->length(); i++) {
++
++ int j;
++ for (j=i; j<arr->length(); j++) {
++
++ int k;
++ for (k=0; k<links->length(); k++) {
++
++ klassOop k1 = links->adr_at(k)->right();
++ klassOop k2 = arr->at(j)();
++ if (k1 == k2) {
++ break;
++ }
++ }
++
++ if (k == links->length()) {
++ break;
++ }
++ }
++
++ if (j == arr->length()) {
++ // cycle detected
++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION;
++ }
++
++ for (int k=0; k<links->length(); k++) {
++ if (links->adr_at(k)->left() == arr->at(j)()) {
++ links->at_put(k, links->at(links->length() - 1));
++ links->remove_at(links->length() - 1);
++ k--;
++ }
++ }
++
++ instanceKlassHandle tmp = arr->at(j);
++ arr->at_put(j, arr->at(i));
++ arr->at_put(i, tmp);
++ }
++
++ return JVMTI_ERROR_NONE;
+ }
+
++void VM_RedefineClasses::oops_do(OopClosure *closure) {
++
++ if (_updated_oops != NULL) {
++ for (int i=0; i<_updated_oops->length(); i++) {
++ closure->do_oop(_updated_oops->adr_at(i));
++ }
++ }
++}
++
++void VM_RedefineClasses::transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to) {
++ to->set_is_field_modification_watched(from->is_field_modification_watched());
++ to->set_is_field_access_watched(from->is_field_access_watched());
++ if (from->is_field_modification_watched() || from->is_field_access_watched()) {
++ RC_TRACE(0x00000002, ("Transferred watch for field %s",
++ from->name()->as_C_string()));
++ }
++ update_klass_field_access_flag(to);
++}
++
++void VM_RedefineClasses::update_klass_field_access_flag(fieldDescriptor *fd) {
++ instanceKlass* ik = instanceKlass::cast(fd->field_holder());
++ FieldInfo*
fi = FieldInfo::from_field_array(ik->fields(), fd->index()); ++ fi->set_access_flags(fd->access_flags().as_short()); ++} ++ ++ + // This internal class transfers the native function registration from old methods + // to new methods. It is designed to handle both the simple case of unchanged + // native methods and the complex cases of native method prefixes being added and/or +@@ -2832,7 +3161,7 @@ + // Same, caused by prefix removal only 3_2_1_m -> 3_2_m + // + class TransferNativeFunctionRegistration { +- private: ++private: + instanceKlassHandle the_class; + int prefix_count; + char** prefixes; +@@ -2845,42 +3174,42 @@ + // (2) with the prefix. + // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) + methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, +- Symbol* signature) { +- TempNewSymbol name_symbol = SymbolTable::probe(name_str, (int)name_len); +- if (name_symbol != NULL) { +- methodOop method = Klass::cast(the_class())->lookup_method(name_symbol, signature); +- if (method != NULL) { +- // Even if prefixed, intermediate methods must exist. +- if (method->is_native()) { +- // Wahoo, we found a (possibly prefixed) version of the method, return it. +- return method; +- } +- if (depth < prefix_count) { +- // Try applying further prefixes (other than this one). +- method = search_prefix_name_space(depth+1, name_str, name_len, signature); +- if (method != NULL) { +- return method; // found ++ Symbol* signature) { ++ Symbol* name_symbol = SymbolTable::probe(name_str, (int)name_len); ++ if (name_symbol != NULL) { ++ methodOop method = Klass::cast(the_class()->klass_part()->new_version())->lookup_method(name_symbol, signature); ++ if (method != NULL) { ++ // Even if prefixed, intermediate methods must exist. ++ if (method->is_native()) { ++ // Wahoo, we found a (possibly prefixed) version of the method, return it. ++ return method; + } +- +- // Try adding this prefix to the method name and see if it matches +- // another method name. +- char* prefix = prefixes[depth]; +- size_t prefix_len = strlen(prefix); +- size_t trial_len = name_len + prefix_len; +- char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); +- strcpy(trial_name_str, prefix); +- strcat(trial_name_str, name_str); +- method = search_prefix_name_space(depth+1, trial_name_str, trial_len, +- signature); +- if (method != NULL) { +- // If found along this branch, it was prefixed, mark as such +- method->set_is_prefixed_native(); +- return method; // found ++ if (depth < prefix_count) { ++ // Try applying further prefixes (other than this one). ++ method = search_prefix_name_space(depth+1, name_str, name_len, signature); ++ if (method != NULL) { ++ return method; // found ++ } ++ ++ // Try adding this prefix to the method name and see if it matches ++ // another method name. ++ char* prefix = prefixes[depth]; ++ size_t prefix_len = strlen(prefix); ++ size_t trial_len = name_len + prefix_len; ++ char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); ++ strcpy(trial_name_str, prefix); ++ strcat(trial_name_str, name_str); ++ method = search_prefix_name_space(depth+1, trial_name_str, trial_len, ++ signature); ++ if (method != NULL) { ++ // If found along this branch, it was prefixed, mark as such ++ method->set_is_prefixed_native(); ++ return method; // found ++ } + } + } + } +- } +- return NULL; // This whole branch bore nothing ++ return NULL; // This whole branch bore nothing + } + + // Return the method name with old prefixes stripped away. 
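Before the next hunk, it helps to spell out what the search_prefix_name_space() rewrite above computes: JVMTI agents may register native-method prefixes, and the lookup has to try the bare name as well as combinations of the not-yet-used prefixes, with deeper prefixes landing outermost. The following is a toy model of that search space only, with a stubbed-out lookup and invented names (the real code probes the SymbolTable, calls lookup_method() on the new class version, and checks is_native(), in a slightly different visit order):

#include <iostream>
#include <string>
#include <vector>

// Stub standing in for the method lookup; pretends exactly one prefixed
// native method exists. The name is made up for this example.
static bool lookup_native(const std::string& name) {
  return name == "$$trace_wrapped_nativeFoo";
}

// Rough model of the recursion in search_prefix_name_space(): try the name
// as-is, then retry with each remaining prefix prepended.
static bool search(const std::vector<std::string>& prefixes, size_t depth,
                   const std::string& name) {
  if (lookup_native(name)) {
    return true;  // found a (possibly prefixed) version of the method
  }
  for (size_t d = depth; d < prefixes.size(); d++) {
    if (search(prefixes, d + 1, prefixes[d] + name)) {
      return true;
    }
  }
  return false;
}

int main() {
  std::vector<std::string> prefixes;
  prefixes.push_back("wrapped_");
  prefixes.push_back("$$trace_");
  std::cout << std::boolalpha << search(prefixes, 0, "nativeFoo") << std::endl;
}

Run as-is this prints true, because the doubly prefixed name is the one the stub pretends is registered; any method found along a prefixed branch is what the real code marks with set_is_prefixed_native().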
+@@ -2905,10 +3234,10 @@
+ ResourceMark rm;
+ char* name_str = method_name_without_prefixes(method);
+ return search_prefix_name_space(0, name_str, strlen(name_str),
+- method->signature());
++ method->signature());
+ }
+
+- public:
++public:
+
+ // Construct a native method transfer processor for this class.
+ TransferNativeFunctionRegistration(instanceKlassHandle _the_class) {
+@@ -2919,9 +3248,9 @@
+ }
+
+ // Attempt to transfer any of the old or deleted methods that are native
+- void transfer_registrations(methodOop* old_methods, int methods_length) {
++ void transfer_registrations(instanceKlassHandle old_klass, int* old_methods, int methods_length) {
+ for (int j = 0; j < methods_length; j++) {
+- methodOop old_method = old_methods[j];
++ methodOop old_method = (methodOop)old_klass->methods()->obj_at(old_methods[j]);
+
+ if (old_method->is_native() && old_method->has_native_function()) {
+ methodOop new_method = strip_and_search_for_new_native(old_method);
+@@ -2930,7 +3259,9 @@
+ // Redefine does not send events (except CFLH), certainly not this
+ // behind the scenes re-registration.
+ new_method->set_native_function(old_method->native_function(),
+- !methodOopDesc::native_bind_event_is_interesting);
++ !methodOopDesc::native_bind_event_is_interesting);
++
++ RC_TRACE(0x00008000, ("Transferring native function for method %s", old_method->name()->as_C_string()));
+ }
+ }
+ }
+@@ -2938,534 +3269,8 @@
+ };
+
+ // Don't lose the association between a native method and its JNI function.
+-void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle the_class) {
+- TransferNativeFunctionRegistration transfer(the_class);
+- transfer.transfer_registrations(_deleted_methods, _deleted_methods_length);
+- transfer.transfer_registrations(_matching_old_methods, _matching_methods_length);
++void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle old_klass) {
++ TransferNativeFunctionRegistration transfer(old_klass);
++ transfer.transfer_registrations(old_klass, _deleted_methods, _deleted_methods_length);
++ transfer.transfer_registrations(old_klass, _matching_old_methods, _matching_methods_length);
+ }
+-
+-// Deoptimize all compiled code that depends on this class.
+-//
+-// If the can_redefine_classes capability is obtained in the onload
+-// phase then the compiler has recorded all dependencies from startup.
+-// In that case we need only deoptimize and throw away all compiled code
+-// that depends on the class.
+-//
+-// If can_redefine_classes is obtained sometime after the onload
+-// phase then the dependency information may be incomplete. In that case
+-// the first call to RedefineClasses causes all compiled code to be
+-// thrown away. As can_redefine_classes has been obtained then
+-// all future compilations will record dependencies so second and
+-// subsequent calls to RedefineClasses need only throw away code
+-// that depends on the class.
+-// +-void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) { +- assert_locked_or_safepoint(Compile_lock); +- +- // All dependencies have been recorded from startup or this is a second or +- // subsequent use of RedefineClasses +- if (JvmtiExport::all_dependencies_are_recorded()) { +- Universe::flush_evol_dependents_on(k_h); +- } else { +- CodeCache::mark_all_nmethods_for_deoptimization(); +- +- ResourceMark rm(THREAD); +- DeoptimizationMarker dm; +- +- // Deoptimize all activations depending on marked nmethods +- Deoptimization::deoptimize_dependents(); +- +- // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies) +- CodeCache::make_marked_nmethods_not_entrant(); +- +- // From now on we know that the dependency information is complete +- JvmtiExport::set_all_dependencies_are_recorded(true); +- } +-} +- +-void VM_RedefineClasses::compute_added_deleted_matching_methods() { +- methodOop old_method; +- methodOop new_method; +- +- _matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _added_methods = NEW_RESOURCE_ARRAY(methodOop, _new_methods->length()); +- _deleted_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- +- _matching_methods_length = 0; +- _deleted_methods_length = 0; +- _added_methods_length = 0; +- +- int nj = 0; +- int oj = 0; +- while (true) { +- if (oj >= _old_methods->length()) { +- if (nj >= _new_methods->length()) { +- break; // we've looked at everything, done +- } +- // New method at the end +- new_method = (methodOop) _new_methods->obj_at(nj); +- _added_methods[_added_methods_length++] = new_method; +- ++nj; +- } else if (nj >= _new_methods->length()) { +- // Old method, at the end, is deleted +- old_method = (methodOop) _old_methods->obj_at(oj); +- _deleted_methods[_deleted_methods_length++] = old_method; +- ++oj; +- } else { +- old_method = (methodOop) _old_methods->obj_at(oj); +- new_method = (methodOop) _new_methods->obj_at(nj); +- if (old_method->name() == new_method->name()) { +- if (old_method->signature() == new_method->signature()) { +- _matching_old_methods[_matching_methods_length ] = old_method; +- _matching_new_methods[_matching_methods_length++] = new_method; +- ++nj; +- ++oj; +- } else { +- // added overloaded have already been moved to the end, +- // so this is a deleted overloaded method +- _deleted_methods[_deleted_methods_length++] = old_method; +- ++oj; +- } +- } else { // names don't match +- if (old_method->name()->fast_compare(new_method->name()) > 0) { +- // new method +- _added_methods[_added_methods_length++] = new_method; +- ++nj; +- } else { +- // deleted method +- _deleted_methods[_deleted_methods_length++] = old_method; +- ++oj; +- } +- } +- } +- } +- assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity"); +- assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity"); +-} +- +- +- +-// Install the redefinition of a class: +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding a weak reference to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. 
These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-void VM_RedefineClasses::redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS) { +- +- RC_TIMER_START(_timer_rsc_phase1); +- +- oop the_class_mirror = JNIHandles::resolve_non_null(the_jclass); +- klassOop the_class_oop = java_lang_Class::as_klassOop(the_class_mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- +- // Remove all breakpoints in methods of this class +- JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints(); +- jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop); +- +- if (the_class_oop == Universe::reflect_invoke_cache()->klass()) { +- // We are redefining java.lang.reflect.Method. Method.invoke() is +- // cached and users of the cache care about each active version of +- // the method so we have to track this previous version. +- // Do this before methods get switched +- Universe::reflect_invoke_cache()->add_previous_version( +- the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); +- } +- +- // Deoptimize all compiled code that depends on this class +- flush_dependent_code(the_class, THREAD); +- +- _old_methods = the_class->methods(); +- _new_methods = scratch_class->methods(); +- _the_class_oop = the_class_oop; +- compute_added_deleted_matching_methods(); +- update_jmethod_ids(); +- +- // Attach new constant pool to the original klass. The original +- // klass still refers to the old constant pool (for now). +- scratch_class->constants()->set_pool_holder(the_class()); +- +-#if 0 +- // In theory, with constant pool merging in place we should be able +- // to save space by using the new, merged constant pool in place of +- // the old constant pool(s). By "pool(s)" I mean the constant pool in +- // the klass version we are replacing now and any constant pool(s) in +- // previous versions of klass. Nice theory, doesn't work in practice. +- // When this code is enabled, even simple programs throw NullPointer +- // exceptions. I'm guessing that this is caused by some constant pool +- // cache difference between the new, merged constant pool and the +- // constant pool that was just being used by the klass. I'm keeping +- // this code around to archive the idea, but the code has to remain +- // disabled for now. +- +- // Attach each old method to the new constant pool. This can be +- // done here since we are past the bytecode verification and +- // constant pool optimization phases. 
+- for (int i = _old_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)_old_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- +- { +- // walk all previous versions of the klass +- instanceKlass *ik = (instanceKlass *)the_class()->klass_part(); +- PreviousVersionWalker pvw(ik); +- instanceKlassHandle ikh; +- do { +- ikh = pvw.next_previous_version(); +- if (!ikh.is_null()) { +- ik = ikh(); +- +- // attach previous version of klass to the new constant pool +- ik->set_constants(scratch_class->constants()); +- +- // Attach each method in the previous version of klass to the +- // new constant pool +- objArrayOop prev_methods = ik->methods(); +- for (int i = prev_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)prev_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- } +- } while (!ikh.is_null()); +- } +-#endif +- +- // Replace methods and constantpool +- the_class->set_methods(_new_methods); +- scratch_class->set_methods(_old_methods); // To prevent potential GCing of the old methods, +- // and to be able to undo operation easily. +- +- constantPoolOop old_constants = the_class->constants(); +- the_class->set_constants(scratch_class->constants()); +- scratch_class->set_constants(old_constants); // See the previous comment. +-#if 0 +- // We are swapping the guts of "the new class" with the guts of "the +- // class". Since the old constant pool has just been attached to "the +- // new class", it seems logical to set the pool holder in the old +- // constant pool also. However, doing this will change the observable +- // class hierarchy for any old methods that are still executing. A +- // method can query the identity of its "holder" and this query uses +- // the method's constant pool link to find the holder. The change in +- // holding class from "the class" to "the new class" can confuse +- // things. +- // +- // Setting the old constant pool's holder will also cause +- // verification done during vtable initialization below to fail. +- // During vtable initialization, the vtable's class is verified to be +- // a subtype of the method's holder. The vtable's class is "the +- // class" and the method's holder is gotten from the constant pool +- // link in the method itself. For "the class"'s directly implemented +- // methods, the method holder is "the class" itself (as gotten from +- // the new constant pool). The check works fine in this case. The +- // check also works fine for methods inherited from super classes. +- // +- // Miranda methods are a little more complicated. A miranda method is +- // provided by an interface when the class implementing the interface +- // does not provide its own method. These interfaces are implemented +- // internally as an instanceKlass. These special instanceKlasses +- // share the constant pool of the class that "implements" the +- // interface. By sharing the constant pool, the method holder of a +- // miranda method is the class that "implements" the interface. In a +- // non-redefine situation, the subtype check works fine. However, if +- // the old constant pool's pool holder is modified, then the check +- // fails because there is no class hierarchy relationship between the +- // vtable's class and "the new class". 
+- +- old_constants->set_pool_holder(scratch_class()); +-#endif +- +- // track which methods are EMCP for add_previous_version() call below +- BitMap emcp_methods(_old_methods->length()); +- int emcp_method_count = 0; +- emcp_methods.clear(); // clears 0..(length() - 1) +- check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); +- transfer_old_native_function_registrations(the_class); +- +- // The class file bytes from before any retransformable agents mucked +- // with them was cached on the scratch class, move to the_class. +- // Note: we still want to do this if nothing needed caching since it +- // should get cleared in the_class too. +- if (the_class->get_cached_class_file_bytes() == 0) { +- // the_class doesn't have a cache yet so copy it +- the_class->set_cached_class_file( +- scratch_class->get_cached_class_file_bytes(), +- scratch_class->get_cached_class_file_len()); +- } +-#ifndef PRODUCT +- else { +- assert(the_class->get_cached_class_file_bytes() == +- scratch_class->get_cached_class_file_bytes(), "cache ptrs must match"); +- assert(the_class->get_cached_class_file_len() == +- scratch_class->get_cached_class_file_len(), "cache lens must match"); +- } +-#endif +- +- // Replace inner_classes +- typeArrayOop old_inner_classes = the_class->inner_classes(); +- the_class->set_inner_classes(scratch_class->inner_classes()); +- scratch_class->set_inner_classes(old_inner_classes); +- +- // Initialize the vtable and interface table after +- // methods have been rewritten +- { +- ResourceMark rm(THREAD); +- // no exception should happen here since we explicitly +- // do not check loader constraints. +- // compare_and_normalize_class_versions has already checked: +- // - classloaders unchanged, signatures unchanged +- // - all instanceKlasses for redefined classes reused & contents updated +- the_class->vtable()->initialize_vtable(false, THREAD); +- the_class->itable()->initialize_itable(false, THREAD); +- assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception"); +- } +- +- // Leave arrays of jmethodIDs and itable index cache unchanged +- +- // Copy the "source file name" attribute from new class version +- the_class->set_source_file_name(scratch_class->source_file_name()); +- +- // Copy the "source debug extension" attribute from new class version +- the_class->set_source_debug_extension( +- scratch_class->source_debug_extension(), +- scratch_class->source_debug_extension() == NULL ? 
0 : +- (int)strlen(scratch_class->source_debug_extension())); +- +- // Use of javac -g could be different in the old and the new +- if (scratch_class->access_flags().has_localvariable_table() != +- the_class->access_flags().has_localvariable_table()) { +- +- AccessFlags flags = the_class->access_flags(); +- if (scratch_class->access_flags().has_localvariable_table()) { +- flags.set_has_localvariable_table(); +- } else { +- flags.clear_has_localvariable_table(); +- } +- the_class->set_access_flags(flags); +- } +- +- // Replace class annotation fields values +- typeArrayOop old_class_annotations = the_class->class_annotations(); +- the_class->set_class_annotations(scratch_class->class_annotations()); +- scratch_class->set_class_annotations(old_class_annotations); +- +- // Replace fields annotation fields values +- objArrayOop old_fields_annotations = the_class->fields_annotations(); +- the_class->set_fields_annotations(scratch_class->fields_annotations()); +- scratch_class->set_fields_annotations(old_fields_annotations); +- +- // Replace methods annotation fields values +- objArrayOop old_methods_annotations = the_class->methods_annotations(); +- the_class->set_methods_annotations(scratch_class->methods_annotations()); +- scratch_class->set_methods_annotations(old_methods_annotations); +- +- // Replace methods parameter annotation fields values +- objArrayOop old_methods_parameter_annotations = +- the_class->methods_parameter_annotations(); +- the_class->set_methods_parameter_annotations( +- scratch_class->methods_parameter_annotations()); +- scratch_class->set_methods_parameter_annotations(old_methods_parameter_annotations); +- +- // Replace methods default annotation fields values +- objArrayOop old_methods_default_annotations = +- the_class->methods_default_annotations(); +- the_class->set_methods_default_annotations( +- scratch_class->methods_default_annotations()); +- scratch_class->set_methods_default_annotations(old_methods_default_annotations); +- +- // Replace minor version number of class file +- u2 old_minor_version = the_class->minor_version(); +- the_class->set_minor_version(scratch_class->minor_version()); +- scratch_class->set_minor_version(old_minor_version); +- +- // Replace major version number of class file +- u2 old_major_version = the_class->major_version(); +- the_class->set_major_version(scratch_class->major_version()); +- scratch_class->set_major_version(old_major_version); +- +- // Replace CP indexes for class and name+type of enclosing method +- u2 old_class_idx = the_class->enclosing_method_class_index(); +- u2 old_method_idx = the_class->enclosing_method_method_index(); +- the_class->set_enclosing_method_indices( +- scratch_class->enclosing_method_class_index(), +- scratch_class->enclosing_method_method_index()); +- scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx); +- +- // keep track of previous versions of this class +- the_class->add_previous_version(scratch_class, &emcp_methods, +- emcp_method_count); +- +- RC_TIMER_STOP(_timer_rsc_phase1); +- RC_TIMER_START(_timer_rsc_phase2); +- +- // Adjust constantpool caches and vtables for all classes +- // that reference methods of the evolved class. +- SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); +- +- if (the_class->oop_map_cache() != NULL) { +- // Flush references to any obsolete methods from the oop map cache +- // so that obsolete methods are not pinned. 
+- the_class->oop_map_cache()->flush_obsolete_entries(); +- } +- +- // increment the classRedefinedCount field in the_class and in any +- // direct and indirect subclasses of the_class +- increment_class_counter((instanceKlass *)the_class()->klass_part(), THREAD); +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("redefined name=%s, count=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), +- java_lang_Class::classRedefinedCount(the_class_mirror), +- os::available_memory() >> 10)); +- +- RC_TIMER_STOP(_timer_rsc_phase2); +-} // end redefine_single_class() +- +- +-// Increment the classRedefinedCount field in the specific instanceKlass +-// and in all direct and indirect subclasses. +-void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) { +- oop class_mirror = ik->java_mirror(); +- klassOop class_oop = java_lang_Class::as_klassOop(class_mirror); +- int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1; +- java_lang_Class::set_classRedefinedCount(class_mirror, new_count); +- +- if (class_oop != _the_class_oop) { +- // _the_class_oop count is printed at end of redefine_single_class() +- RC_TRACE_WITH_THREAD(0x00000008, THREAD, +- ("updated count in subclass=%s to %d", ik->external_name(), new_count)); +- } +- +- for (Klass *subk = ik->subklass(); subk != NULL; +- subk = subk->next_sibling()) { +- if (subk->oop_is_instance()) { +- // Only update instanceKlasses +- instanceKlass *subik = (instanceKlass*)subk; +- // recursively do subclasses of the current subclass +- increment_class_counter(subik, THREAD); +- } +- } +-} +- +-void VM_RedefineClasses::check_class(klassOop k_oop, +- oop initiating_loader, TRAPS) { +- Klass *k = k_oop->klass_part(); +- if (k->oop_is_instance()) { +- HandleMark hm(THREAD); +- instanceKlass *ik = (instanceKlass *) k; +- bool no_old_methods = true; // be optimistic +- ResourceMark rm(THREAD); +- +- // a vtable should never contain old or obsolete methods +- if (ik->vtable_length() > 0 && +- !ik->vtable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassVtable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->vtable()->dump_vtable(); +- } +- no_old_methods = false; +- } +- +- // an itable should never contain old or obsolete methods +- if (ik->itable_length() > 0 && +- !ik->itable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassItable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->itable()->dump_itable(); +- } +- no_old_methods = false; +- } +- +- // the constant pool cache should never contain old or obsolete methods +- if (ik->constants() != NULL && +- ik->constants()->cache() != NULL && +- !ik->constants()->cache()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("cp-cache::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->constants()->cache()->dump_cache(); +- } +- no_old_methods = false; +- } +- +- if (!no_old_methods) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- dump_methods(); +- } else { +- tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option " +- "to see more info 
about the following guarantee() failure."); +- } +- guarantee(false, "OLD and/or OBSOLETE method(s) found"); +- } +- } +-} +- +-void VM_RedefineClasses::dump_methods() { +- int j; +- RC_TRACE(0x00004000, ("_old_methods --")); +- for (j = 0; j < _old_methods->length(); ++j) { +- methodOop m = (methodOop) _old_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_new_methods --")); +- for (j = 0; j < _new_methods->length(); ++j) { +- methodOop m = (methodOop) _new_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_matching_(old/new)_methods --")); +- for (j = 0; j < _matching_methods_length; ++j) { +- methodOop m = _matching_old_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- m = _matching_new_methods[j]; +- RC_TRACE_NO_CR(0x00004000, (" (%5d) ", m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_deleted_methods --")); +- for (j = 0; j < _deleted_methods_length; ++j) { +- methodOop m = _deleted_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_added_methods --")); +- for (j = 0; j < _added_methods_length; ++j) { +- methodOop m = _added_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +-} +diff -r 6c6a2299029a src/share/vm/prims/jvmtiRedefineClasses.hpp +--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1,26 +1,29 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ ++* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. ++* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++* ++* This code is free software; you can redistribute it and/or modify it ++* under the terms of the GNU General Public License version 2 only, as ++* published by the Free Software Foundation. ++* ++* This code is distributed in the hope that it will be useful, but WITHOUT ++* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++* version 2 for more details (a copy is included in the LICENSE file that ++* accompanied this code). ++* ++* You should have received a copy of the GNU General Public License version ++* 2 along with this work; if not, write to the Free Software Foundation, ++* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++* ++* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++* or visit www.oracle.com if you need additional information or have any ++* questions. ++* ++*/ ++ ++// New version that allows arbitrary changes to already loaded classes. ++// Modifications done by: Thomas Wuerthinger <thomas.wuerthinger@gmail.com> + + #ifndef SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP + #define SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP +@@ -32,331 +35,28 @@ + #include "oops/objArrayOop.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" + #include "runtime/vm_operations.hpp" ++#include "gc_implementation/shared/vmGCOperations.hpp" + +-// Introduction: +-// +-// The RedefineClasses() API is used to change the definition of one or +-// more classes. While the API supports redefining more than one class +-// in a single call, in general, the API is discussed in the context of +-// changing the definition of a single current class to a single new +-// class. For clarity, the current class is will always be called +-// "the_class" and the new class will always be called "scratch_class". +-// +-// The name "the_class" is used because there is only one structure +-// that represents a specific class; redefinition does not replace the +-// structure, but instead replaces parts of the structure. The name +-// "scratch_class" is used because the structure that represents the +-// new definition of a specific class is simply used to carry around +-// the parts of the new definition until they are used to replace the +-// appropriate parts in the_class. Once redefinition of a class is +-// complete, scratch_class is thrown away. +-// +-// +-// Implementation Overview: +-// +-// The RedefineClasses() API is mostly a wrapper around the VM op that +-// does the real work. The work is split in varying degrees between +-// doit_prologue(), doit() and doit_epilogue(). +-// +-// 1) doit_prologue() is called by the JavaThread on the way to a +-// safepoint. It does parameter verification and loads scratch_class +-// which involves: +-// - parsing the incoming class definition using the_class' class +-// loader and security context +-// - linking scratch_class +-// - merging constant pools and rewriting bytecodes as needed +-// for the merged constant pool +-// - verifying the bytecodes in scratch_class +-// - setting up the constant pool cache and rewriting bytecodes +-// as needed to use the cache +-// - finally, scratch_class is compared to the_class to verify +-// that it is a valid replacement class +-// - if everything is good, then scratch_class is saved in an +-// instance field in the VM operation for the doit() call +-// +-// Note: A JavaThread must do the above work. +-// +-// 2) doit() is called by the VMThread during a safepoint. 
It installs +-// the new class definition(s) which involves: +-// - retrieving the scratch_class from the instance field in the +-// VM operation +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding weak reference(s) to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-// - telling the SystemDictionary to notice our changes +-// +-// Note: the above work must be done by the VMThread to be safe. +-// +-// 3) doit_epilogue() is called by the JavaThread after the VM op +-// is finished and the safepoint is done. It simply cleans up +-// memory allocated in doit_prologue() and used in doit(). +-// +-// +-// Constant Pool Details: +-// +-// When the_class is redefined, we cannot just replace the constant +-// pool in the_class with the constant pool from scratch_class because +-// that could confuse obsolete methods that may still be running. +-// Instead, the constant pool from the_class, old_cp, is merged with +-// the constant pool from scratch_class, scratch_cp. The resulting +-// constant pool, merge_cp, replaces old_cp in the_class. +-// +-// The key part of any merging algorithm is the entry comparison +-// function so we have to know the types of entries in a constant pool +-// in order to merge two of them together. Constant pools can contain +-// up to 12 different kinds of entries; the JVM_CONSTANT_Unicode entry +-// is not presently used so we only have to worry about the other 11 +-// entry types. For the purposes of constant pool merging, it is +-// helpful to know that the 11 entry types fall into 3 different +-// subtypes: "direct", "indirect" and "double-indirect". +-// +-// Direct CP entries contain data and do not contain references to +-// other CP entries. The following are direct CP entries: +-// JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} +-// +-// Indirect CP entries contain 1 or 2 references to a direct CP entry +-// and no other data. The following are indirect CP entries: +-// JVM_CONSTANT_{Class,NameAndType,String} +-// +-// Double-indirect CP entries contain two references to indirect CP +-// entries and no other data. The following are double-indirect CP +-// entries: +-// JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} +-// +-// When comparing entries between two constant pools, the entry types +-// are compared first and if they match, then further comparisons are +-// made depending on the entry subtype. Comparing direct CP entries is +-// simply a matter of comparing the data associated with each entry. +-// Comparing both indirect and double-indirect CP entries requires +-// recursion. +-// +-// Fortunately, the recursive combinations are limited because indirect +-// CP entries can only refer to direct CP entries and double-indirect +-// CP entries can only refer to indirect CP entries. 
The following is +-// an example illustration of the deepest set of indirections needed to +-// access the data associated with a JVM_CONSTANT_Fieldref entry: +-// +-// JVM_CONSTANT_Fieldref { +-// class_index => JVM_CONSTANT_Class { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-1> +-// } +-// } +-// name_and_type_index => JVM_CONSTANT_NameAndType { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-2> +-// } +-// descriptor_index => JVM_CONSTANT_Utf8 { +-// <data-3> +-// } +-// } +-// } +-// +-// The above illustration is not a data structure definition for any +-// computer language. The curly braces ('{' and '}') are meant to +-// delimit the context of the "fields" in the CP entry types shown. +-// Each indirection from the JVM_CONSTANT_Fieldref entry is shown via +-// "=>", e.g., the class_index is used to indirectly reference a +-// JVM_CONSTANT_Class entry where the name_index is used to indirectly +-// reference a JVM_CONSTANT_Utf8 entry which contains the interesting +-// <data-1>. In order to understand a JVM_CONSTANT_Fieldref entry, we +-// have to do a total of 5 indirections just to get to the CP entries +-// that contain the interesting pieces of data and then we have to +-// fetch the three pieces of data. This means we have to do a total of +-// (5 + 3) * 2 == 16 dereferences to compare two JVM_CONSTANT_Fieldref +-// entries. +-// +-// Here is the indirection, data and dereference count for each entry +-// type: +-// +-// JVM_CONSTANT_Class 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Double 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Fieldref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Float 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Integer 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_InterfaceMethodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Long 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Methodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_NameAndType 1 indir, 2 data, 4 derefs +-// JVM_CONSTANT_String 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Utf8 0 indir, 1 data, 1 deref +-// +-// So different subtypes of CP entries require different amounts of +-// work for a proper comparison. +-// +-// Now that we've talked about the different entry types and how to +-// compare them we need to get back to merging. This is not a merge in +-// the "sort -u" sense or even in the "sort" sense. When we merge two +-// constant pools, we copy all the entries from old_cp to merge_cp, +-// preserving entry order. Next we append all the unique entries from +-// scratch_cp to merge_cp and we track the index changes from the +-// location in scratch_cp to the possibly new location in merge_cp. +-// When we are done, any obsolete code that is still running that +-// uses old_cp should not be able to observe any difference if it +-// were to use merge_cp. As for the new code in scratch_class, it is +-// modified to use the appropriate index values in merge_cp before it +-// is used to replace the code in the_class. +-// +-// There is one small complication in copying the entries from old_cp +-// to merge_cp. Two of the CP entry types are special in that they are +-// lazily resolved. Before explaining the copying complication, we need +-// to digress into CP entry resolution. +-// +-// JVM_CONSTANT_Class and JVM_CONSTANT_String entries are present in +-// the class file, but are not stored in memory as such until they are +-// resolved. The entries are not resolved unless they are used because +-// resolution is expensive. 
During class file parsing the entries are +-// initially stored in memory as JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries. These special CP entry types +-// indicate that the JVM_CONSTANT_Class and JVM_CONSTANT_String entries +-// have been parsed, but the index values in the entries have not been +-// validated. After the entire constant pool has been parsed, the index +-// values can be validated and then the entries are converted into +-// JVM_CONSTANT_UnresolvedClass and JVM_CONSTANT_UnresolvedString +-// entries. During this conversion process, the UTF8 values that are +-// indirectly referenced by the JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries are changed into Symbol*s and the +-// entries are modified to refer to the Symbol*s. This optimization +-// eliminates one level of indirection for those two CP entry types and +-// gets the entries ready for verification. During class file parsing +-// it is also possible for JVM_CONSTANT_UnresolvedString entries to be +-// resolved into JVM_CONSTANT_String entries. Verification expects to +-// find JVM_CONSTANT_UnresolvedClass and either JVM_CONSTANT_String or +-// JVM_CONSTANT_UnresolvedString entries and not JVM_CONSTANT_Class +-// entries. +-// +-// Now we can get back to the copying complication. When we copy +-// entries from old_cp to merge_cp, we have to revert any +-// JVM_CONSTANT_Class entries to JVM_CONSTANT_UnresolvedClass entries +-// or verification will fail. +-// +-// It is important to explicitly state that the merging algorithm +-// effectively unresolves JVM_CONSTANT_Class entries that were in the +-// old_cp when they are changed into JVM_CONSTANT_UnresolvedClass +-// entries in the merge_cp. This is done both to make verification +-// happy and to avoid adding more brittleness between RedefineClasses +-// and the constant pool cache. By allowing the constant pool cache +-// implementation to (re)resolve JVM_CONSTANT_UnresolvedClass entries +-// into JVM_CONSTANT_Class entries, we avoid having to embed knowledge +-// about those algorithms in RedefineClasses. +-// +-// Appending unique entries from scratch_cp to merge_cp is straight +-// forward for direct CP entries and most indirect CP entries. For the +-// indirect CP entry type JVM_CONSTANT_NameAndType and for the double- +-// indirect CP entry types, the presence of more than one piece of +-// interesting data makes appending the entries more complicated. +-// +-// For the JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} entry types, +-// the entry is simply copied from scratch_cp to the end of merge_cp. +-// If the index in scratch_cp is different than the destination index +-// in merge_cp, then the change in index value is tracked. +-// +-// Note: the above discussion for the direct CP entries also applies +-// to the JVM_CONSTANT_Unresolved{Class,String} entry types. +-// +-// For the JVM_CONSTANT_{Class,String} entry types, since there is only +-// one data element at the end of the recursion, we know that we have +-// either one or two unique entries. If the JVM_CONSTANT_Utf8 entry is +-// unique then it is appended to merge_cp before the current entry. +-// If the JVM_CONSTANT_Utf8 entry is not unique, then the current entry +-// is updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// Note: the above discussion for JVM_CONSTANT_{Class,String} entry +-// types is theoretical. 
Since those entry types have already been
+-// optimized into JVM_CONSTANT_Unresolved{Class,String} entry types,
+-// they are handled as direct CP entries.
+-//
+-// For the JVM_CONSTANT_NameAndType entry type, since there are two
+-// data elements at the end of the recursions, we know that we have
+-// between one and three unique entries. Any unique JVM_CONSTANT_Utf8
+-// entries are appended to merge_cp before the current entry. For any
+-// JVM_CONSTANT_Utf8 entries that are not unique, the current entry is
+-// updated to refer to the duplicate entry in merge_cp before it is
+-// appended to merge_cp. Again, any changes in index values are tracked
+-// as needed.
+-//
+-// For the JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} entry
+-// types, since there are two indirect CP entries and three data
+-// elements at the end of the recursions, we know that we have between
+-// one and six unique entries. See the JVM_CONSTANT_Fieldref diagram
+-// above for an example of all six entries. The uniqueness algorithm
+-// for the JVM_CONSTANT_Class and JVM_CONSTANT_NameAndType entries is
+-// covered above. Any unique entries are appended to merge_cp before
+-// the current entry. For any entries that are not unique, the current
+-// entry is updated to refer to the duplicate entry in merge_cp before
+-// it is appended to merge_cp. Again, any changes in index values are
+-// tracked as needed.
+-//
+-//
+-// Other Details:
+-//
+-// Details for other parts of RedefineClasses need to be written.
+-// This is a placeholder section.
+-//
+-//
+-// Open Issues (in no particular order):
+-//
+-// - How do we serialize the RedefineClasses() API without deadlocking?
+-//
+-// - SystemDictionary::parse_stream() was called with a NULL protection
+-// domain since the initial version. This has been changed to pass
+-// the_class->protection_domain(). This change has been tested with
+-// all NSK tests and nothing broke, but what will adding it now break
+-// in ways that we don't test?
+-//
+-// - GenerateOopMap::rewrite_load_or_store() has a comment in its
+-// (indirect) use of the Relocator class that the max instruction
+-// size is 4 bytes. goto_w and jsr_w are 5 bytes and wide/iinc is
+-// 6 bytes. Perhaps Relocator only needs a 4 byte buffer to do
+-// what it does to the bytecodes. More investigation is needed.
+-//
+-// - java.lang.Object methods can be called on arrays. This is
+-// implemented via the arrayKlassOop vtable which we don't
+-// update. For example, if we redefine java.lang.Object.toString(),
+-// then the new version of the method will not be called for array
+-// objects.
+-//
+-// - How do we know if redefine_single_class() and the guts of
+-// instanceKlass are out of sync? I don't think this can be
+-// automated, but we should probably order the work in
+-// redefine_single_class() to match the order of field
+-// definitions in instanceKlass. We also need to add some
+-// comments about keeping things in sync.
+-//
+-// - set_new_constant_pool() is huge and we should consider refactoring
+-// it into smaller chunks of work.
+-//
+-// - The exception table update code in set_new_constant_pool() defines
+-// const values that are also defined in a local context elsewhere.
+-// The same literal values are also used elsewhere. We need to
+-// coordinate a cleanup of these constants with Runtime.
+-// ++#define RC_ABORT(error) { _result = error; return false; } + +-class VM_RedefineClasses: public VM_Operation { ++class VM_RedefineClasses: public VM_GC_Operation { + private: ++ + // These static fields are needed by SystemDictionary::classes_do() + // facility and the adjust_cpool_cache_and_vtable() helper: + static objArrayOop _old_methods; + static objArrayOop _new_methods; +- static methodOop* _matching_old_methods; +- static methodOop* _matching_new_methods; +- static methodOop* _deleted_methods; +- static methodOop* _added_methods; ++ static int* _matching_old_methods; ++ static int* _matching_new_methods; ++ static int* _deleted_methods; ++ static int* _added_methods; + static int _matching_methods_length; + static int _deleted_methods_length; + static int _added_methods_length; + static klassOop _the_class_oop; + ++ static int _revision_number; ++ + // The instance fields are used to pass information from + // doit_prologue() to doit() and doit_epilogue(). + jint _class_count; +@@ -370,36 +70,29 @@ + // _index_map_p contains any entries. + int _index_map_count; + intArray * _index_map_p; +- // ptr to _class_count scratch_classes +- instanceKlassHandle * _scratch_classes; +- jvmtiError _res; ++ GrowableArray<instanceKlassHandle>* _new_classes; ++ GrowableArray<oop>* _updated_oops; ++ jvmtiError _result; ++ int _max_redefinition_flags; + + // Performance measurement support. These timers do not cover all + // the work done for JVM/TI RedefineClasses() but they do cover + // the heavy lifting. +- elapsedTimer _timer_rsc_phase1; +- elapsedTimer _timer_rsc_phase2; +- elapsedTimer _timer_vm_op_prologue; ++ elapsedTimer _timer_total; ++ elapsedTimer _timer_prologue; ++ elapsedTimer _timer_class_linking; ++ elapsedTimer _timer_class_loading; ++ elapsedTimer _timer_check_type; ++ elapsedTimer _timer_prepare_redefinition; ++ elapsedTimer _timer_wait_for_locks; ++ elapsedTimer _timer_redefinition; ++ elapsedTimer _timer_vm_op_epilogue; + +- // These routines are roughly in call order unless otherwise noted. +- +- // Load the caller's new class definition(s) into _scratch_classes. +- // Constant pool merging work is done here as needed. Also calls +- // compare_and_normalize_class_versions() to verify the class +- // definition(s). ++ jvmtiError check_redefinition_allowed(instanceKlassHandle new_class); ++ jvmtiError find_sorted_affected_classes(GrowableArray<instanceKlassHandle> *all_affected_klasses); ++ jvmtiError find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed); + jvmtiError load_new_class_versions(TRAPS); + +- // Verify that the caller provided class definition(s) that meet +- // the restrictions of RedefineClasses. Normalize the order of +- // overloaded methods as needed. +- jvmtiError compare_and_normalize_class_versions( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class); +- +- // Swap annotations[i] with annotations[j] +- // Used by compare_and_normalize_class_versions() when normalizing +- // overloaded methods or changing idnum as when adding or deleting methods. 
+- void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); +- + // Figure out which new methods match old methods in name and signature, + // which methods have been added, and which are no longer present + void compute_added_deleted_matching_methods(); +@@ -407,95 +100,100 @@ + // Change jmethodIDs to point to the new methods + void update_jmethod_ids(); + +- // In addition to marking methods as obsolete, this routine +- // records which methods are EMCP (Equivalent Module Constant +- // Pool) in the emcp_methods BitMap and returns the number of +- // EMCP methods via emcp_method_count_p. This information is +- // used when information about the previous version of the_class +- // is squirreled away. +- void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, +- int * emcp_method_count_p); +- void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ class FindAffectedKlassesClosure : public ObjectClosure { + +- // Unevolving classes may point to methods of the_class directly +- // from their constant pool caches, itables, and/or vtables. We +- // use the SystemDictionary::classes_do() facility and this helper +- // to fix up these pointers. +- static void adjust_cpool_cache_and_vtable(klassOop k_oop, oop loader, TRAPS); ++ private: ++ GrowableArray<instanceKlassHandle> *_original_klasses; ++ GrowableArray<instanceKlassHandle> *_result; ++ ++ public: ++ FindAffectedKlassesClosure(GrowableArray<instanceKlassHandle> *original_klasses, GrowableArray<instanceKlassHandle> *result); ++ ++ virtual void do_object(oop obj); ++ }; ++ ++ ++ static jvmtiError do_topological_class_sorting(const jvmtiClassDefinition *class_definitions, int class_count, GrowableArray<instanceKlassHandle> *affected, GrowableArray<instanceKlassHandle> *arr, TRAPS); + + // Install the redefinition of a class +- void redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS); ++ void redefine_single_class(instanceKlassHandle the_new_class, TRAPS); + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
+ void increment_class_counter(instanceKlass *ik, TRAPS); + +- // Support for constant pool merging (these routines are in alpha +- // order): +- void append_entry(constantPoolHandle scratch_cp, int scratch_i, +- constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); +- int find_new_index(int old_index); +- bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- bool is_unresolved_string_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); +- bool merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS); +- jvmtiError merge_cp_and_rewrite(instanceKlassHandle the_class, +- instanceKlassHandle scratch_class, TRAPS); +- u2 rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS); +- bool rewrite_cp_refs(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_element_value( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_method(methodHandle method, +- methodHandle * new_method_p, TRAPS); +- bool rewrite_cp_refs_in_methods(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS); +- void rewrite_cp_refs_in_verification_type_info( +- address& stackmap_addr_ref, address stackmap_end, u2 frame_i, +- u1 frame_size, TRAPS); +- void set_new_constant_pool(instanceKlassHandle scratch_class, +- constantPoolHandle scratch_cp, int scratch_cp_length, bool shrink, TRAPS); + + void flush_dependent_code(instanceKlassHandle k_h, TRAPS); + +- static void check_class(klassOop k_oop, oop initiating_loader, TRAPS); +- static void dump_methods(); ++ static void check_class(klassOop k_oop,/* oop initiating_loader,*/ TRAPS) PRODUCT_RETURN; ++ ++ static void adjust_cpool_cache(klassOop k_oop, oop initiating_loader, TRAPS); ++ ++#ifdef ASSERT ++ static void verify_classes(klassOop k_oop, oop initiating_loader, TRAPS); ++#endif ++ ++ int calculate_redefinition_flags(instanceKlassHandle new_version); ++ void calculate_instance_update_information(klassOop new_version); ++ void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p); ++ ++ static void calculate_type_check_information(klassOop k); ++ static void clear_type_check_information(klassOop k); + + public: +- VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind); +- VMOp_Type type() const { return VMOp_RedefineClasses; } ++ VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind 
class_load_kind);
++ virtual ~VM_RedefineClasses();
++
++ bool check_arguments();
+ bool doit_prologue();
+ void doit();
+ void doit_epilogue();
++ void rollback();
+
+- bool allow_nested_vm_operations() const { return true; }
+- jvmtiError check_error() { return _res; }
++ jvmtiError check_exception() const;
++ VMOp_Type type() const { return VMOp_RedefineClasses; }
++ bool skip_operation() const { return false; }
++ bool allow_nested_vm_operations() const { return true; }
++ jvmtiError check_error() { return _result; }
++
++ void update_active_methods();
++
++ // Checks for type consistency after hierarchy change
++ bool check_type_consistency();
++ void calculate_type_check_information();
++ bool check_field_value_types();
++ void clear_type_check_information();
++ bool check_method_stacks();
++ bool check_loaded_methods();
++ bool check_method(methodOop method);
++ static Symbol* signature_to_class_name(Symbol* signature);
++
++ void method_forwarding();
++
++ void update_array_classes_to_newest_version(klassOop smallest_dimension);
+
+ // Modifiable test must be shared between IsModifiableClass query
+ // and redefine implementation
+ static bool is_modifiable_class(oop klass_mirror);
++
++ // Method used during garbage collection; the VM operation must iterate over all oops.
++ void oops_do(OopClosure* f);
++
++ // Utility methods for transferring field access flags
++
++ static void transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to);
++ static void update_klass_field_access_flag(fieldDescriptor *fd);
++
++ void transfer_old_native_function_registrations(instanceKlassHandle the_class);
++
++ void lock_threads();
++ void unlock_threads();
++
++ template <class T> static void do_oop_work(T* p);
++
++ static void swap_marks(oop first, oop second);
++
+ };
+
+ #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP
++
+diff -r 6c6a2299029a src/share/vm/prims/methodComparator.cpp
+--- a/src/share/vm/prims/methodComparator.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/prims/methodComparator.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -65,6 +65,7 @@
+ if (!
args_same(c_old, c_new)) + return false; + } ++ + return true; + } + +diff -r 6c6a2299029a src/share/vm/prims/nativeLookup.cpp +--- a/src/share/vm/prims/nativeLookup.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/prims/nativeLookup.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -35,6 +35,7 @@ + #include "oops/symbol.hpp" + #include "prims/jvm_misc.hpp" + #include "prims/nativeLookup.hpp" ++#include "prims/jvmtiRedefineClasses.hpp" + #include "runtime/arguments.hpp" + #include "runtime/handles.inline.hpp" + #include "runtime/javaCalls.hpp" +@@ -53,7 +54,6 @@ + # include "os_bsd.inline.hpp" + #endif + +- + static void mangle_name_on(outputStream* st, Symbol* name, int begin, int end) { + char* bytes = (char*)name->bytes() + begin; + char* end_bytes = (char*)name->bytes() + end; +@@ -138,6 +138,40 @@ + { CC"Java_sun_hotspot_WhiteBox_registerNatives", NULL, FN_PTR(JVM_RegisterWhiteBoxMethods) }, + }; + ++// Helper function to call redefineClasses from Java Code ++JVM_ENTRY(int, JVM_RedefineClassesHelper(JNIEnv *env, jclass cb, jclass target, jbyteArray bytes)) ++ ResourceMark rm(THREAD); ++ ++ JavaThread* current_thread = JavaThread::current(); ++ jbyte* bytecodes = NULL; ++ const int class_count = 1; ++ jvmtiClassDefinition* class_definitions = NEW_RESOURCE_ARRAY(jvmtiClassDefinition, class_count); ++ ++ { ++ ThreadToNativeFromVM ttnfv(thread); ++ jboolean is_copy = JNI_FALSE; ++ bytecodes = env->GetByteArrayElements(bytes, &is_copy); ++ class_definitions[0].klass = target; ++ class_definitions[0].class_byte_count = env->GetArrayLength(bytes); ++ class_definitions[0].class_bytes = (unsigned char*)bytecodes; ++ } ++ ++ VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); ++ VMThread::execute(&op); ++ int result = op.check_error(); ++ ++ { ++ ThreadToNativeFromVM ttnfv(thread); ++ if (env->ExceptionOccurred()) { ++ return -1; ++ } ++ env->ReleaseByteArrayElements(bytes, bytecodes, 0); ++ } ++ ++ return result; ++JVM_END ++ ++ + static address lookup_special_native(char* jni_name) { + int i = !JDK_Version::is_gte_jdk14x_version() ? 
0 : 2; // see comment in lookup_special_native_methods
+ int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
+@@ -177,6 +211,9 @@
+ return entry;
+ }
+ }
++ if (strstr(jni_name, "Java_at_ssw_hotswap_ClassRedefinition_redefineClasses") != NULL) {
++ return CAST_FROM_FN_PTR(address, JVM_RedefineClassesHelper);
++ }
+
+ // Otherwise call static method findNative in ClassLoader
+ KlassHandle klass (THREAD, SystemDictionary::ClassLoader_klass());
+diff -r 6c6a2299029a src/share/vm/runtime/arguments.cpp
+--- a/src/share/vm/runtime/arguments.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/runtime/arguments.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -1792,6 +1792,15 @@
+ status = false;
+ }
+
++ // (tw) Must use serial GC
++ if (!UseSerialGC && i >= 1) {
++ jio_fprintf(defaultStream::error_stream(),
++ "Must use the serial GC in the Dynamic Code Evolution VM\n");
++ status = false;
++ } else {
++ UseSerialGC = true;
++ }
++
+ return status;
+ }
+
+diff -r 6c6a2299029a src/share/vm/runtime/deoptimization.cpp
+--- a/src/share/vm/runtime/deoptimization.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/runtime/deoptimization.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -599,6 +599,38 @@
+ // Cleanup thread deopt data
+ cleanup_deopt_info(thread, array);
+
++ // (tw) Redefinition support: Check if we need to transfer method execution points to new versions
++ {
++ ResourceMark res_mark;
++
++ // Verify that the just-unpacked frames match the interpreter's
++ // notions of expression stack and locals
++ vframeArray* cur_array = thread->vframe_array_last();
++ RegisterMap rm(thread, false);
++ rm.set_include_argument_oops(false);
++ for (int i = 0; i < cur_array->frames(); i++) {
++ vframeArrayElement* el = cur_array->element(i);
++ frame* frame = el->iframe();
++ guarantee(frame->is_interpreted_frame(), "Wrong frame type");
++ RegisterMap reg_map(thread);
++ vframe* vf = vframe::new_vframe(frame, &reg_map, thread);
++ interpretedVFrame *iframe = (interpretedVFrame *)vf;
++ methodOop method = iframe->method();
++ int bci = iframe->bci();
++ method = method->newest_version();
++ iframe->set_method(method, bci);
++
++ methodOop forward_method = method->forward_method();
++ if (forward_method != NULL && method->is_in_code_section(bci)) {
++ int new_bci = method->calculate_forward_bci(bci, forward_method);
++ if (TraceRedefineClasses >= 2) {
++ tty->print_cr("Transferring execution of %s to new method old_bci=%d new_bci=%d", forward_method->name()->as_C_string(), bci, new_bci);
++ }
++ iframe->set_method(forward_method, new_bci);
++ }
++ }
++ }
++
+ #ifndef PRODUCT
+ if (VerifyStack) {
+ ResourceMark res_mark;
+diff -r 6c6a2299029a src/share/vm/runtime/frame.cpp
+--- a/src/share/vm/runtime/frame.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/runtime/frame.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -407,6 +407,12 @@
+ *interpreter_frame_method_addr() = method;
+ }
+
++// (tw) Sets constant pool cache oop
++void frame::interpreter_frame_set_cache(constantPoolCacheOop cp) {
++ assert(is_interpreted_frame(), "interpreted frame expected");
++ *interpreter_frame_cache_addr() = cp;
++}
++
+ void frame::interpreter_frame_set_bcx(intptr_t bcx) {
+ assert(is_interpreted_frame(), "Not an interpreted frame");
+ if (ProfileInterpreter) {
+@@ -422,19 +428,27 @@
+ // The bcx was just converted from bci to bcp.
+ // Convert the mdx in parallel.
+ methodDataOop mdo = interpreter_frame_method()->method_data(); +- assert(mdo != NULL, ""); +- int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. +- address mdp = mdo->di_to_dp(mdi); +- interpreter_frame_set_mdx((intptr_t)mdp); ++ if (mdo == NULL) { ++ interpreter_frame_set_mdx(0); ++ } else { ++ assert(mdo != NULL, ""); ++ int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. ++ address mdp = mdo->di_to_dp(mdi); ++ interpreter_frame_set_mdx((intptr_t)mdp); ++ } + } + } else { + if (is_now_bci) { + // The bcx was just converted from bcp to bci. + // Convert the mdx in parallel. + methodDataOop mdo = interpreter_frame_method()->method_data(); +- assert(mdo != NULL, ""); +- int mdi = mdo->dp_to_di((address)mdx); +- interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0. ++ if (mdo == NULL) { ++ interpreter_frame_set_mdx(0); ++ } else { ++ assert(mdo != NULL, ""); ++ int mdi = mdo->dp_to_di((address)mdx); ++ interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0. ++ } + } + } + } +diff -r 6c6a2299029a src/share/vm/runtime/frame.hpp +--- a/src/share/vm/runtime/frame.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/frame.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -346,6 +346,7 @@ + // Method & constant pool cache + methodOop interpreter_frame_method() const; + void interpreter_frame_set_method(methodOop method); ++ void interpreter_frame_set_cache(constantPoolCacheOop method); + methodOop* interpreter_frame_method_addr() const; + constantPoolCacheOop* interpreter_frame_cache_addr() const; + #ifdef PPC +diff -r 6c6a2299029a src/share/vm/runtime/globals.hpp +--- a/src/share/vm/runtime/globals.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/globals.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1227,9 +1227,23 @@ + product(bool, StressLdcRewrite, false, \ + "Force ldc -> ldc_w rewrite during RedefineClasses") \ + \ ++ product(bool, UseMethodForwardPoints, false, \ ++ "Use method forward points") \ ++ \ ++ product(intx, MethodForwardPointsMaxLocals, 300, \ ++ "Maximum number of locals in forwarding method") \ ++ \ ++ product(intx, MethodForwardPointsMaxStack, 300, \ ++ "Maximum number of stack slots in forwarding method") \ ++ \ + product(intx, TraceRedefineClasses, 0, \ + "Trace level for JVMTI RedefineClasses") \ + \ ++ product(bool, TimeRedefineClasses, false, \ ++ "Measure timing for JVMTI RedefineClasses") \ ++ \ ++ product(bool, AllowAdvancedClassRedefinition, true, \ ++ "Allow advanced class redefinition beyond swapping method bodies")\ + develop(bool, StressMethodComparator, false, \ + "run the MethodComparator on all loaded methods") \ + \ +diff -r 6c6a2299029a src/share/vm/runtime/interfaceSupport.hpp +--- a/src/share/vm/runtime/interfaceSupport.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/interfaceSupport.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -296,7 +296,7 @@ + ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) { + // We are leaving the VM at this point and going directly to native code. + // Block, if we are in the middle of a safepoint synchronization. +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + thread->frame_anchor()->make_walkable(thread); + trans_and_fence(_thread_in_vm, _thread_in_native); + // Check for pending. async. exceptions or suspends. 
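The frame.cpp hunk a few hunks above illustrates a pattern that recurs throughout this patch: assertions that held in a static world (here, "a profiled method always has a methodDataOop") are relaxed into NULL checks, because a freshly redefined method may not have had its profiling data recreated yet. Below is a minimal standalone sketch of that bcx/mdx conversion guard; MethodDataStub, di_to_dp and dp_to_di are illustrative stand-ins, not the real HotSpot API.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for methodDataOop: converts between a profile-data index (di)
// and a profile-data pointer (dp) relative to some base address.
struct MethodDataStub {
  intptr_t base;
  intptr_t di_to_dp(int mdi) const { return base + mdi; }
  int dp_to_di(intptr_t dp) const { return (int)(dp - base); }
};

// The frame stores mdx as index+1 so that 0 can mean "invalid". When the
// frame's bcx is rewritten between bci and bcp form, mdx must be converted
// the same way; if no profile exists yet (possible right after an enhanced
// redefinition), the conversion stores 0 instead of asserting.
intptr_t mdx_index_to_pointer(MethodDataStub* mdo, intptr_t mdx) {
  if (mdo == NULL) return 0;   // no methodData yet: tolerate it
  int mdi = (int)(mdx - 1);    // valid mdi is distinguished from zero by +1
  return mdo->di_to_dp(mdi);
}

intptr_t mdx_pointer_to_index(MethodDataStub* mdo, intptr_t mdx) {
  if (mdo == NULL) return 0;
  return (intptr_t)mdo->dp_to_di(mdx) + 1; // +1 distinguishes valid from 0
}

int main() {
  MethodDataStub md = { 1000 };
  std::printf("%ld\n", (long)mdx_index_to_pointer(&md, 5));  // prints 1004
  std::printf("%ld\n", (long)mdx_index_to_pointer(NULL, 5)); // prints 0, no crash
  return 0;
}

The relaxed lock assertions in the interfaceSupport.hpp hunk above and in the javaCalls.cpp hunk that follows are the same idea applied to locking: a thread may now legitimately hold the redefine_classes-ranked mutex across transitions that previously required owning no locks at all.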
+diff -r 6c6a2299029a src/share/vm/runtime/javaCalls.cpp +--- a/src/share/vm/runtime/javaCalls.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/javaCalls.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -60,7 +60,7 @@ + bool clear_pending_exception = true; + + guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code"); +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); + _result = result; + +diff -r 6c6a2299029a src/share/vm/runtime/jniHandles.cpp +--- a/src/share/vm/runtime/jniHandles.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/jniHandles.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -112,6 +112,10 @@ + } + + jmethodID JNIHandles::make_jmethod_id(methodHandle mh) { ++ if (mh->newest_version() != mh()) { ++ methodHandle mh_new(Thread::current(), mh()->newest_version()); ++ return (jmethodID) make_weak_global(mh_new); ++ } + return (jmethodID) make_weak_global(mh); + } + +diff -r 6c6a2299029a src/share/vm/runtime/mutex.cpp +--- a/src/share/vm/runtime/mutex.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/mutex.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1227,7 +1227,7 @@ + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1247,7 +1247,7 @@ + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1310,6 +1310,7 @@ + // already hold Terminator_lock - may happen because of periodic safepoints + if (this->rank() != Mutex::native && + this->rank() != Mutex::suspend_resume && ++ this->rank() != Mutex::redefine_classes && + locks != NULL && locks->rank() <= this->rank() && + !SafepointSynchronize::is_at_safepoint() && + this != Interrupt_lock && +diff -r 6c6a2299029a src/share/vm/runtime/mutex.hpp +--- a/src/share/vm/runtime/mutex.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/mutex.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -109,7 +109,8 @@ + barrier = safepoint + 1, + nonleaf = barrier + 1, + max_nonleaf = nonleaf + 900, +- native = max_nonleaf + 1 ++ native = max_nonleaf + 1, ++ redefine_classes = native + 1 + }; + + // The WaitSet and EntryList linked lists are composed of ParkEvents. +diff -r 6c6a2299029a src/share/vm/runtime/mutexLocker.cpp +--- a/src/share/vm/runtime/mutexLocker.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/mutexLocker.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -49,6 +49,7 @@ + // Consider using GCC's __read_mostly. 
+
+ Mutex* Patching_lock = NULL;
++Mutex* RedefineClasses_lock = NULL;
+ Monitor* SystemDictionary_lock = NULL;
+ Mutex* PackageTable_lock = NULL;
+ Mutex* CompiledIC_lock = NULL;
+@@ -90,6 +91,7 @@
+ Mutex* DirtyCardQ_FL_lock = NULL;
+ Monitor* DirtyCardQ_CBL_mon = NULL;
+ Mutex* Shared_DirtyCardQ_lock = NULL;
++Monitor* RedefinitionSync_lock = NULL;
+ Mutex* ParGCRareEvent_lock = NULL;
+ Mutex* EvacFailureStack_lock = NULL;
+ Mutex* DerivedPointerTableGC_lock = NULL;
+@@ -207,6 +209,7 @@
+ def(HotCardCache_lock , Mutex , special , true );
+ def(EvacFailureStack_lock , Mutex , nonleaf , true );
+ }
++ def(RedefinitionSync_lock , Monitor , leaf , false );
+ def(ParGCRareEvent_lock , Mutex , leaf , true );
+ def(DerivedPointerTableGC_lock , Mutex, leaf, true );
+ def(CodeCache_lock , Mutex , special, true );
+@@ -281,6 +284,7 @@
+ def(Debug3_lock , Mutex , nonleaf+4, true );
+ def(CompileThread_lock , Monitor, nonleaf+5, false);
+ def(PeriodicTask_lock , Monitor, nonleaf+5, true);
++ def(RedefineClasses_lock , Mutex , nonleaf+7, false ); // for ensuring that class redefinition is not done in parallel
+
+ #ifdef INCLUDE_TRACE
+ def(JfrMsg_lock , Monitor, leaf, true);
+diff -r 6c6a2299029a src/share/vm/runtime/mutexLocker.hpp
+--- a/src/share/vm/runtime/mutexLocker.hpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/runtime/mutexLocker.hpp Mon Apr 28 13:12:30 2014 -0700
+@@ -43,6 +43,8 @@
+ // Mutexes used in the VM.
+
+ extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code
++extern Monitor* RedefinitionSync_lock; // a lock synchronizing class redefinition
++extern Mutex* RedefineClasses_lock; // a lock on class redefinition
+ extern Monitor* SystemDictionary_lock; // a lock on the system dictionary
+ extern Mutex* PackageTable_lock; // a lock on the class loader package table
+ extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access
+diff -r 6c6a2299029a src/share/vm/runtime/reflection.cpp
+--- a/src/share/vm/runtime/reflection.cpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/runtime/reflection.cpp Mon Apr 28 13:12:30 2014 -0700
+@@ -468,7 +468,8 @@
+ // sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
+ if ( JDK_Version::is_gte_jdk14x_version()
+ && UseNewReflection
+- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) ||
++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) {
+ return true;
+ }
+
+@@ -519,6 +520,12 @@
+ AccessFlags access,
+ bool classloader_only,
+ bool protected_restriction) {
++
++ // (tw) Decide accessibility based on active version
++ if (current_class != NULL) {
++ current_class = current_class->klass_part()->active_version();
++ }
++
+ // Verify that current_class can access a field of field_class, where that
+ // field's access bits are "access". We assume that we've already verified
+ // that current_class can access field_class.
+@@ -560,7 +567,8 @@
+ // sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
+ if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +diff -r 6c6a2299029a src/share/vm/runtime/sharedRuntime.cpp +--- a/src/share/vm/runtime/sharedRuntime.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/sharedRuntime.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -1137,7 +1137,20 @@ + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && +- callee_method->method_holder() != SystemDictionary::Object_klass()) { ++ callee_method->method_holder()->klass_part()->newest_version() != SystemDictionary::Object_klass()->klass_part()->newest_version()) { ++ ++ // DCEVM: If we are executing an old method, this is OK! ++ { ++ ResourceMark rm(thread); ++ RegisterMap cbl_map(thread, false); ++ frame caller_frame = thread->last_frame().sender(&cbl_map); ++ ++ CodeBlob* caller_cb = caller_frame.cb(); ++ guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod"); ++ nmethod* caller_nm = caller_cb->as_nmethod_or_null(); ++ if (caller_nm->method()->is_old()) break; ++ } ++ + // If has a pending exception then there is no need to re-try to + // resolve this method. + // If the method has been redefined, we need to try again. +diff -r 6c6a2299029a src/share/vm/runtime/thread.cpp +--- a/src/share/vm/runtime/thread.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/thread.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -216,6 +216,8 @@ + set_self_raw_id(0); + set_lgrp_id(-1); + ++ _redefine_classes_mutex = new Mutex(Mutex::redefine_classes, "redefine classes lock", false); ++ + // allocated data structures + set_osthread(NULL); + set_resource_area(new (mtThread)ResourceArea()); +@@ -249,6 +251,7 @@ + omFreeProvision = 32 ; + omInUseList = NULL ; + omInUseCount = 0 ; ++ _pretend_new_universe = false; + + #ifdef ASSERT + _visited_for_critical_count = false; +@@ -884,6 +887,15 @@ + return false; + } + ++bool Thread::owns_locks_but_redefine_classes_lock() const { ++ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) { ++ if (cur != RedefineClasses_lock && cur->rank() != Mutex::redefine_classes) { ++ return true; ++ } ++ } ++ return false; ++} ++ + + #endif + +@@ -1637,7 +1649,7 @@ + ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm); + + assert(JavaThread::current() == this, "sanity check"); +- assert(!Thread::current()->owns_locks(), "sanity check"); ++ assert(!Thread::current()->owns_locks_but_redefine_classes_lock(), "sanity check"); + + DTRACE_THREAD_PROBE(start, this); + +@@ -3193,7 +3205,7 @@ + + // Create a CompilerThread + CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) +-: JavaThread(&compiler_thread_entry) { ++: JavaThread(&compiler_thread_entry), _should_bailout(false) { + _env = NULL; + _log = NULL; + _task = NULL; +@@ -3201,6 +3213,7 @@ + _counters = counters; + _buffer_blob = NULL; + _scanned_nmethod = NULL; ++ _compilation_mutex = new Mutex(Mutex::redefine_classes, "compilationMutex", false); + + #ifndef PRODUCT + _ideal_graph_printer = NULL; +@@ -3230,6 +3243,7 @@ + int Threads::_number_of_non_daemon_threads = 0; + int Threads::_return_code = 
0;
+ size_t JavaThread::_stack_size_at_create = 0;
++bool Threads::_wait_at_instrumentation_entry = false;
+
+ // All JavaThreads
+ #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
+diff -r 6c6a2299029a src/share/vm/runtime/thread.hpp
+--- a/src/share/vm/runtime/thread.hpp Sat Dec 14 11:51:15 2013 -0800
++++ b/src/share/vm/runtime/thread.hpp Mon Apr 28 13:12:30 2014 -0700
+@@ -203,11 +203,14 @@
+ void enter_signal_handler() { _num_nested_signal++; }
+ void leave_signal_handler() { _num_nested_signal--; }
+ bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
++ Mutex* redefine_classes_mutex() { return _redefine_classes_mutex; }
+
+ private:
+ // Debug tracing
+ static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN;
+
++ Mutex* _redefine_classes_mutex;
++
+ // Active_handles points to a block of handles
+ JNIHandleBlock* _active_handles;
+
+@@ -530,10 +533,15 @@
+ uintptr_t _self_raw_id; // used by get_thread (mutable)
+ int _lgrp_id;
+
++
++ bool _pretend_new_universe;
++
+ public:
+ // Stack overflow support
+ address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
+
++ void set_pretend_new_universe(bool b) { if (_pretend_new_universe != b) { if (TraceRedefineClasses >= 5) tty->print_cr("Changing pretend universe to %d", (int)b); _pretend_new_universe = b; } }
++ bool pretend_new_universe() { return _pretend_new_universe; }
+ void set_stack_base(address base) { _stack_base = base; }
+ size_t stack_size() const { return _stack_size; }
+ void set_stack_size(size_t size) { _stack_size = size; }
+@@ -570,6 +578,7 @@
+ void print_owned_locks() const { print_owned_locks_on(tty); }
+ Monitor* owned_locks() const { return _owned_locks; }
+ bool owns_locks() const { return owned_locks() != NULL; }
++ bool owns_locks_but_redefine_classes_lock() const;
+ bool owns_locks_but_compiled_lock() const;
+
+ // Deadlock detection
+@@ -1793,6 +1802,8 @@
+ CompileTask* _task;
+ CompileQueue* _queue;
+ BufferBlob* _buffer_blob;
++ bool _should_bailout;
++ Mutex* _compilation_mutex;
+
+ nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+
+@@ -1802,12 +1813,16 @@
+
+ CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+
++ bool should_bailout() const { return _should_bailout; }
++ void set_should_bailout(bool b) { _should_bailout = b; }
++
+ bool is_Compiler_thread() const { return true; }
+ // Hide this compiler thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ CompileQueue* queue() { return _queue; }
+ CompilerCounters* counters() { return _counters; }
++ Mutex *compilation_mutex() { return _compilation_mutex; }
+
+ // Get/set the thread's compilation environment.
+ ciEnv* env() { return _env; } +@@ -1862,6 +1877,7 @@ + static int _number_of_threads; + static int _number_of_non_daemon_threads; + static int _return_code; ++ static bool _wait_at_instrumentation_entry; + + public: + // Thread management +@@ -1873,6 +1889,9 @@ + static JavaThread* first() { return _thread_list; } + static void threads_do(ThreadClosure* tc); + ++ static bool wait_at_instrumentation_entry() { return _wait_at_instrumentation_entry; } ++ static void set_wait_at_instrumentation_entry(bool b) { _wait_at_instrumentation_entry = b; } ++ + // Initializes the vm and creates the vm thread + static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain); + static void convert_vm_init_libraries_to_agents(); +diff -r 6c6a2299029a src/share/vm/runtime/vframe.cpp +--- a/src/share/vm/runtime/vframe.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/vframe.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -253,6 +253,46 @@ + return fr().interpreter_frame_method(); + } + ++// (tw) Sets interpreter frame method. ++void interpretedVFrame::set_method(methodOop new_method, int new_bci) { ++ methodOop old_method = fr().interpreter_frame_method(); ++ int old_stack_size = fr().interpreter_frame_expression_stack_size(); ++ if (old_method == new_method) return; ++ u_char *old_bcp = bcp(); ++ int old_bci = bci(); ++ fr().interpreter_frame_set_method(new_method); ++ fr().interpreter_frame_set_cache(new_method->constants()->cache()); ++ u_char *new_bcp = new_method->code_base() + new_bci; ++ assert(new_method->bcp_from(new_bci) == new_bcp, ""); ++ ++ set_bcp(new_bcp); ++ ++ Bytecodes::Code code = Bytecodes::java_code_at(old_method, old_bcp); ++ assert(Bytecodes::java_code_at(new_method, new_bcp) == code, "must have same bytecode at this position"); ++ ++ switch (code) { ++ case Bytecodes::_invokevirtual : ++ case Bytecodes::_invokespecial : ++ case Bytecodes::_invokestatic : ++ case Bytecodes::_invokeinterface: { ++ int old_index = Bytes::get_native_u2(old_bcp+1); ++ int new_index = Bytes::get_native_u2(new_bcp+1); ++ new_method->constants()->cache()->entry_at(new_index)->copy_from(old_method->constants()->cache()->entry_at(old_index)); ++ break; ++ } ++ ++ case Bytecodes::_invokedynamic: { ++ int old_index = Bytes::get_native_u4(old_bcp+1); ++ int new_index = Bytes::get_native_u4(new_bcp+1); ++ new_method->constants()->cache()->secondary_entry_at(new_index)->copy_from(old_method->constants()->cache()->secondary_entry_at(old_index)); ++ break; ++ } ++ } ++ ++ int new_stack_size = fr().interpreter_frame_expression_stack_size(); ++ assert(new_method->validate_bci_from_bcx((intptr_t)new_bcp) == new_bci, ""); ++} ++ + StackValueCollection* interpretedVFrame::locals() const { + int length = method()->max_locals(); + +diff -r 6c6a2299029a src/share/vm/runtime/vframe.hpp +--- a/src/share/vm/runtime/vframe.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/vframe.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -163,6 +163,7 @@ + StackValueCollection* locals() const; + StackValueCollection* expressions() const; + GrowableArray<MonitorInfo*>* monitors() const; ++ void set_method(methodOop method, int new_bci); + + void set_locals(StackValueCollection* values) const; + +diff -r 6c6a2299029a src/share/vm/runtime/vmThread.cpp +--- a/src/share/vm/runtime/vmThread.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/runtime/vmThread.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -691,6 +691,10 @@ + void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) { + Thread::oops_do(f, cf); + _vm_queue->oops_do(f); ++ // 
(DCEVM) need to update oops in VM_RedefineClasses! ++ if (_cur_vm_operation != NULL) { ++ _cur_vm_operation->oops_do(f); ++ } + } + + //------------------------------------------------------------------------------------------------------------------ +diff -r 6c6a2299029a src/share/vm/utilities/exceptions.cpp +--- a/src/share/vm/utilities/exceptions.cpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/utilities/exceptions.cpp Mon Apr 28 13:12:30 2014 -0700 +@@ -254,6 +254,8 @@ + assert(thread->is_Java_thread(), "can only be called by a Java thread"); + assert(!thread->has_pending_exception(), "already has exception"); + ++ bool old_pretend_value = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + Handle h_exception; + + // Resolve exception klass +@@ -285,6 +287,7 @@ + h_exception = Handle(thread, thread->pending_exception()); + thread->clear_pending_exception(); + } ++ Thread::current()->set_pretend_new_universe(old_pretend_value); + return h_exception; + } + +@@ -295,6 +298,8 @@ + Symbol* signature, JavaCallArguments *args, + Handle h_cause, + Handle h_loader, Handle h_protection_domain) { ++ bool old_pretend_value = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + Handle h_exception = new_exception(thread, name, signature, args, h_loader, h_protection_domain); + + // Future: object initializer should take a cause argument +@@ -317,6 +322,8 @@ + h_exception = Handle(thread, thread->pending_exception()); + thread->clear_pending_exception(); + } ++ ++ Thread::current()->set_pretend_new_universe(old_pretend_value); + return h_exception; + } + +diff -r 6c6a2299029a src/share/vm/utilities/growableArray.hpp +--- a/src/share/vm/utilities/growableArray.hpp Sat Dec 14 11:51:15 2013 -0800 ++++ b/src/share/vm/utilities/growableArray.hpp Mon Apr 28 13:12:30 2014 -0700 +@@ -145,6 +145,33 @@ + assert(on_stack(), "fast ResourceObj path only"); + return (void*)resource_allocate_bytes(thread, elementSize * _max); + } ++ ++}; ++ ++template<class E, class F> class Pair : public StackObj ++{ ++private: ++ E _left; ++ F _right; ++ ++public: ++ ++ Pair() { ++ ++ } ++ ++ Pair(E left, F right) { ++ this->_left = left; ++ this->_right = right; ++ } ++ ++ E left() { ++ return _left; ++ } ++ ++ F right() { ++ return _right; ++ } + }; + + template<class E> class GrowableArray : public GenericGrowableArray { diff --git a/hotspot/.hg/patches/gc-java8.patch b/hotspot/.hg/patches/gc-java8.patch new file mode 100644 index 00000000..b190c7d8 --- /dev/null +++ b/hotspot/.hg/patches/gc-java8.patch @@ -0,0 +1,613 @@ +Change MarkAndSweep garbage collector to allow changing instances during redefinition. +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +@@ -161,6 +161,12 @@ + } + } + ++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ ShouldNotReachHere(); ++ return NULL; ++} ++ + // Like CompactibleSpace forward() but always calls cross_threshold() to + // update the block offset table. Removed initialize_threshold call because + // CFLS does not use a block offset array for contiguous spaces. 
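The gc-java8.patch beginning above is where the instance-size-changing support lives. During mark-compact, a redefined object may grow or shrink, so its forwarding destination can overlap heap words that have not been evacuated yet; such objects are "rescued" into a side buffer and copied back after compaction (see MarkSweep::copy_rescued_objects_back and CompactibleSpace::must_rescue below). The following toy model shows the core overlap test under simplified assumptions: flat word addresses stand in for HeapWord*, and rescue_needed is an illustrative name, not part of the patch.

#include <cassert>

// An object of old_size words at old_addr is forwarded to new_addr, where
// it will occupy new_size words (the sizes may differ after redefinition).
// Sliding compaction copies objects in increasing address order, so the
// copy is only safe if the destination does not extend past the end of the
// source; otherwise the object must be saved to a side buffer first.
static bool rescue_needed(long old_addr, long old_size,
                          long new_addr, long new_size) {
  return old_addr + old_size < new_addr + new_size;
}

int main() {
  assert(!rescue_needed(100, 4, 40, 4));  // slides toward lower addresses: safe
  assert(!rescue_needed(100, 4, 100, 4)); // stays put with unchanged size: safe
  assert(rescue_needed(100, 4, 100, 6));  // grows in place: would overwrite itself
  assert(rescue_needed(100, 4, 98, 8));   // destination end passes source end: unsafe
  return 0;
}

This is also why the patch series forces the serial collector: only the mark-compact algorithm has been taught this rescue protocol, matching the restriction enforced in the arguments.cpp hunk earlier in the series.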
+@@ -2098,7 +2104,7 @@ + // Support for compaction + + void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); ++ SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false); + // prepare_for_compaction() uses the space between live objects + // so that later phase can skip dead space quickly. So verification + // of the free lists doesn't work after. +@@ -2119,7 +2125,7 @@ + } + + void CompactibleFreeListSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ SCAN_AND_COMPACT(obj_size, false); + } + + // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +@@ -150,6 +150,7 @@ + + // Support for compacting cms + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); ++ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); + HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); + + // Initialization helpers. +diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp +--- a/src/share/vm/gc_implementation/shared/markSweep.cpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp +@@ -46,6 +46,8 @@ + STWGCTimer* MarkSweep::_gc_timer = NULL; + SerialOldTracer* MarkSweep::_gc_tracer = NULL; + ++GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL; ++ + MarkSweep::FollowRootClosure MarkSweep::follow_root_closure; + CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true); + +@@ -171,3 +173,100 @@ + } + + #endif ++ ++// (DCEVM) Copy the rescued objects to their destination address after compaction. ++void MarkSweep::copy_rescued_objects_back() { ++ ++ if (_rescued_oops != NULL) { ++ ++ for (int i=0; i<_rescued_oops->length(); i++) { ++ HeapWord* rescued_ptr = _rescued_oops->at(i); ++ oop rescued_obj = (oop) rescued_ptr; ++ ++ int size = rescued_obj->size(); ++ oop new_obj = rescued_obj->forwardee(); ++ ++ assert(rescued_obj->klass()->new_version() != NULL, "just checking"); ++ ++ if (rescued_obj->klass()->new_version()->update_information() != NULL) { ++ MarkSweep::update_fields(rescued_obj, new_obj); ++ } else { ++ rescued_obj->set_klass(rescued_obj->klass()->new_version()); ++ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); ++ } ++ ++ FREE_RESOURCE_ARRAY(HeapWord, rescued_ptr, size); ++ ++ new_obj->init_mark(); ++ assert(new_obj->is_oop(), "must be a valid oop"); ++ } ++ _rescued_oops->clear(); ++ _rescued_oops = NULL; ++ } ++} ++ ++// (DCEVM) Update instances of a class whose fields changed. 
++void MarkSweep::update_fields(oop q, oop new_location) {
++
++ assert(q->klass()->new_version() != NULL, "class of old object must have new version");
++
++ Klass* old_klass_oop = q->klass();
++ Klass* new_klass_oop = q->klass()->new_version();
++
++ InstanceKlass *old_klass = InstanceKlass::cast(old_klass_oop);
++ InstanceKlass *new_klass = InstanceKlass::cast(new_klass_oop);
++
++ int size = q->size_given_klass(old_klass);
++ int new_size = q->size_given_klass(new_klass);
++
++ HeapWord* tmp = NULL;
++ oop tmp_obj = q;
++
++ // Save the object to a temporary buffer first, because its old and new locations overlap
++ if (new_klass_oop->is_copying_backwards()) {
++ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) ||
++ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) {
++ tmp = NEW_RESOURCE_ARRAY(HeapWord, size);
++ q = (oop) tmp;
++ Copy::aligned_disjoint_words((HeapWord*)tmp_obj, (HeapWord*)q, size);
++ }
++ }
++
++ q->set_klass(new_klass_oop);
++ int *cur = new_klass_oop->update_information();
++ assert(cur != NULL, "just checking");
++ MarkSweep::update_fields(new_location, q, cur);
++
++ if (tmp != NULL) {
++ FREE_RESOURCE_ARRAY(HeapWord, tmp, size);
++ }
++}
++
++void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) {
++ assert(cur != NULL, "just checking");
++ char* to = (char*)(HeapWord*)new_location;
++ while (*cur != 0) {
++ int size = *cur;
++ if (size > 0) {
++ cur++;
++ int offset = *cur;
++ HeapWord* from = (HeapWord*)(((char *)(HeapWord*)tmp_obj) + offset);
++ if (size == HeapWordSize) {
++ *((HeapWord*)to) = *from;
++ } else if (size == HeapWordSize * 2) {
++ *((HeapWord*)to) = *from;
++ *(((HeapWord*)to) + 1) = *(from + 1);
++ } else {
++ Copy::conjoint_jbytes(from, to, size);
++ }
++ to += size;
++ cur++;
++ } else {
++ assert(size < 0, "");
++ int skip = -*cur;
++ Copy::fill_to_bytes(to, skip, 0);
++ to += skip;
++ cur++;
++ }
++ }
++}
+diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp
+--- a/src/share/vm/gc_implementation/shared/markSweep.hpp
++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp
+@@ -107,8 +107,12 @@
+ friend class AdjustPointerClosure;
+ friend class KeepAliveClosure;
+ friend class VM_MarkSweep;
++ friend class GenMarkSweep;
+ friend void marksweep_init();
+
++public:
++ static GrowableArray<HeapWord*>* _rescued_oops;
+
+ //
+ // Vars
+ //
+@@ -169,6 +173,9 @@
+
+ static inline void push_objarray(oop obj, size_t index);
+
++ static void copy_rescued_objects_back();
++ static void update_fields(oop q, oop new_location);
++ static void update_fields(oop new_location, oop tmp_obj, int *cur);
+ static void follow_stack(); // Empty marking stack.
+
+ static void follow_klass(Klass* klass);
+diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
+--- a/src/share/vm/memory/genMarkSweep.cpp
++++ b/src/share/vm/memory/genMarkSweep.cpp
+@@ -334,11 +334,16 @@
+ // in the same order in phase2, phase3 and phase4. We don't quite do that
+ // here (perm_gen first rather than last), so we tell the validate code
+ // to use a higher index (saved from phase2) when verifying perm_gen.
++ assert(_rescued_oops == NULL, "must be empty before processing");
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+ GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
+ trace("4");
+
++ MarkSweep::copy_rescued_objects_back();
++
+ GenCompactClosure blk;
+ gch->generation_iterate(&blk, true);
++
++ MarkSweep::copy_rescued_objects_back();
+ }
+diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
+--- a/src/share/vm/memory/space.cpp
++++ b/src/share/vm/memory/space.cpp
+@@ -379,9 +379,8 @@
+ _compaction_top = bottom();
+ }
+
+-HeapWord* CompactibleSpace::forward(oop q, size_t size,
+- CompactPoint* cp, HeapWord* compact_top) {
+- // q is alive
++// (DCEVM) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
++HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) {
+ // First check if we should switch compaction space
+ assert(this == cp->space, "'this' should be current compaction space.");
+ size_t compaction_max_size = pointer_delta(end(), compact_top);
+@@ -401,8 +400,15 @@
+ compaction_max_size = pointer_delta(cp->space->end(), compact_top);
+ }
+
++ return compact_top;
++}
++
++HeapWord* CompactibleSpace::forward(oop q, size_t size,
++ CompactPoint* cp, HeapWord* compact_top) {
++ compact_top = forward_compact_top(size, cp, compact_top);
++
+ // store the forwarding pointer into the mark word
+- if ((HeapWord*)q != compact_top) {
++ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) {
+ q->forward_to(oop(compact_top));
+ assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
+ } else {
+@@ -423,6 +429,58 @@
+ return compact_top;
+ }
+
++// Compute the forward sizes and leave out objects whose position could
++// possibly overlap other objects.
++HeapWord* CompactibleSpace::forward_with_rescue(HeapWord* q, size_t size,
++ CompactPoint* cp, HeapWord* compact_top) {
++ size_t forward_size = size;
++
++ // (DCEVM) There is a new version of the class of q => different size
++ if (oop(q)->klass()->new_version() != NULL && oop(q)->klass()->new_version()->update_information() != NULL) {
++
++ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
++ assert(size != new_size, "instances without changed size have to be updated prior to GC run");
++ forward_size = new_size;
++ }
++
++ compact_top = forward_compact_top(forward_size, cp, compact_top);
++
++ if (must_rescue(oop(q), oop(compact_top))) {
++ if (MarkSweep::_rescued_oops == NULL) {
++ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
++ }
++ MarkSweep::_rescued_oops->append(q);
++ return compact_top;
++ }
++
++ return forward(oop(q), forward_size, cp, compact_top);
++}
++
++// Compute the forwarding addresses for the objects that need to be rescued.
++HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) {
++ // TODO: empty the _rescued_oops after ALL spaces are compacted!
++ if (MarkSweep::_rescued_oops != NULL) { ++ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { ++ HeapWord* q = MarkSweep::_rescued_oops->at(i); ++ ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size(q); ++ ++ // (DCEVM) There is a new version of the class of q => different size ++ if (oop(q)->klass()->new_version() != NULL) { ++ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version()); ++ assert(size != new_size, "instances without changed size have to be updated prior to GC run"); ++ size = new_size; ++ } ++ ++ compact_top = cp->space->forward(oop(q), size, cp, compact_top); ++ assert(compact_top <= end(), "must not write over end of space!"); ++ } ++ MarkSweep::_rescued_oops->clear(); ++ MarkSweep::_rescued_oops = NULL; ++ } ++ return compact_top; ++} + + bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, + HeapWord* q, size_t deadlength) { +@@ -444,12 +502,17 @@ + #define adjust_obj_size(s) s + + void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); ++ SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false); + } + + // Faster object search. + void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ if (!Universe::is_redefining_gc_run()) { ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false); ++ } else { ++ // Redefinition run ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true); ++ } + } + + void Space::adjust_pointers() { +@@ -487,6 +550,111 @@ + assert(q == t, "just checking"); + } + ++ ++#ifdef ASSERT ++ ++int CompactibleSpace::space_index(oop obj) { ++ GenCollectedHeap* heap = GenCollectedHeap::heap(); ++ ++ //if (heap->is_in_permanent(obj)) { ++ // return -1; ++ //} ++ ++ int index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ if (space->is_in_reserved(obj)) { ++ return index; ++ } ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ tty->print_cr("could not compute space_index for %08xh", (HeapWord*)obj); ++ index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); ++ ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ ShouldNotReachHere(); ++ return 0; ++} ++#endif ++ ++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { ++ // Only redefined objects can have the need to be rescued. ++ if (oop(old_obj)->klass()->new_version() == NULL) return false; ++ ++ //if (old_obj->is_perm()) { ++ // // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj. 
++ // return true; ++ //} ++ ++ int new_size = old_obj->size_given_klass(oop(old_obj)->klass()->new_version()); ++ int original_size = old_obj->size(); ++ ++ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); ++ bool old_in_tenured = tenured_gen->is_in_reserved(old_obj); ++ bool new_in_tenured = tenured_gen->is_in_reserved(new_obj); ++ if (old_in_tenured == new_in_tenured) { ++ // Rescue if object may overlap with a higher memory address. ++ bool overlap = ((HeapWord*)old_obj + original_size < (HeapWord*)new_obj + new_size); ++ if (old_in_tenured) { ++ // Old and new address are in same space, so just compare the address. ++ // Must rescue if object moves towards the top of the space. ++ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); ++ } else { ++ // In the new generation, eden is located before the from space, so a ++ // simple pointer comparison is sufficient. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); ++ } ++ return overlap; ++ ++ } else { ++ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); ++ if (tenured_gen->is_in_reserved(new_obj)) { ++ // Must never rescue when moving from the new into the old generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) > space_index(new_obj), "must be"); ++ return false; ++ ++ } else /* if (tenured_gen->is_in_reserved(old_obj)) */ { ++ // Must always rescue when moving from the old into the new generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) < space_index(new_obj), "must be"); ++ return true; ++ } ++ } ++} ++ ++HeapWord* CompactibleSpace::rescue(HeapWord* old_obj) { ++ assert(must_rescue(oop(old_obj), oop(old_obj)->forwardee()), "do not call otherwise"); ++ ++ int size = oop(old_obj)->size(); ++ HeapWord* rescued_obj = NEW_RESOURCE_ARRAY(HeapWord, size); ++ Copy::aligned_disjoint_words(old_obj, rescued_obj, size); ++ ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128); ++ } ++ ++ MarkSweep::_rescued_oops->append(rescued_obj); ++ return rescued_obj; ++} ++ + void CompactibleSpace::adjust_pointers() { + // Check first is there is any work to do. + if (used() == 0) { +@@ -497,7 +665,12 @@ + } + + void CompactibleSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ if(!Universe::is_redefining_gc_run()) { ++ SCAN_AND_COMPACT(obj_size, false); ++ } else { ++ // Redefinition run ++ SCAN_AND_COMPACT(obj_size, true) ++ } + } + + void Space::print_short() const { print_short_on(tty); } +diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp +--- a/src/share/vm/memory/space.hpp ++++ b/src/share/vm/memory/space.hpp +@@ -450,6 +450,9 @@ + // indicates when the next such action should be taken. 
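must_rescue, shown in full above, reduces to three cases once the generations of the old and new addresses are known: within one generation, rescue only if the object would slide toward higher addresses past its own end; young-to-tenured moves never need rescue; tenured-to-young moves always do. A reduced model of that decision (the GenCollectedHeap lookups and the space_index() cross-checks are abstracted away; addresses and sizes are plain integers):

    #include <cassert>

    enum Gen { YOUNG, TENURED };

    // Decision structure of CompactibleSpace::must_rescue, in outline.
    static bool must_rescue(Gen old_gen, long old_addr, long old_size,
                            Gen new_gen, long new_addr, long new_size) {
      if (old_gen == new_gen) {
        // Same generation: rescue only when the copy overlaps upward.
        return old_addr + old_size < new_addr + new_size;
      }
      if (new_gen == TENURED) return false;  // young -> tenured: disjoint
      return true;                           // tenured -> young: always rescue
    }

    int main() {
      assert(!must_rescue(YOUNG, 0, 4, TENURED, 1000, 6));
      assert( must_rescue(TENURED, 1000, 4, YOUNG, 0, 6));
      assert( must_rescue(YOUNG, 16, 4, YOUNG, 20, 6));   // slides upward
      assert(!must_rescue(YOUNG, 16, 4, YOUNG, 0, 6));    // slides downward
      return 0;
    }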
+ virtual void prepare_for_compaction(CompactPoint* cp);
+ // MarkSweep support phase3
++ DEBUG_ONLY(int space_index(oop obj));
++ bool must_rescue(oop old_obj, oop new_obj);
++ HeapWord* rescue(HeapWord* old_obj);
+ virtual void adjust_pointers();
+ // MarkSweep support phase4
+ virtual void compact();
+@@ -479,6 +482,15 @@
+ // accordingly".
+ virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
+ HeapWord* compact_top);
++ // (DCEVM) same as forward, but can rescue objects. Invoked only during
++ // redefinition runs
++ HeapWord* forward_with_rescue(HeapWord* q, size_t size, CompactPoint* cp,
++ HeapWord* compact_top);
++
++ HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
++
++ // (tw) Compute new compact top without actually forwarding the object.
++ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
+
+ // Return a size with adjusments as required of the space.
+ virtual size_t adjust_object_size_v(size_t size) const { return size; }
+@@ -509,7 +521,7 @@
+ size_t word_len);
+ };
+
+-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
++#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
+ /* Compute the new addresses for the live objects and store it in the mark \
+ * Used by universe::mark_sweep_phase2() \
+ */ \
+@@ -567,7 +579,17 @@
+ /* prefetch beyond q */ \
+ Prefetch::write(q, interval); \
+ size_t size = block_size(q); \
++ if (redefinition_run) { \
++ compact_top = cp->space->forward_with_rescue(q, size, \
++ cp, compact_top); \
++ if (q < first_dead && oop(q)->is_gc_marked()) { \
++ /* Was moved (otherwise, forward would reset mark), \
++ set first_dead to here */ \
++ first_dead = q; \
++ } \
++ } else { \
+ compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
++ } \
+ q += size; \
+ end_of_live = q; \
+ } else { \
+@@ -616,6 +638,8 @@
+ } \
+ } \
+ \
++ if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
++ \
+ assert(q == t, "just checking"); \
+ if (liveRange != NULL) { \
+ liveRange->set_end(q); \
+@@ -662,13 +686,8 @@
+ q += size; \
+ } \
+ \
+- if (_first_dead == t) { \
+- q = t; \
+- } else { \
+- /* $$$ This is funky. Using this to read the previously written \
+- * LiveRange. See also use below.
*/ \ +- q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ +- } \ ++ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ ++ q = _first_dead; \ + } \ + \ + const intx interval = PrefetchScanIntervalInBytes; \ +@@ -696,7 +715,7 @@ + assert(q == t, "just checking"); \ + } + +-#define SCAN_AND_COMPACT(obj_size) { \ ++#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \ + /* Copy all live objects to their new location \ + * Used by MarkSweep::mark_sweep_phase4() */ \ + \ +@@ -721,13 +740,9 @@ + } \ + ) /* debug_only */ \ + \ +- if (_first_dead == t) { \ +- q = t; \ +- } else { \ +- /* $$$ Funky */ \ +- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ ++ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ ++ q = _first_dead; \ + } \ +- } \ + \ + const intx scan_interval = PrefetchScanIntervalInBytes; \ + const intx copy_interval = PrefetchCopyIntervalInBytes; \ +@@ -745,11 +760,34 @@ + size_t size = obj_size(q); \ + HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ + \ ++ if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \ ++ rescue(q); \ ++ debug_only(Copy::fill_to_words(q, size, 0)); \ ++ q += size; \ ++ continue; \ ++ } \ ++ \ + /* prefetch beyond compaction_top */ \ + Prefetch::write(compaction_top, copy_interval); \ + \ + /* copy object and reinit its mark */ \ +- assert(q != compaction_top, "everything in this pass should be moving"); \ ++ assert(q != compaction_top || oop(q)->klass()->new_version() != NULL, \ ++ "everything in this pass should be moving"); \ ++ if (redefinition_run && oop(q)->klass()->new_version() != NULL) { \ ++ Klass* new_version = oop(q)->klass()->new_version(); \ ++ if (new_version->update_information() == NULL) { \ ++ Copy::aligned_conjoint_words(q, compaction_top, size); \ ++ oop(compaction_top)->set_klass(new_version); \ ++ } else { \ ++ MarkSweep::update_fields(oop(q), oop(compaction_top)); \ ++ } \ ++ oop(compaction_top)->init_mark(); \ ++ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ ++ \ ++ debug_only(prev_q = q); \ ++ q += size; \ ++ continue; \ ++ } \ + Copy::aligned_conjoint_words(q, compaction_top, size); \ + oop(compaction_top)->init_mark(); \ + assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ +diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp +--- a/src/share/vm/memory/universe.cpp ++++ b/src/share/vm/memory/universe.cpp +@@ -78,6 +78,8 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" + #endif // INCLUDE_ALL_GCS + ++bool Universe::_is_redefining_gc_run = false; ++ + // Known objects + Klass* Universe::_boolArrayKlassObj = NULL; + Klass* Universe::_byteArrayKlassObj = NULL; +diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp +--- a/src/share/vm/memory/universe.hpp ++++ b/src/share/vm/memory/universe.hpp +@@ -248,7 +248,13 @@ + + static void compute_verify_oop_data(); + ++ static bool _is_redefining_gc_run; ++ + public: ++ ++ static bool is_redefining_gc_run() { return _is_redefining_gc_run; } ++ static void set_redefining_gc_run(bool b) { _is_redefining_gc_run = b; } ++ + // Known classes in the VM + static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; } + static Klass* byteArrayKlassObj() { return _byteArrayKlassObj; } diff --git a/hotspot/.hg/patches/light-jdk7u40-b43.patch b/hotspot/.hg/patches/light-jdk7u40-b43.patch new file mode 100644 index 00000000..e78e6599 --- /dev/null +++ 
b/hotspot/.hg/patches/light-jdk7u40-b43.patch @@ -0,0 +1,10126 @@ +diff --git a/src/cpu/x86/vm/interp_masm_x86_32.cpp b/src/cpu/x86/vm/interp_masm_x86_32.cpp +index b0ebcfd..6366d68 100644 +--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp ++++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp +@@ -1364,7 +1364,7 @@ void InterpreterMacroAssembler::notify_method_entry() { + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + get_thread(rcx); + get_method(rbx); + call_VM_leaf( +diff --git a/src/cpu/x86/vm/interp_masm_x86_64.cpp b/src/cpu/x86/vm/interp_masm_x86_64.cpp +index 2790c2a..c315b18 100644 +--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp ++++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp +@@ -1427,7 +1427,7 @@ void InterpreterMacroAssembler::notify_method_entry() { + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + get_method(c_rarg1); + call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), +diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +index 16958cd..09d6300 100644 +--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp ++++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +@@ -1976,7 +1976,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + __ movoop(rax, JNIHandles::make_local(method())); + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), +diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +index 7dc4e62..86c8c95 100644 +--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp ++++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +@@ -2235,7 +2235,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + // protect the args we've loaded + save_args(masm, total_c_args, c_arg, out_regs); + __ movoop(c_rarg1, JNIHandles::make_local(method())); +diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp +index 9a8ca61..83e6f54 100644 +--- a/src/share/vm/c1/c1_Compilation.hpp ++++ b/src/share/vm/c1/c1_Compilation.hpp +@@ -242,8 +242,9 @@ class Compilation: public StackObj { + #define BAILOUT(msg) { bailout(msg); return; } + #define BAILOUT_(msg, res) { bailout(msg); return res; } + +-#define CHECK_BAILOUT() { if (bailed_out()) return; } +-#define CHECK_BAILOUT_(res) { if (bailed_out()) return res; } ++// (tw) Also checks a thread local flag that can be set to trigger compiler bailout from another thread. 
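The four hunks above replace the bitmask test RC_TRACE_IN_RANGE(0x00001000, 0x00002000) with IF_TRACE_RC4, consistent with the plain level checks against TraceRedefineClasses (TraceRedefineClasses >= 3, TRACE_RC3) that appear later in this patch. The macro definitions themselves are not part of this hunk; the following shape is only an assumption based on those uses:

    #include <cstdio>

    // Assumed shape of the DCEVM trace macros: one global verbosity level
    // instead of the old bitmask ranges.
    static int TraceRedefineClasses = 4;

    #define IF_TRACE_RC4 if (TraceRedefineClasses >= 4)
    #define TRACE_RC4(...) IF_TRACE_RC4 { std::printf(__VA_ARGS__); std::printf("\n"); }

    int main() {
      IF_TRACE_RC4 {
        std::printf("obsolete method entry traced\n");  // only at level >= 4
      }
      TRACE_RC4("rc_trace_method_entry for %s", "Foo.bar()V");
      return 0;
    }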
++#define CHECK_BAILOUT() { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return; }
++#define CHECK_BAILOUT_(res) { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return res; }
+
+
+ class InstructionMark: public StackObj {
+diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
+index e0ab96b..db8e551 100644
+--- a/src/share/vm/ci/ciObjectFactory.cpp
++++ b/src/share/vm/ci/ciObjectFactory.cpp
+@@ -764,3 +764,26 @@ void ciObjectFactory::print() {
+ _unloaded_instances->length(),
+ _unloaded_klasses->length());
+ }
++
++int ciObjectFactory::compare_ciobjects(ciObject** a, ciObject** b) {
++ oop oop1 = (*a)->get_oop();
++ oop oop2 = (*b)->get_oop();
++ return ((oop1 > oop2) ? 1 : ((oop1 == oop2) ? 0 : -1));
++}
++
++// (DCEVM) Re-sorting the ciObject arrays after class redefinition
++void ciObjectFactory::resort_shared_ci_objects() {
++ _shared_ci_objects->sort(ciObjectFactory::compare_ciobjects);
++
++#ifdef ASSERT
++ if (CIObjectFactoryVerify) {
++ oop last = NULL;
++ for (int j = 0; j < _shared_ci_objects->length(); j++) {
++ oop o = _shared_ci_objects->at(j)->get_oop();
++ assert(last < o, "out of order");
++ last = o;
++ }
++ }
++#endif // ASSERT
++}
++
+diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp
+index 26cc2c3..d99d3d6 100644
+--- a/src/share/vm/ci/ciObjectFactory.hpp
++++ b/src/share/vm/ci/ciObjectFactory.hpp
+@@ -88,6 +88,7 @@ private:
+
+ ciInstance* get_unloaded_instance(ciInstanceKlass* klass);
+
++ static int compare_ciobjects(ciObject** a, ciObject** b);
+ public:
+ static bool is_initialized() { return _initialized; }
+
+@@ -137,6 +138,8 @@ public:
+
+ void print_contents();
+ void print();
++
++ static void resort_shared_ci_objects();
+ };
+
+ #endif // SHARE_VM_CI_CIOBJECTFACTORY_HPP
+diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
+index 8d57bb1..8b7d5eb 100644
+--- a/src/share/vm/classfile/classFileParser.cpp
++++ b/src/share/vm/classfile/classFileParser.cpp
+@@ -795,6 +795,7 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ Handle class_loader,
+ Handle protection_domain,
+ Symbol* class_name,
++ KlassHandle old_klass,
+ TRAPS) {
+ ClassFileStream* cfs = stream();
+ assert(length > 0, "only called for length>0");
+@@ -813,6 +814,9 @@
+ interface_index, CHECK_(nullHandle));
+ if (cp->tag_at(interface_index).is_klass()) {
+ interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
++ if (!old_klass.is_null() && !interf->is_newest_version()) {
++ interf = KlassHandle(THREAD, interf->newest_version());
++ }
+ } else {
+ Symbol* unresolved_klass = cp->klass_name_at(interface_index);
+
+@@ -825,6 +829,9 @@
+ klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+ unresolved_klass, class_loader, protection_domain,
+ false, CHECK_(nullHandle));
++ if (!old_klass.is_null()) {
++ k = k->klass_part()->newest_version();
++ }
+ interf = KlassHandle(THREAD, k);
+ }
+
+@@ -2921,8 +2928,10 @@ typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annota
+ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
+ Handle class_loader,
+ Handle protection_domain,
++ KlassHandle old_klass,
+ KlassHandle host_klass,
+ 
GrowableArray<Handle>* cp_patches, ++ GrowableArray<Symbol*>* parsed_super_symbols, + TempNewSymbol& parsed_name, + bool verify, + TRAPS) { +@@ -2948,7 +2957,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + + init_parsed_class_attributes(); + +- if (JvmtiExport::should_post_class_file_load_hook()) { ++ if (parsed_super_symbols == NULL && JvmtiExport::should_post_class_file_load_hook()) { + // Get the cached class file bytes (if any) from the class that + // is being redefined or retransformed. We use jvmti_thread_state() + // instead of JvmtiThreadState::state_for(jt) so we don't allocate +@@ -2971,10 +2980,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + unsigned char* ptr = cfs->buffer(); + unsigned char* end_ptr = cfs->buffer() + cfs->length(); + ++ bool pretend_new_universe = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, + &ptr, &end_ptr, + &cached_class_file_bytes, + &cached_class_file_length); ++ Thread::current()->set_pretend_new_universe(pretend_new_universe); + + if (ptr != cfs->buffer()) { + // JVMTI agent has modified class file data. +@@ -3090,6 +3102,30 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + CHECK_(nullHandle)); + } + ++ // (tw) Do not parse full class file, only get super symbols and return. ++ if (parsed_super_symbols != NULL) { ++ u2 super_class_index = cfs->get_u2_fast(); ++ ++ if (super_class_index != 0) { ++ parsed_super_symbols->append(cp->klass_name_at(super_class_index)); ++ } ++ ++ // Interfaces ++ u2 itfs_len = cfs->get_u2_fast(); ++ objArrayHandle local_interfaces; ++ if (itfs_len == 0) { ++ local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); ++ } else { ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_NULL); ++ } ++ ++ for (int i=0; i<local_interfaces->length(); i++) { ++ oop o = local_interfaces->obj_at(i); ++ parsed_super_symbols->append(((klassOop)o)->klass_part()->name()); ++ } ++ return NULL; ++ } ++ + klassOop preserve_this_klass; // for storing result across HandleMark + + // release all handles when parsing is done +@@ -3130,7 +3166,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + // However, make sure it is not an array type. 
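parseClassFile above brackets the JVMTI class-file-load hook with an explicit save/set/restore of the thread's pretend_new_universe flag, so agents transforming the bytes never observe the half-installed new universe. The same pattern written as an RAII guard, offered only as a sketch (the patch itself keeps the explicit save/restore shown in the hunk):

    #include <cassert>

    // RAII guard mirroring the save/set/restore around post_class_file_load_hook.
    class FlagGuard {
      bool* flag_;
      bool saved_;
     public:
      FlagGuard(bool* flag, bool value) : flag_(flag), saved_(*flag) { *flag_ = value; }
      ~FlagGuard() { *flag_ = saved_; }
    };

    static thread_local bool pretend_new_universe = false;

    int main() {
      pretend_new_universe = true;
      {
        FlagGuard g(&pretend_new_universe, false);  // hide the new universe
        assert(!pretend_new_universe);              // hook body would run here
      }
      assert(pretend_new_universe);                 // restored on scope exit
      return 0;
    }

The guard form cannot leak the flag on an early return or exception, which is the usual argument for preferring it.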
+ bool is_array = false; + if (cp->tag_at(super_class_index).is_klass()) { +- super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index)); ++ klassOop resolved_klass = cp->resolved_klass_at(super_class_index); ++ if (!old_klass.is_null()) { ++ resolved_klass = resolved_klass->klass_part()->newest_version(); ++ } ++ super_klass = instanceKlassHandle(THREAD, resolved_klass); + if (_need_verify) + is_array = super_klass->oop_is_array(); + } else if (_need_verify) { +@@ -3148,7 +3188,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + if (itfs_len == 0) { + local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); + } else { +- local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle)); ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_(nullHandle)); + } + + u2 java_fields_count = 0; +@@ -3202,7 +3242,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + protection_domain, + true, + CHECK_(nullHandle)); +- ++ if (!old_klass.is_null()) { ++ k = k->klass_part()->newest_version(); ++ } + KlassHandle kh (THREAD, k); + super_klass = instanceKlassHandle(THREAD, kh()); + } +@@ -3591,6 +3633,19 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + rt = REF_NONE; + } else { + rt = super_klass->reference_type(); ++ ++ // (tw) With class redefinition, it can also happen that special classes are loaded. ++ if (name == vmSymbols::java_lang_ref_Reference()) { ++ rt = REF_OTHER; ++ } else if (name == vmSymbols::java_lang_ref_SoftReference()) { ++ rt = REF_SOFT; ++ } else if (name == vmSymbols::java_lang_ref_WeakReference()) { ++ rt = REF_WEAK; ++ } else if (name == vmSymbols::java_lang_ref_FinalReference()) { ++ rt = REF_FINAL; ++ } else if (name == vmSymbols::java_lang_ref_PhantomReference()) { ++ rt = REF_PHANTOM; ++ } + } + + // We can now create the basic klassOop for this klass +@@ -3599,6 +3654,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + total_oop_map_count, + access_flags, + rt, host_klass, ++ old_klass, + CHECK_(nullHandle)); + instanceKlassHandle this_klass (THREAD, ik); + +@@ -3691,7 +3747,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts); + + // Fill in has_finalizer, has_vanilla_constructor, and layout_helper +- set_precomputed_flags(this_klass); ++ set_precomputed_flags(this_klass, old_klass); + + // reinitialize modifiers, using the InnerClasses attribute + int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle)); +@@ -3711,6 +3767,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + check_illegal_static_method(this_klass, CHECK_(nullHandle)); + } + ++ if (rt == REF_OTHER) { ++ instanceRefKlass::update_nonstatic_oop_maps(ik); ++ } ++ + // Allocate mirror and initialize static fields + java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); + +@@ -3856,7 +3916,7 @@ void ClassFileParser::fill_oop_maps(instanceKlassHandle k, + } + + +-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { ++void ClassFileParser::set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass) { + klassOop super = k->super(); + + // Check if this klass has an empty finalize method (i.e. 
one with return bytecode only), +@@ -3864,7 +3924,9 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { + if (!_has_empty_finalizer) { + if (_has_finalizer || + (super != NULL && super->klass_part()->has_finalizer())) { +- k->set_has_finalizer(); ++ if (old_klass.is_null() || old_klass->has_finalizer()) { ++ k->set_has_finalizer(); ++ } + } + } + +@@ -3880,7 +3942,7 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { + + // Check if this klass supports the java.lang.Cloneable interface + if (SystemDictionary::Cloneable_klass_loaded()) { +- if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) { ++ if (k->is_subtype_of(SystemDictionary::Cloneable_klass()) || k->is_subtype_of(SystemDictionary::Cloneable_klass()->klass_part()->newest_version())) { + k->set_is_cloneable(); + } + } +diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp +index 314ec5e..5fca1da 100644 +--- a/src/share/vm/classfile/classFileParser.hpp ++++ b/src/share/vm/classfile/classFileParser.hpp +@@ -151,6 +151,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + Handle class_loader, + Handle protection_domain, + Symbol* class_name, ++ KlassHandle old_klass, + TRAPS); + + // Field parsing +@@ -237,7 +238,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + unsigned int nonstatic_oop_map_count, + int* nonstatic_oop_offsets, + unsigned int* nonstatic_oop_counts); +- void set_precomputed_flags(instanceKlassHandle k); ++ void set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass); + objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, + objArrayHandle local_ifs, TRAPS); + +@@ -349,17 +350,20 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + TempNewSymbol& parsed_name, + bool verify, + TRAPS) { + KlassHandle no_host_klass; +- return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD); ++ return parseClassFile(name, class_loader, protection_domain, old_klass, no_host_klass, NULL, NULL, parsed_name, verify, THREAD); + } + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + KlassHandle host_klass, + GrowableArray<Handle>* cp_patches, ++ GrowableArray<Symbol*>* parsed_super_symbols, + TempNewSymbol& parsed_name, + bool verify, + TRAPS); +diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp +index a2e61a4..450e19f 100644 +--- a/src/share/vm/classfile/classLoader.cpp ++++ b/src/share/vm/classfile/classLoader.cpp +@@ -915,6 +915,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) { + instanceKlassHandle result = parser.parseClassFile(h_name, + class_loader, + protection_domain, ++ KlassHandle(), + parsed_name, + false, + CHECK_(h)); +diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp +index 78e76cc..d167c98 100644 +--- a/src/share/vm/classfile/dictionary.cpp ++++ b/src/share/vm/classfile/dictionary.cpp +@@ -144,87 +144,10 @@ bool Dictionary::do_unloading(BoolObjectClosure* is_alive) { + probe = *p; + klassOop e = probe->klass(); + oop class_loader = probe->loader(); +- + instanceKlass* ik = instanceKlass::cast(e); +- if (ik->previous_versions() != NULL) { +- // This klass has previous versions so see what we can cleanup +- // while it is safe to do so. 
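The parser and dictionary hunks in this patch rely on a version chain threaded through every redefined class: old_version()/new_version() link consecutive versions, newest_version() walks to the head, and call sites normalize through it. A minimal model of that bookkeeping; the semantics of is_same_or_older_version() are inferred from its use in verify_obj_klass_present later in the patch:

    #include <cassert>

    // Minimal model of the DCEVM class version chain.
    struct Klass {
      Klass* old_version = nullptr;
      Klass* new_version = nullptr;

      bool is_newest_version() const { return new_version == nullptr; }

      Klass* newest_version() {
        Klass* k = this;
        while (k->new_version != nullptr) k = k->new_version;
        return k;
      }

      // True if 'this' is 'other' or one of its predecessors.
      bool is_same_or_older_version(const Klass* other) const {
        for (const Klass* k = other; k != nullptr; k = k->old_version)
          if (k == this) return true;
        return false;
      }
    };

    int main() {
      Klass v1, v2, v3;
      v1.new_version = &v2; v2.old_version = &v1;
      v2.new_version = &v3; v3.old_version = &v2;
      assert(v1.newest_version() == &v3);
      assert(v3.is_newest_version() && !v1.is_newest_version());
      assert(v1.is_same_or_older_version(&v3));
      assert(!v3.is_same_or_older_version(&v1));
      return 0;
    }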
+- +- int gc_count = 0; // leave debugging breadcrumbs +- int live_count = 0; +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000200, ("unload: %s: previous version length=%d", +- ik->external_name(), ik->previous_versions()->length())); +- +- for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) { +- // check the previous versions array for GC'ed weak refs +- PreviousVersionNode * pv_node = ik->previous_versions()->at(i); +- jobject cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- delete pv_node; +- ik->previous_versions()->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- constantPoolOop pvcp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (pvcp == NULL) { +- // this entry has been GC'ed so remove it +- delete pv_node; +- ik->previous_versions()->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- gc_count++; +- continue; +- } else { +- RC_TRACE(0x00000200, ("unload: previous version @%d is alive", i)); +- if (is_alive->do_object_b(pvcp)) { +- live_count++; +- } else { +- guarantee(false, "sanity check"); +- } +- } +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs != NULL) { +- RC_TRACE(0x00000200, ("unload: previous methods length=%d", +- method_refs->length())); +- for (int j = method_refs->length() - 1; j >= 0; j--) { +- jweak method_ref = method_refs->at(j); +- assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- method_refs->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL) { +- // this method entry has been GC'ed so remove it +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(j); +- } else { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000200, +- ("unload: %s(%s): prev method @%d in version @%d is alive", +- method->name()->as_C_string(), +- method->signature()->as_C_string(), j, i)); +- } +- } +- } +- } +- assert(ik->previous_versions()->length() == live_count, "sanity check"); +- RC_TRACE(0x00000200, +- ("unload: previous version stats: live=%d, GC'ed=%d", live_count, +- gc_count)); +- } +- ++ + // Non-unloadable classes were handled in always_strong_oops_do +- if (!is_strongly_reachable(class_loader, e)) { ++ if (!ik->is_redefining() && !is_strongly_reachable(class_loader, e)) { + // Entry was not visited in phase1 (negated test from phase1) + assert(class_loader != NULL, "unloading entry with null class loader"); + oop k_def_class_loader = ik->class_loader(); +@@ -326,6 +249,7 @@ void Dictionary::classes_do(void f(klassOop)) { + } + } + ++ + // Added for initialize_itable_for_klass to handle exceptions + // Just the classes from defining class loaders + void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) { +@@ -433,6 +357,33 @@ void Dictionary::add_klass(Symbol* class_name, Handle class_loader, + add_entry(index, entry); + } + ++// (tw) Updates the klass entry to point to the new klassOop. Necessary only for class redefinition. 
++bool Dictionary::update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class) { ++ ++ // There are several entries for the same class in the dictionary: One extra entry for each parent classloader of the classloader of the class. ++ bool found = false; ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass() == old_class()) { ++ entry->set_literal(k()); ++ found = true; ++ } ++ } ++ } ++ ++ return found; ++} ++ ++// (tw) Undo previous updates to the system dictionary ++void Dictionary::rollback_redefinition() { ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass()->klass_part()->is_redefining()) { ++ entry->set_literal(entry->klass()->klass_part()->old_version()); ++ } ++ } ++ } ++} + + // This routine does not lock the system dictionary. + // +@@ -459,12 +410,21 @@ DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash, + return NULL; + } + ++klassOop Dictionary::intercept_for_version(klassOop k) { ++ if (k == NULL) return k; ++ ++ if (k->klass_part()->is_redefining() && !Thread::current()->pretend_new_universe()) { ++ return k->klass_part()->old_version(); ++ } ++ ++ return k; ++} + + klassOop Dictionary::find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS) { + DictionaryEntry* entry = get_entry(index, hash, name, loader); + if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) { +- return entry->klass(); ++ return intercept_for_version(entry->klass()); + } else { + return NULL; + } +@@ -477,7 +437,7 @@ klassOop Dictionary::find_class(int index, unsigned int hash, + assert (index == index_for(name, loader), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, loader); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? entry->klass() : (klassOop)NULL); + } + + +@@ -489,7 +449,7 @@ klassOop Dictionary::find_shared_class(int index, unsigned int hash, + assert (index == index_for(name, Handle()), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, Handle()); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? 
entry->klass() : (klassOop)NULL); + } + + +diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dictionary.hpp +index bd33760..ea1fe3c 100644 +--- a/src/share/vm/classfile/dictionary.hpp ++++ b/src/share/vm/classfile/dictionary.hpp +@@ -73,6 +73,10 @@ public: + + void add_klass(Symbol* class_name, Handle class_loader,KlassHandle obj); + ++ bool update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class); ++ ++ void rollback_redefinition(); ++ + klassOop find_class(int index, unsigned int hash, + Symbol* name, Handle loader); + +@@ -105,6 +109,7 @@ public: + bool do_unloading(BoolObjectClosure* is_alive); + + // Protection domains ++ static klassOop intercept_for_version(klassOop k); + klassOop find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS); + bool is_valid_protection_domain(int index, unsigned int hash, +diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp +index f8b10b3..c417a29 100644 +--- a/src/share/vm/classfile/javaClasses.cpp ++++ b/src/share/vm/classfile/javaClasses.cpp +@@ -621,6 +621,10 @@ klassOop java_lang_Class::as_klassOop(oop java_class) { + assert(java_lang_Class::is_instance(java_class), "must be a Class object"); + klassOop k = klassOop(java_class->obj_field(_klass_offset)); + assert(k == NULL || k->is_klass(), "type check"); ++ // Necessary to make old verifier work. ++ if (Thread::current()->pretend_new_universe()) { ++ k = k->klass_part()->newest_version(); ++ } + return k; + } + +@@ -1541,6 +1545,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle met + skip_throwableInit_check = true; + } + } ++ method = method->newest_version(); + if (method->is_hidden()) { + if (skip_hidden) continue; + } +diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp +index b741cfa..5412831 100644 +--- a/src/share/vm/classfile/javaClasses.hpp ++++ b/src/share/vm/classfile/javaClasses.hpp +@@ -213,6 +213,7 @@ class java_lang_String : AllStatic { + + class java_lang_Class : AllStatic { + friend class VMStructs; ++ friend class VM_RedefineClasses; + + private: + // The fake offsets are added by the class loader when java.lang.Class is loaded +@@ -248,7 +249,7 @@ class java_lang_Class : AllStatic { + static void print_signature(oop java_class, outputStream *st); + // Testing + static bool is_instance(oop obj) { +- return obj != NULL && obj->klass() == SystemDictionary::Class_klass(); ++ return obj != NULL && (obj->klass()->klass_part()->newest_version() == SystemDictionary::Class_klass()->klass_part()->newest_version()); + } + static bool is_primitive(oop java_class); + static BasicType primitive_type(oop java_class); +diff --git a/src/share/vm/classfile/loaderConstraints.cpp b/src/share/vm/classfile/loaderConstraints.cpp +index 8650cd9..965cce2 100644 +--- a/src/share/vm/classfile/loaderConstraints.cpp ++++ b/src/share/vm/classfile/loaderConstraints.cpp +@@ -449,7 +449,7 @@ void LoaderConstraintTable::verify(Dictionary* dictionary, + if (k != NULL) { + // We found the class in the system dictionary, so we should + // make sure that the klassOop matches what we already have. +- guarantee(k == probe->klass(), "klass should be in dictionary"); ++ guarantee(k == probe->klass()->klass_part()->newest_version(), "klass should be in dictionary"); + } else { + // If we don't find the class in the system dictionary, it + // has to be in the placeholders table. 
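The dictionary changes above come in two halves. update_klass swaps the literal of every entry still pointing at the old version in place (one entry exists per initiating loader, hence the full-table walk), and intercept_for_version filters each lookup so that ordinary threads keep resolving the old version until the redefinition commits. A compact model of both mechanisms, with a thread_local standing in for Thread::pretend_new_universe():

    #include <cassert>
    #include <vector>

    static thread_local bool pretend_new_universe = false;

    struct Klass {
      Klass* old_version = nullptr;
      bool redefining = false;
    };

    // Dictionary::intercept_for_version analogue: hide in-flight versions.
    static Klass* intercept_for_version(Klass* k) {
      if (k != nullptr && k->redefining && !pretend_new_universe)
        return k->old_version;
      return k;
    }

    int main() {
      Klass v1;
      Klass v2; v2.old_version = &v1; v2.redefining = true;
      std::vector<Klass*> dictionary = { &v1 };

      for (Klass*& entry : dictionary)          // Dictionary::update_klass analogue
        if (entry == &v1) entry = &v2;

      assert(intercept_for_version(dictionary[0]) == &v1); // others still see v1
      pretend_new_universe = true;
      assert(intercept_for_version(dictionary[0]) == &v2); // redefiner sees v2
      v2.redefining = false;                    // commit
      pretend_new_universe = false;
      assert(intercept_for_version(dictionary[0]) == &v2);
      return 0;
    }

rollback_redefinition is the inverse walk: any entry whose klass is still marked is_redefining() gets its literal reset to the old version.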
+diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp +index 899153a..3f64268 100644 +--- a/src/share/vm/classfile/systemDictionary.cpp ++++ b/src/share/vm/classfile/systemDictionary.cpp +@@ -157,6 +157,7 @@ klassOop SystemDictionary::resolve_or_fail(Symbol* class_name, Handle class_load + // can return a null klass + klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD); + } ++ assert(klass == NULL || klass->klass_part()->is_newest_version() || klass->klass_part()->newest_version()->klass_part()->is_redefining(), "must be"); + return klass; + } + +@@ -199,7 +200,7 @@ klassOop SystemDictionary::resolve_or_fail(Symbol* class_name, + // Forwards to resolve_instance_class_or_null + + klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) { +- assert(!THREAD->is_Compiler_thread(), ++ assert(!THREAD->is_Compiler_thread() || JvmtiThreadState::state_for(JavaThread::current())->get_class_being_redefined() != NULL, + err_msg("can not load classes with compiler thread: class=%s, classloader=%s", + class_name->as_C_string(), + class_loader.is_null() ? "null" : class_loader->klass()->klass_part()->name()->as_C_string())); +@@ -961,8 +962,10 @@ klassOop SystemDictionary::parse_stream(Symbol* class_name, + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ KlassHandle(), + host_klass, + cp_patches, ++ NULL, + parsed_name, + true, + THREAD); +@@ -1022,7 +1025,15 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + Handle protection_domain, + ClassFileStream* st, + bool verify, ++ KlassHandle old_class, + TRAPS) { ++ ++ bool redefine_classes_locked = false; ++ if (!Thread::current()->redefine_classes_mutex()->owned_by_self()) { ++ Thread::current()->redefine_classes_mutex()->lock(); ++ redefine_classes_locked = true; ++ } ++ + // Classloaders that support parallelism, e.g. bootstrap classloader, + // or all classloaders with UnsyncloadClass do not acquire lock here + bool DoObjectLock = true; +@@ -1050,9 +1061,14 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ old_class, + parsed_name, + verify, + THREAD); ++ if (!old_class.is_null() && !k.is_null()) { ++ k->set_redefining(true); ++ k->set_old_version(old_class()); ++ } + + const char* pkg = "java/"; + if (!HAS_PENDING_EXCEPTION && +@@ -1087,13 +1103,18 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + // Add class just loaded + // If a class loader supports parallel classloading handle parallel define requests + // find_or_define_instance_class may return a different instanceKlass +- if (is_parallelCapable(class_loader)) { ++ // (tw) TODO: for class redefinition the parallel version does not work, check if this is a problem? ++ if (is_parallelCapable(class_loader) && old_class.is_null()) { + k = find_or_define_instance_class(class_name, class_loader, k, THREAD); + } else { +- define_instance_class(k, THREAD); ++ define_instance_class(k, old_class, THREAD); + } + } + ++ if (redefine_classes_locked) { ++ Thread::current()->redefine_classes_mutex()->unlock(); ++ } ++ + // If parsing the class file or define_instance_class failed, we + // need to remove the placeholder added on our behalf. 
But we + // must make sure parsed_name is valid first (it won't be if we had +@@ -1122,7 +1143,7 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + MutexLocker mu(SystemDictionary_lock, THREAD); + + klassOop check = find_class(parsed_name, class_loader); +- assert(check == k(), "should be present in the dictionary"); ++ assert((check == k() && !k->is_redefining()) || (k->is_redefining() && check == k->old_version()), "should be present in the dictionary"); + + klassOop check2 = find_class(h_name, h_loader); + assert(check == check2, "name inconsistancy in SystemDictionary"); +@@ -1349,7 +1370,11 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha + } + } + +-void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { ++void SystemDictionary::rollback_redefinition() { ++ dictionary()->rollback_redefinition(); ++} ++ ++void SystemDictionary::define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS) { + + Handle class_loader_h(THREAD, k->class_loader()); + +@@ -1376,13 +1401,23 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { + Symbol* name_h = k->name(); + unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h); + int d_index = dictionary()->hash_to_index(d_hash); +- check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK); ++ ++ // (tw) Update version of the klassOop in the system dictionary ++ // TODO: Check for thread safety! ++ if (!old_class.is_null()) { ++ bool ok = dictionary()->update_klass(d_index, d_hash, name_h, class_loader_h, k, old_class); ++ assert (ok, "must have found old class and updated!"); ++ } ++ check_constraints(d_index, d_hash, k, class_loader_h, old_class.is_null(), CHECK); ++ ++ if(!old_class.is_null() && TraceRedefineClasses >= 3){ tty->print_cr("Class has been updated!"); } + + // Register class just loaded with class loader (placed in Vector) + // Note we do this before updating the dictionary, as this can + // fail with an OutOfMemoryError (if it does, we will *not* put this + // class in the dictionary and will not update the class hierarchy). +- if (k->class_loader() != NULL) { ++ // (tw) Only register if not redefining a class. ++ if (k->class_loader() != NULL && old_class.is_null()) { + methodHandle m(THREAD, Universe::loader_addClass_method()); + JavaValue result(T_VOID); + JavaCallArguments args(class_loader_h); +@@ -1408,8 +1443,9 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { + } + k->eager_initialize(THREAD); + ++ // (tw) Only notify jvmti if not redefining a class. + // notify jvmti +- if (JvmtiExport::should_post_class_load()) { ++ if (JvmtiExport::should_post_class_load() && old_class.is_null()) { + assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_load((JavaThread *) THREAD, k()); + +@@ -1482,7 +1518,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(Symbol* clas + } + } + +- define_instance_class(k, THREAD); ++ define_instance_class(k, KlassHandle(), THREAD); + + Handle linkage_exception = Handle(); // null handle + +@@ -1612,6 +1648,14 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) { + Universe::flush_dependents_on(k); + } + ++// (tw) Remove from hierarchy - Undo add_to_hierarchy. ++void SystemDictionary::remove_from_hierarchy(instanceKlassHandle k) { ++ assert(k.not_null(), "just checking"); ++ ++ k->remove_from_sibling_list(); ++ ++ // TODO (tw): Remove from interfaces. 
++}
+
+ // ----------------------------------------------------------------------------
+ // GC support
+@@ -1869,9 +1913,12 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
+
+ // Preload ref klasses and set reference types
+ instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
+- instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
++
++ // (tw) This is now done in parseClassFile in order to support class redefinition
++ // instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
+
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
++ // TODO(tw): Check that the following is also not necessary?
+ instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
+ instanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
+ instanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
+@@ -1955,7 +2002,7 @@ void SystemDictionary::check_constraints(int d_index, unsigned int d_hash,
+ // also holds array classes
+
+ assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary");
+- if ((defining == true) || (k() != check)) {
++ if ((defining == true) && ((k() != check) && k->old_version() != check)) {
+ linkage_error = "loader (instance of %s): attempted duplicate class "
+ "definition for name: \"%s\"";
+ } else {
+@@ -2602,8 +2649,10 @@ void SystemDictionary::verify_obj_klass_present(Handle obj,
+ name = find_placeholder(class_name, class_loader);
+ }
+ }
+- guarantee(probe != NULL || name != NULL,
+- "Loaded klasses should be in SystemDictionary");
++ // (tw) Relaxed assertion to allow different class versions. Also allow redefining classes to lie around (because of rollback).
++ guarantee(probe != NULL &&
++ (!probe->is_klass() || (!((klassOop)(obj()))->klass_part()->is_redefining()) || ((klassOop)probe)->klass_part()->is_same_or_older_version((klassOop)(obj()))) || ((klassOop)(obj()))->klass_part()->is_redefining(),
++ "Loaded klasses should be in SystemDictionary");
+ }
+
+ // utility function for posting class load event
+diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
+index adf82e5..00cf392 100644
+--- a/src/share/vm/classfile/systemDictionary.hpp
++++ b/src/share/vm/classfile/systemDictionary.hpp
+@@ -268,7 +268,7 @@ public:
+ // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
+ static klassOop resolve_from_stream(Symbol* class_name, Handle class_loader,
+ Handle protection_domain,
+- ClassFileStream* st, bool verify, TRAPS);
++ ClassFileStream* st, bool verify, KlassHandle old_class, TRAPS);
+
+ // Lookup an already loaded class. If not found NULL is returned.
+ static klassOop find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
+@@ -343,6 +343,8 @@ public:
+ // System loader lock
+ static oop system_loader_lock() { return _system_loader_lock_obj; }
+
++ // Remove link to hierarchy
++ static void remove_from_hierarchy(instanceKlassHandle k);
+ private:
+ // Traverses preloaded oops: various system classes. These are
+ // guaranteed to be in the perm gen.
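The check_constraints hunk above narrows the duplicate-definition error: during a redefinition, finding either the new class or the old version it replaces in the dictionary is expected and must not raise a LinkageError. The predicate in isolation (a reduced sketch of the new condition):

    #include <cassert>

    // New condition from check_constraints: only a conflict when defining and
    // the dictionary hit is neither the new class nor its old version.
    static bool is_duplicate_definition(bool defining, const void* k,
                                        const void* k_old, const void* check) {
      return defining && k != check && k_old != check;
    }

    int main() {
      int new_k, old_k, other;
      assert(!is_duplicate_definition(true, &new_k, &old_k, &new_k));
      assert(!is_duplicate_definition(true, &new_k, &old_k, &old_k));  // redefinition
      assert( is_duplicate_definition(true, &new_k, &old_k, &other)); // real conflict
      assert(!is_duplicate_definition(false, &new_k, &old_k, &other));
      return 0;
    }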
+@@ -415,6 +417,8 @@ public: + initialize_wk_klasses_until((WKID) limit, start_id, THREAD); + } + ++ static void rollback_redefinition(); ++ + public: + #define WK_KLASS_DECLARE(name, symbol, option) \ + static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } +@@ -596,7 +600,7 @@ private: + // after waiting, but before reentering SystemDictionary_lock + // to preserve lock order semantics. + static void double_lock_wait(Handle lockObject, TRAPS); +- static void define_instance_class(instanceKlassHandle k, TRAPS); ++ static void define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS); + static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, + Handle class_loader, + instanceKlassHandle k, TRAPS); +diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp +index da188bb..097c50c 100644 +--- a/src/share/vm/classfile/verifier.cpp ++++ b/src/share/vm/classfile/verifier.cpp +@@ -106,7 +106,7 @@ bool Verifier::relax_verify_for(oop loader) { + return !need_verify; + } + +-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) { ++bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS) { + HandleMark hm; + ResourceMark rm(THREAD); + +@@ -117,6 +117,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + + const char* klassName = klass->external_name(); + bool can_failover = FailOverToOldVerifier && ++ may_use_old_verifier && + klass->major_version() < NOFAILOVER_MAJOR_VERSION; + + // If the class should be verified, first see if we can use the split +@@ -138,6 +139,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + tty->print_cr( + "Fail over class verification to old verifier for: %s", klassName); + } ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -145,6 +147,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + exception_message = split_verifier.exception_message(); + } + } else { ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -210,7 +213,7 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou + // NOTE: this is called too early in the bootstrapping process to be + // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection. + (refl_magic_klass == NULL || +- !klass->is_subtype_of(refl_magic_klass) || ++ !(klass->is_subtype_of(refl_magic_klass) || klass->is_subtype_of(refl_magic_klass->klass_part()->newest_version())) || + VerifyReflectionBytecodes) + ); + } +@@ -517,7 +520,7 @@ void ErrorContext::stackmap_details(outputStream* ss, methodOop method) const { + + ClassVerifier::ClassVerifier( + instanceKlassHandle klass, TRAPS) +- : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass) { ++ : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass->newest_version()), _klass_to_verify(klass) { + _this_type = VerificationType::reference_type(klass->name()); + // Create list to hold symbols in reference area. 
+ _symbols = new GrowableArray<Symbol*>(100, 0, NULL); +@@ -547,7 +550,7 @@ void ClassVerifier::verify_class(TRAPS) { + _klass->external_name()); + } + +- objArrayHandle methods(THREAD, _klass->methods()); ++ objArrayHandle methods(THREAD, _klass_to_verify->methods()); + int num_methods = methods->length(); + + for (int index = 0; index < num_methods; index++) { +@@ -2444,7 +2447,8 @@ void ClassVerifier::verify_invoke_instructions( + VerificationType stack_object_type = + current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this)); + if (current_type() != stack_object_type) { +- assert(cp->cache() == NULL, "not rewritten yet"); ++ // (tw) TODO: Check if relaxing the following assertion is correct. For class redefinition we might call the verifier twice. ++ //assert(cp->cache() == NULL, "not rewritten yet"); + Symbol* ref_class_name = + cp->klass_name_at(cp->klass_ref_index_at(index)); + // See the comments in verify_field_instructions() for +diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp +index 4457f4a..b1b96f2 100644 +--- a/src/share/vm/classfile/verifier.hpp ++++ b/src/share/vm/classfile/verifier.hpp +@@ -47,7 +47,7 @@ class Verifier : AllStatic { + * Otherwise, no exception is thrown and the return indicates the + * error. + */ +- static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS); ++ static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS); + + // Return false if the class is loaded by the bootstrap loader, + // or if defineClass was called requesting skipping verification +@@ -256,7 +256,10 @@ class ClassVerifier : public StackObj { + + ErrorContext _error_context; // contains information about an error + ++public: + void verify_method(methodHandle method, TRAPS); ++ ++private: + char* generate_code_data(methodHandle m, u4 code_length, TRAPS); + void verify_exception_handler_table(u4 code_length, char* code_data, + int& min, int& max, TRAPS); +@@ -329,6 +332,7 @@ class ClassVerifier : public StackObj { + + VerificationType object_type() const; + ++ instanceKlassHandle _klass_to_verify; + instanceKlassHandle _klass; // the class being verified + methodHandle _method; // current method being verified + VerificationType _this_type; // the verification type of the current class +diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp +index 21c9413..59f5f7e 100644 +--- a/src/share/vm/code/nmethod.cpp ++++ b/src/share/vm/code/nmethod.cpp +@@ -2074,15 +2074,14 @@ bool nmethod::is_evol_dependent_on(klassOop dependee) { + methodOop method = deps.method_argument(0); + for (int j = 0; j < dependee_methods->length(); j++) { + if ((methodOop) dependee_methods->obj_at(j) == method) { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x01000000, +- ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", ++ ResourceMark rm(Thread::current()); ++ TRACE_RC3("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", + _method->method_holder()->klass_part()->external_name(), + _method->name()->as_C_string(), + _method->signature()->as_C_string(), compile_id(), + method->method_holder()->klass_part()->external_name(), + method->name()->as_C_string(), +- method->signature()->as_C_string())); ++ method->signature()->as_C_string()); + if (TraceDependencies || LogCompilation) + deps.log_dependency(dependee); + return true; +diff --git a/src/share/vm/compiler/compileBroker.cpp 
b/src/share/vm/compiler/compileBroker.cpp +index 0feca89..1c4b014 100644 +--- a/src/share/vm/compiler/compileBroker.cpp ++++ b/src/share/vm/compiler/compileBroker.cpp +@@ -1592,6 +1592,9 @@ void CompileBroker::compiler_thread_loop() { + + // Never compile a method if breakpoints are present in it + if (method()->number_of_breakpoints() == 0) { ++ // (tw) Obtain a compilation lock. Class redefinition requires that there is no compilation in parallel. ++ thread->compilation_mutex()->lock(); ++ thread->set_should_bailout(false); + // Compile the method. + if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { + #ifdef COMPILER1 +@@ -1615,6 +1618,7 @@ void CompileBroker::compiler_thread_loop() { + // After compilation is disabled, remove remaining methods from queue + method->clear_queued_for_compilation(); + } ++ thread->compilation_mutex()->unlock(); + } + } + } +@@ -1780,7 +1784,11 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { + //assert(false, "compiler should always document failure"); + // The compiler elected, without comment, not to register a result. + // Do not attempt further compilations of this method. +- ci_env.record_method_not_compilable("compile failed", !TieredCompilation); ++ if (((CompilerThread *)Thread::current())->should_bailout()) { ++ ci_env.record_failure("compile externally aborted"); ++ } else { ++ ci_env.record_method_not_compilable("compile failed"); ++ } + } + + // Copy this bit to the enclosing block: +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +index b0c9ec8..7feadf9 100644 +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +@@ -162,6 +162,13 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, + } + } + ++ ++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ ShouldNotReachHere(); ++ return NULL; ++} ++ + // Like CompactibleSpace forward() but always calls cross_threshold() to + // update the block offset table. Removed initialize_threshold call because + // CFLS does not use a block offset array for contiguous spaces. +@@ -2118,7 +2125,7 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const { + // Support for compaction + + void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); ++ SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false); + // prepare_for_compaction() uses the space between live objects + // so that later phase can skip dead space quickly. So verification + // of the free lists doesn't work after. 
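The CompileBroker hunks just above pair with the CHECK_BAILOUT macros earlier in this patch: each compile runs with the compiler thread's compilation_mutex held and polls should_bailout, so the redefinition code can abort in-flight compiles by setting the flag and then acquiring every compilation_mutex. A portable sketch of that handshake, with std::atomic and std::mutex standing in for HotSpot's Monitor types:

    #include <atomic>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    static std::mutex compilation_mutex;           // held for the whole compile
    static std::atomic<bool> should_bailout{false};

    static void compile_method() {
      std::lock_guard<std::mutex> lock(compilation_mutex);
      for (int pass = 0; pass < 1000000; pass++) {
        if (should_bailout.load(std::memory_order_relaxed)) {
          std::puts("compile externally aborted");  // CHECK_BAILOUT analogue
          return;
        }
        // ... one unit of compilation work ...
      }
      std::puts("compile finished");
    }

    int main() {
      should_bailout.store(true);                  // redefinition begins
      std::thread compiler(compile_method);
      {
        std::lock_guard<std::mutex> lock(compilation_mutex);
        // Once acquired, any in-flight compile has bailed out or finished.
      }
      compiler.join();
      should_bailout.store(false);                 // redefinition done
      return 0;
    }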
+@@ -2139,7 +2146,7 @@ void CompactibleFreeListSpace::adjust_pointers() { + } + + void CompactibleFreeListSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ SCAN_AND_COMPACT(obj_size, false); + } + + // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +index 3b7bb9a..de7e54b 100644 +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +@@ -149,6 +149,7 @@ class CompactibleFreeListSpace: public CompactibleSpace { + + // Support for compacting cms + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); ++ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); + HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); + + // Initialization helpers. +diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp +index 29841d8..d1386c7 100644 +--- a/src/share/vm/gc_implementation/shared/markSweep.cpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp +@@ -32,6 +32,8 @@ + #include "oops/objArrayKlass.inline.hpp" + #include "oops/oop.inline.hpp" + ++GrowableArray<oop>* MarkSweep::_rescued_oops = NULL; ++ + Stack<oop, mtGC> MarkSweep::_marking_stack; + Stack<DataLayout*, mtGC> MarkSweep::_revisit_mdo_stack; + Stack<Klass*, mtGC> MarkSweep::_revisit_klass_stack; +@@ -357,3 +359,97 @@ void MarkSweep::trace(const char* msg) { + } + + #endif ++ ++// (tw) Copy the rescued objects to their destination address after compaction. ++void MarkSweep::copy_rescued_objects_back() { ++ ++ if (_rescued_oops != NULL) { ++ ++ for (int i=0; i<_rescued_oops->length(); i++) { ++ oop rescued_obj = _rescued_oops->at(i); ++ ++ int size = rescued_obj->size(); ++ oop new_obj = rescued_obj->forwardee(); ++ ++ assert(rescued_obj->blueprint()->new_version() != NULL, "just checking"); ++ ++ if (rescued_obj->blueprint()->new_version()->klass_part()->update_information() != NULL) { ++ MarkSweep::update_fields(rescued_obj, new_obj); ++ } else { ++ rescued_obj->set_klass_no_check(rescued_obj->blueprint()->new_version()); ++ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); ++ } ++ ++ FREE_RESOURCE_ARRAY(HeapWord, rescued_obj, size); ++ ++ new_obj->init_mark(); ++ assert(new_obj->is_oop(), "must be a valid oop"); ++ } ++ _rescued_oops->clear(); ++ _rescued_oops = NULL; ++ } ++} ++ ++// (tw) Update instances of a class whose fields changed. 
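// (Annotation, not part of the patch: when the old and new field layouts
// overlap and the copy direction is backwards, the routine below first
// stages the object in a resource-allocated temporary so that no field is
// clobbered mid-copy; the temporary is freed right after the remap.)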
++void MarkSweep::update_fields(oop q, oop new_location) { ++ ++ assert(q->blueprint()->new_version() != NULL, "class of old object must have new version"); ++ ++ klassOop old_klass_oop = q->klass(); ++ klassOop new_klass_oop = q->blueprint()->new_version(); ++ ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = instanceKlass::cast(new_klass_oop); ++ ++ int size = q->size_given_klass(old_klass); ++ int new_size = q->size_given_klass(new_klass); ++ ++ oop tmp_obj = q; ++ ++ // Save object somewhere, there is an overlap in fields ++ if (new_klass_oop->klass_part()->is_copying_backwards()) { ++ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) || ++ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) { ++ tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size); ++ } ++ } ++ ++ tmp_obj->set_klass_no_check(new_klass_oop); ++ int *cur = new_klass_oop->klass_part()->update_information(); ++ assert(cur != NULL, "just checking"); ++ MarkSweep::update_fields(new_location, tmp_obj, cur); ++ ++ if (tmp_obj != q) { ++ FREE_RESOURCE_ARRAY(HeapWord, tmp_obj, size); ++ } ++} ++ ++void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) { ++ assert(cur != NULL, "just checking"); ++ char* to = (char*)new_location; ++ while (*cur != 0) { ++ int size = *cur; ++ if (size > 0) { ++ cur++; ++ int offset = *cur; ++ HeapWord* from = (HeapWord*)(((char *)tmp_obj) + offset); ++ if (size == HeapWordSize) { ++ *((HeapWord*)to) = *from; ++ } else if (size == HeapWordSize * 2) { ++ *((HeapWord*)to) = *from; ++ *(((HeapWord*)to) + 1) = *(from + 1); ++ } else { ++ Copy::conjoint_jbytes(from, to, size); ++ } ++ to += size; ++ cur++; ++ } else { ++ assert(size < 0, ""); ++ int skip = -*cur; ++ Copy::fill_to_bytes(to, skip, 0); ++ to += skip; ++ cur++; ++ } ++ } ++} +diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp +index eb8252c..40118db 100644 +--- a/src/share/vm/gc_implementation/shared/markSweep.hpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp +@@ -117,8 +117,12 @@ class MarkSweep : AllStatic { + friend class AdjustPointerClosure; + friend class KeepAliveClosure; + friend class VM_MarkSweep; ++ friend class GenMarkSweep; + friend void marksweep_init(); + ++public: ++ static GrowableArray<oop>* _rescued_oops; ++ + // + // Vars + // +@@ -208,6 +212,9 @@ class MarkSweep : AllStatic { + template <class T> static inline void mark_and_push(T* p); + static inline void push_objarray(oop obj, size_t index); + ++ static void copy_rescued_objects_back(); ++ static void update_fields(oop q, oop new_location); ++ static void update_fields(oop new_location, oop tmp_obj, int *cur); + static void follow_stack(); // Empty marking stack. 
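The update_fields() pair declared above is driven by a per-class "update information" table. Restating the format as read off the code in markSweep.cpp (apply_update_information is an illustrative name, not patch API): the table is a zero-terminated int array in which a positive entry gives a byte count to copy followed by its source offset in the old instance, and a negative entry gives a byte count of zero-filled space for fields that are new in the updated layout:

#include <cstring>

// Sketch only: copy one instance from its old layout to the new one,
// driven by the zero-terminated update-information table.
static void apply_update_information(char* to, const char* old_obj,
                                     const int* cur) {
  while (*cur != 0) {
    int size = *cur++;
    if (size > 0) {             // copy 'size' bytes from the old instance
      int offset = *cur++;      // positive entries carry a source offset
      std::memcpy(to, old_obj + offset, size);
      to += size;
    } else {                    // negative entry: new fields, zero-fill
      std::memset(to, 0, -size);
      to += -size;
    }
  }
}

The real implementation additionally special-cases runs of one and two HeapWords, avoiding the generic byte copy for the most common field sizes.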
+ + static void preserve_mark(oop p, markOop mark); +diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp +index 32c0bdb..448d673 100644 +--- a/src/share/vm/interpreter/interpreterRuntime.cpp ++++ b/src/share/vm/interpreter/interpreterRuntime.cpp +@@ -402,7 +402,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea + assert(h_exception.not_null(), "NULL exceptions should be handled by athrow"); + assert(h_exception->is_oop(), "just checking"); + // Check that exception is a subclass of Throwable, otherwise we have a VerifyError +- if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) { ++ if (!(h_exception->is_a(SystemDictionary::Throwable_klass()->klass_part()->newest_version())) && !(h_exception->is_a(SystemDictionary::Throwable_klass()))) { + if (ExitVMOnVerifyError) vm_exit(-1); + ShouldNotReachHere(); + } +diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp +index b17f405..1c96783 100644 +--- a/src/share/vm/interpreter/linkResolver.cpp ++++ b/src/share/vm/interpreter/linkResolver.cpp +@@ -153,8 +153,8 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass + // Klass resolution + + void LinkResolver::check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS) { +- if (!Reflection::verify_class_access(ref_klass->as_klassOop(), +- sel_klass->as_klassOop(), ++ if (!Reflection::verify_class_access(ref_klass->as_klassOop()->klass_part()->newest_version(), ++ sel_klass->as_klassOop()->klass_part()->newest_version(), + true)) { + ResourceMark rm(THREAD); + Exceptions::fthrow( +@@ -338,7 +338,7 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass, + // We'll check for the method name first, as that's most likely + // to be false (so we'll short-circuit out of these tests). + if (sel_method->name() == vmSymbols::clone_name() && +- sel_klass() == SystemDictionary::Object_klass() && ++ sel_klass()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() && + resolved_klass->oop_is_array()) { + // We need to change "protected" to "public". + assert(flags.is_protected(), "clone not protected?"); +@@ -634,7 +634,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo + } + + // Final fields can only be accessed from its own class. 
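// (Annotation, not part of the patch: under redefinition the constant-pool
// holder may be an obsolete class version, so the patched check below also
// accepts the holder's active_version() as "its own class".)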
+- if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) { ++ if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()->klass_part()->active_version() && sel_klass() != pool->pool_holder()) { + THROW(vmSymbols::java_lang_IllegalAccessError()); + } + +@@ -839,7 +839,7 @@ void LinkResolver::resolve_virtual_call(CallInfo& result, Handle recv, KlassHand + bool check_access, bool check_null_and_abstract, TRAPS) { + methodHandle resolved_method; + linktime_resolve_virtual_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK); +- runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, check_null_and_abstract, CHECK); ++ runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, current_klass, check_null_and_abstract, CHECK); + } + + // throws linktime exceptions +@@ -869,6 +869,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result, + KlassHandle resolved_klass, + Handle recv, + KlassHandle recv_klass, ++ KlassHandle current_klass, + bool check_null_and_abstract, + TRAPS) { + +@@ -917,6 +918,9 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result, + // recv_klass might be an arrayKlassOop but all vtables start at + // the same place. The cast is to avoid virtual call and assertion. + instanceKlass* inst = (instanceKlass*)recv_klass()->klass_part(); ++ ++ // (tw) Check that the receiver is a subtype of the holder of the resolved method. ++ assert(inst->is_subtype_of(resolved_method->method_holder()), "receiver and resolved method holder are inconsistent"); + selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index)); + } + } +diff --git a/src/share/vm/interpreter/linkResolver.hpp b/src/share/vm/interpreter/linkResolver.hpp +index dfd74f9..6ca1b54 100644 +--- a/src/share/vm/interpreter/linkResolver.hpp ++++ b/src/share/vm/interpreter/linkResolver.hpp +@@ -110,7 +110,8 @@ class CallInfo: public LinkInfo { + // It does all necessary link-time checks & throws exceptions if necessary. 
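// (Annotation, not part of the patch: current_klass is now threaded through
// runtime_resolve_virtual_method(), and the added assert verifies that the
// receiver is still a subtype of the resolved method's holder -- an
// invariant that a stale vtable entry after a class swap would break.)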
+ + class LinkResolver: AllStatic { +- private: ++private: ++ static void lookup_method (methodHandle& result, KlassHandle resolved_klass, Symbol* name, Symbol* signature, bool is_interface, KlassHandle current_klass, TRAPS); + static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); +@@ -133,7 +134,7 @@ class LinkResolver: AllStatic { + static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); + + static void runtime_resolve_special_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS); +- static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); ++ static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, KlassHandle current_klass, bool check_null_and_abstract, TRAPS); + static void runtime_resolve_interface_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); + + static void check_field_accessability (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS); +diff --git a/src/share/vm/interpreter/oopMapCache.cpp b/src/share/vm/interpreter/oopMapCache.cpp +index 01d5753..6816b3a 100644 +--- a/src/share/vm/interpreter/oopMapCache.cpp ++++ b/src/share/vm/interpreter/oopMapCache.cpp +@@ -536,9 +536,9 @@ void OopMapCache::flush_obsolete_entries() { + if (!_array[i].is_empty() && _array[i].method()->is_old()) { + // Cache entry is occupied by an old redefined method and we don't want + // to pin it down so flush the entry. +- RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d", ++ TRACE_RC3("flush: %s(%s): cached entry @%d", + _array[i].method()->name()->as_C_string(), +- _array[i].method()->signature()->as_C_string(), i)); ++ _array[i].method()->signature()->as_C_string(), i); + + _array[i].flush(); + } +diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp +index 76e18d8..6af7c14 100644 +--- a/src/share/vm/memory/genMarkSweep.cpp ++++ b/src/share/vm/memory/genMarkSweep.cpp +@@ -421,6 +421,7 @@ void GenMarkSweep::mark_sweep_phase4() { + // in the same order in phase2, phase3 and phase4. We don't quite do that + // here (perm_gen first rather than last), so we tell the validate code + // to use a higher index (saved from phase2) when verifying perm_gen. 
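// (Annotation, not part of the patch: phase4 below drains the rescue buffer
// twice -- once after the perm gen has been compacted and once after the
// ordinary generations -- so a resized object is only placed once its target
// range can no longer overlap a not-yet-moved neighbor.)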
++ assert(_rescued_oops == NULL, "must be empty before processing");
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ Generation* pg = gch->perm_gen();
+
+@@ -433,10 +434,14 @@
+
+ VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
+
++ MarkSweep::copy_rescued_objects_back();
++
+ GenCompactClosure blk;
+ gch->generation_iterate(&blk, true);
+
+ VALIDATE_MARK_SWEEP_ONLY(compaction_complete());
+
++ MarkSweep::copy_rescued_objects_back();
++
+ pg->post_compact(); // Shared spaces verification.
+ }
+diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
+index b5f8e0e..856cfce 100644
+--- a/src/share/vm/memory/iterator.hpp
++++ b/src/share/vm/memory/iterator.hpp
+@@ -101,6 +101,12 @@ class OopClosure : public Closure {
+ #endif
+ };
+
++class OopClosureNoHeader : public OopClosure {
++public:
++ // If "true", invoke on header klass field.
++ bool do_header() { return false; } // Note that this is non-virtual.
++};
++
+ // ObjectClosure is used for iterating through an object space
+
+ class ObjectClosure : public Closure {
+diff --git a/src/share/vm/memory/oopFactory.cpp b/src/share/vm/memory/oopFactory.cpp
+index def88cc..016d7eb 100644
+--- a/src/share/vm/memory/oopFactory.cpp
++++ b/src/share/vm/memory/oopFactory.cpp
+@@ -129,11 +129,11 @@ klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_
+ unsigned int nonstatic_oop_map_count,
+ AccessFlags access_flags,
+ ReferenceType rt,
+- KlassHandle host_klass, TRAPS) {
++ KlassHandle host_klass, KlassHandle old_klass, TRAPS) {
+ instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
+ return ikk->allocate_instance_klass(name, vtable_len, itable_len,
+ static_field_size, nonstatic_oop_map_count,
+- access_flags, rt, host_klass, CHECK_NULL);
++ access_flags, rt, host_klass, old_klass, CHECK_NULL);
+ }
+
+
+diff --git a/src/share/vm/memory/oopFactory.hpp b/src/share/vm/memory/oopFactory.hpp
+index e7e22d4..ce39ada 100644
+--- a/src/share/vm/memory/oopFactory.hpp
++++ b/src/share/vm/memory/oopFactory.hpp
+@@ -80,7 +80,7 @@ class oopFactory: AllStatic {
+ unsigned int nonstatic_oop_map_count,
+ AccessFlags access_flags,
+ ReferenceType rt,
+- KlassHandle host_klass, TRAPS);
++ KlassHandle host_klass, KlassHandle old_klass, TRAPS);
+
+ // Methods
+ private:
+diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
+index f97bc34..c8563b2 100644
+--- a/src/share/vm/memory/space.cpp
++++ b/src/share/vm/memory/space.cpp
+@@ -378,9 +378,8 @@ void CompactibleSpace::clear(bool mangle_space) {
+ _compaction_top = bottom();
+ }
+
+-HeapWord* CompactibleSpace::forward(oop q, size_t size,
+- CompactPoint* cp, HeapWord* compact_top) {
+- // q is alive
++// (tw) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
++HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) { + // First check if we should switch compaction space + assert(this == cp->space, "'this' should be current compaction space."); + size_t compaction_max_size = pointer_delta(end(), compact_top); +@@ -400,8 +399,15 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size, + compaction_max_size = pointer_delta(cp->space->end(), compact_top); + } + ++ return compact_top; ++} ++ ++HeapWord* CompactibleSpace::forward(oop q, size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ compact_top = forward_compact_top(size, cp, compact_top); ++ + // store the forwarding pointer into the mark word +- if ((HeapWord*)q != compact_top) { ++ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) { + q->forward_to(oop(compact_top)); + assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); + } else { +@@ -423,6 +429,58 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size, + return compact_top; + } + ++// Compute the forward sizes and leave out objects whose position could ++// possibly overlap other objects. ++HeapWord* CompactibleSpace::forward_with_rescue(oop q, size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ size_t forward_size = size; ++ ++ // (DCEVM) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL && oop(q)->blueprint()->new_version()->klass_part()->update_information() != NULL) { ++ ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ forward_size = new_size; ++ } ++ ++ compact_top = forward_compact_top(forward_size, cp, compact_top); ++ ++ if (must_rescue(oop(q), oop(compact_top))) { ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ MarkSweep::_rescued_oops->append(oop(q)); ++ return compact_top; ++ } ++ ++ return forward(q, forward_size, cp, compact_top); ++} ++ ++// Compute the forwarding addresses for the objects that need to be rescued. ++HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) { ++ // TODO: empty the _rescued_oops after ALL spaces are compacted! 
++ if (MarkSweep::_rescued_oops != NULL) { ++ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { ++ oop q = MarkSweep::_rescued_oops->at(i); ++ ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size((HeapWord*)q); ++ ++ // (tw) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ size = new_size; ++ } ++ ++ compact_top = cp->space->forward(oop(q), size, cp, compact_top); ++ assert(compact_top <= end(), "must not write over end of space!"); ++ } ++ MarkSweep::_rescued_oops->clear(); ++ MarkSweep::_rescued_oops = NULL; ++ } ++ return compact_top; ++} + + bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, + HeapWord* q, size_t deadlength) { +@@ -444,12 +502,17 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, + #define adjust_obj_size(s) s + + void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); ++ SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false); + } + + // Faster object search. + void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ if (!Universe::is_redefining_gc_run()) { ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false); ++ } else { ++ // Redefinition run ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true); ++ } + } + + void Space::adjust_pointers() { +@@ -490,6 +553,111 @@ void Space::adjust_pointers() { + assert(q == t, "just checking"); + } + ++ ++#ifdef ASSERT ++ ++int CompactibleSpace::space_index(oop obj) { ++ GenCollectedHeap* heap = GenCollectedHeap::heap(); ++ ++ if (heap->is_in_permanent(obj)) { ++ return -1; ++ } ++ ++ int index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ if (space->is_in_reserved(obj)) { ++ return index; ++ } ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ tty->print_cr("could not compute space_index for %08xh", obj); ++ index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); ++ ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ ShouldNotReachHere(); ++ return 0; ++} ++#endif ++ ++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { ++ // Only redefined objects can have the need to be rescued. ++ if (oop(old_obj)->blueprint()->new_version() == NULL) return false; ++ ++ if (old_obj->is_perm()) { ++ // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj. 
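// (Annotation, not part of the patch: the invariant appears to require a
// klass to sit at a lower address than its instances, so a resized perm-gen
// object always takes the side-buffer path instead of being forwarded in
// place.)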
++ return true; ++ } ++ ++ int new_size = old_obj->size_given_klass(oop(old_obj)->blueprint()->new_version()->klass_part()); ++ int original_size = old_obj->size(); ++ ++ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); ++ bool old_in_tenured = tenured_gen->is_in_reserved(old_obj); ++ bool new_in_tenured = tenured_gen->is_in_reserved(new_obj); ++ if (old_in_tenured == new_in_tenured) { ++ // Rescue if object may overlap with a higher memory address. ++ bool overlap = (old_obj + original_size < new_obj + new_size); ++ if (old_in_tenured) { ++ // Old and new address are in same space, so just compare the address. ++ // Must rescue if object moves towards the top of the space. ++ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); ++ } else { ++ // In the new generation, eden is located before the from space, so a ++ // simple pointer comparison is sufficient. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); ++ } ++ return overlap; ++ ++ } else { ++ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); ++ if (tenured_gen->is_in_reserved(new_obj)) { ++ // Must never rescue when moving from the new into the old generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) > space_index(new_obj), "must be"); ++ return false; ++ ++ } else /* if (tenured_gen->is_in_reserved(old_obj)) */ { ++ // Must always rescue when moving from the old into the new generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) < space_index(new_obj), "must be"); ++ return true; ++ } ++ } ++} ++ ++oop CompactibleSpace::rescue(oop old_obj) { ++ assert(must_rescue(old_obj, old_obj->forwardee()), "do not call otherwise"); ++ ++ int size = old_obj->size(); ++ oop rescued_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)old_obj, (HeapWord*)rescued_obj, size); ++ ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ ++ MarkSweep::_rescued_oops->append(rescued_obj); ++ return rescued_obj; ++} ++ + void CompactibleSpace::adjust_pointers() { + // Check first is there is any work to do. + if (used() == 0) { +@@ -500,7 +668,13 @@ void CompactibleSpace::adjust_pointers() { + } + + void CompactibleSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ ++ if(!Universe::is_redefining_gc_run()) { ++ SCAN_AND_COMPACT(obj_size, false); ++ } else { ++ // Redefinition run ++ SCAN_AND_COMPACT(obj_size, true) ++ } + } + + void Space::print_short() const { print_short_on(tty); } +diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp +index ef2f2c6..ff95a8b 100644 +--- a/src/share/vm/memory/space.hpp ++++ b/src/share/vm/memory/space.hpp +@@ -445,6 +445,9 @@ public: + // indicates when the next such action should be taken. 
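The declarations that follow add the rescue hooks used by the compaction macros further down. A compressed model of the lifecycle, under stated assumptions (Obj, side_buffer, compact_one and place_rescued are illustrative names; the real code keeps rescued copies in MarkSweep::_rescued_oops and remaps fields via update_fields):

#include <cstdlib>
#include <cstring>
#include <utility>
#include <vector>

// Sketch only: an object being compacted; sizes differ only when its
// class was redefined with added or removed fields.
struct Obj { char* addr; char* dest; size_t old_size; size_t new_size; };

static std::vector<std::pair<char*, Obj>> side_buffer;  // rescued copies

// Compaction pass: an object whose target range overlaps memory that may
// still hold live data is copied aside instead of being moved now.
void compact_one(const Obj& q, bool must_rescue) {
  if (must_rescue) {
    char* copy = static_cast<char*>(std::malloc(q.old_size));
    std::memcpy(copy, q.addr, q.old_size);
    side_buffer.emplace_back(copy, q);
  } else {
    // With a changed layout a field-by-field remap replaces this raw
    // move; old and new sizes are equal otherwise.
    std::memmove(q.dest, q.addr, q.old_size);
  }
}

// After compaction every destination range is free, so rescued copies can
// be placed and the side buffer released.
void place_rescued() {
  for (auto& [copy, q] : side_buffer) {
    std::memcpy(q.dest, copy, q.old_size);  // or remap fields into q.dest
    std::free(copy);
  }
  side_buffer.clear();
}

In the patch the buffer is drained in two places: per space via forward_rescued() at the end of the forwarding pass, and globally via copy_rescued_objects_back() in mark-sweep phase 4.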
+ virtual void prepare_for_compaction(CompactPoint* cp);
+ // MarkSweep support phase3
++ DEBUG_ONLY(int space_index(oop obj));
++ bool must_rescue(oop old_obj, oop new_obj);
++ oop rescue(oop old_obj);
+ virtual void adjust_pointers();
+ // MarkSweep support phase4
+ virtual void compact();
+@@ -474,6 +477,15 @@ public:
+ // accordingly".
+ virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
+ HeapWord* compact_top);
++ // (DCEVM) same as forward, but can rescue objects. Invoked only during
++ // redefinition runs
++ HeapWord* forward_with_rescue(oop q, size_t size, CompactPoint* cp,
++ HeapWord* compact_top);
++
++ HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
++
++ // (tw) Compute new compact top without actually forwarding the object.
++ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
+
+ // Return a size with adjusments as required of the space.
+ virtual size_t adjust_object_size_v(size_t size) const { return size; }
+@@ -504,7 +516,7 @@ protected:
+ size_t word_len);
+ };
+
+-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
++#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
+ /* Compute the new addresses for the live objects and store it in the mark \
+ * Used by universe::mark_sweep_phase2() \
+ */ \
+@@ -564,7 +576,17 @@ protected:
+ Prefetch::write(q, interval); \
+ /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
+ size_t size = block_size(q); \
+- compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
++ if (redefinition_run) { \
++ compact_top = cp->space->forward_with_rescue(oop(q), size, \
++ cp, compact_top); \
++ if (q < first_dead && oop(q)->is_gc_marked()) { \
++ /* Was moved (otherwise, forward would reset mark), \
++ set first_dead to here */ \
++ first_dead = q; \
++ } \
++ } else { \
++ compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
++ } \
+ q += size; \
+ end_of_live = q; \
+ } else { \
+@@ -613,6 +635,8 @@ protected:
+ } \
+ } \
+ \
++ if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
++ \
+ assert(q == t, "just checking"); \
+ if (liveRange != NULL) { \
+ liveRange->set_end(q); \
+@@ -665,13 +689,8 @@ protected:
+ q += size; \
+ } \
+ \
+- if (_first_dead == t) { \
+- q = t; \
+- } else { \
+- /* $$$ This is funky. Using this to read the previously written \
+- * LiveRange. See also use below.
*/ \ +- q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ +- } \ ++ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ ++ q = _first_dead; \ + } \ + \ + const intx interval = PrefetchScanIntervalInBytes; \ +@@ -702,7 +721,7 @@ protected: + assert(q == t, "just checking"); \ + } + +-#define SCAN_AND_COMPACT(obj_size) { \ ++#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \ + /* Copy all live objects to their new location \ + * Used by MarkSweep::mark_sweep_phase4() */ \ + \ +@@ -728,12 +747,8 @@ protected: + } \ + ) /* debug_only */ \ + \ +- if (_first_dead == t) { \ +- q = t; \ +- } else { \ +- /* $$$ Funky */ \ +- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ +- } \ ++ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ ++ q = _first_dead; \ + } \ + \ + const intx scan_interval = PrefetchScanIntervalInBytes; \ +@@ -752,13 +767,36 @@ protected: + size_t size = obj_size(q); \ + HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ + \ ++ if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \ ++ oop dest_obj = rescue(oop(q)); \ ++ debug_only(Copy::fill_to_words(q, size, 0)); \ ++ q += size; \ ++ continue; \ ++ } \ ++ \ + /* prefetch beyond compaction_top */ \ + Prefetch::write(compaction_top, copy_interval); \ + \ + /* copy object and reinit its mark */ \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \ + compaction_top)); \ +- assert(q != compaction_top, "everything in this pass should be moving"); \ ++ assert(q != compaction_top || oop(q)->blueprint()->new_version() != NULL, \ ++ "everything in this pass should be moving"); \ ++ if (redefinition_run && oop(q)->blueprint()->new_version() != NULL) { \ ++ klassOop new_version = oop(q)->blueprint()->new_version(); \ ++ if (new_version->klass_part()->update_information() == NULL) { \ ++ Copy::aligned_conjoint_words(q, compaction_top, size); \ ++ oop(compaction_top)->set_klass_no_check(new_version); \ ++ } else { \ ++ MarkSweep::update_fields(oop(q), oop(compaction_top)); \ ++ } \ ++ oop(compaction_top)->init_mark(); \ ++ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ ++ \ ++ debug_only(prev_q = q); \ ++ q += size; \ ++ continue; \ ++ } \ + Copy::aligned_conjoint_words(q, compaction_top, size); \ + oop(compaction_top)->init_mark(); \ + assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ +diff --git a/src/share/vm/memory/specialized_oop_closures.hpp b/src/share/vm/memory/specialized_oop_closures.hpp +index 4d7c50b..671787e 100644 +--- a/src/share/vm/memory/specialized_oop_closures.hpp ++++ b/src/share/vm/memory/specialized_oop_closures.hpp +@@ -37,6 +37,7 @@ + + // Forward declarations. 
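// (Annotation on the two macro hunks above, not part of the patch: once
// objects can be rescued, _first_dead may point at a live, already-forwarded
// object, so both macros read it directly instead of decoding a LiveRange
// pointer out of its mark word.)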
+ class OopClosure; ++class OopClosureNoHeader; + class OopsInGenClosure; + // DefNew + class ScanClosure; +@@ -74,6 +75,7 @@ class CMSInnerParMarkAndPushClosure; + #endif + + #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f) \ ++ f(OopClosureNoHeader,_v) \ + f(ScanClosure,_nv) \ + f(FastScanClosure,_nv) \ + f(FilteringClosure,_nv) +@@ -132,6 +134,7 @@ class CMSInnerParMarkAndPushClosure; + + #define ALL_PAR_OOP_ITERATE_CLOSURES(f) \ + f(OopClosure,_v) \ ++ f(OopClosureNoHeader,_v) \ + SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f) + #endif // SERIALGC + +diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp +index 8ce17d9..fe37993 100644 +--- a/src/share/vm/memory/universe.cpp ++++ b/src/share/vm/memory/universe.cpp +@@ -100,6 +100,8 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" + #endif + ++bool Universe::_is_redefining_gc_run = false; ++ + // Known objects + klassOop Universe::_boolArrayKlassObj = NULL; + klassOop Universe::_byteArrayKlassObj = NULL; +@@ -204,6 +206,42 @@ void Universe::system_classes_do(void f(klassOop)) { + f(systemObjArrayKlassObj()); + } + ++// (tw) This method should iterate all pointers that are not within heap objects. ++void Universe::root_oops_do(OopClosure *oopClosure) { ++ ++ class AlwaysTrueClosure: public BoolObjectClosure { ++ public: ++ void do_object(oop p) { ShouldNotReachHere(); } ++ bool do_object_b(oop p) { return true; } ++ }; ++ AlwaysTrueClosure always_true; ++ ++ Universe::oops_do(oopClosure); ++// ReferenceProcessor::oops_do(oopClosure); (tw) check why no longer there ++ JNIHandles::oops_do(oopClosure); // Global (strong) JNI handles ++ Threads::oops_do(oopClosure, NULL); ++ ObjectSynchronizer::oops_do(oopClosure); ++ FlatProfiler::oops_do(oopClosure); ++ JvmtiExport::oops_do(oopClosure); ++ ++ // Now adjust pointers in remaining weak roots. (All of which should ++ // have been cleared if they pointed to non-surviving objects.) ++ // Global (weak) JNI handles ++ JNIHandles::weak_oops_do(&always_true, oopClosure); ++ ++ CodeCache::oops_do(oopClosure); ++ StringTable::oops_do(oopClosure); ++ ++ // (tw) TODO: Check if this is correct? 
++ //CodeCache::scavenge_root_nmethods_oops_do(oopClosure); ++ //Management::oops_do(oopClosure); ++ //ref_processor()->weak_oops_do(&oopClosure); ++ //PSScavenge::reference_processor()->weak_oops_do(&oopClosure); ++ ++ // SO_AllClasses ++ SystemDictionary::oops_do(oopClosure); ++} ++ + void Universe::oops_do(OopClosure* f, bool do_all) { + + f->do_oop((oop*) &_int_mirror); +@@ -1590,10 +1628,9 @@ void ActiveMethodOopsCache::add_previous_version(const methodOop method) { + } + + // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000100, +- ("add: %s(%s): adding prev version ref for cached method @%d", ++ TRACE_RC2("add: %s(%s): adding prev version ref for cached method @%d", + method->name()->as_C_string(), method->signature()->as_C_string(), +- _prev_methods->length())); ++ _prev_methods->length()); + + methodHandle method_h(method); + jweak method_ref = JNIHandles::make_weak_global(method_h); +@@ -1620,9 +1657,8 @@ void ActiveMethodOopsCache::add_previous_version(const methodOop method) { + JNIHandles::destroy_weak_global(method_ref); + _prev_methods->remove_at(i); + } else { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive", +- m->name()->as_C_string(), m->signature()->as_C_string(), i)); ++ TRACE_RC2("add: %s(%s): previous cached method @%d is alive", ++ m->name()->as_C_string(), m->signature()->as_C_string(), i); + } + } + } // end add_previous_version() +diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp +index da21a8b..676675e 100644 +--- a/src/share/vm/memory/universe.hpp ++++ b/src/share/vm/memory/universe.hpp +@@ -127,6 +127,8 @@ class Universe: AllStatic { + friend class SystemDictionary; + friend class VMStructs; + friend class CompactingPermGenGen; ++ friend class Space; ++ friend class ContiguousSpace; + friend class VM_PopulateDumpSharedSpace; + + friend jint universe_init(); +@@ -258,7 +260,18 @@ class Universe: AllStatic { + + static void compute_verify_oop_data(); + ++ static bool _is_redefining_gc_run; ++ + public: ++ ++ static bool is_redefining_gc_run() { ++ return _is_redefining_gc_run; ++ } ++ ++ static void set_redefining_gc_run(bool b) { ++ _is_redefining_gc_run = b; ++ } ++ + // Known classes in the VM + static klassOop boolArrayKlassObj() { return _boolArrayKlassObj; } + static klassOop byteArrayKlassObj() { return _byteArrayKlassObj; } +@@ -403,6 +416,8 @@ class Universe: AllStatic { + + // Iteration + ++ static void root_oops_do(OopClosure *f); ++ + // Apply "f" to the addresses of all the direct heap pointers maintained + // as static fields of "Universe". + static void oops_do(OopClosure* f, bool do_all = false); +@@ -419,6 +434,7 @@ class Universe: AllStatic { + + // Debugging + static bool verify_in_progress() { return _verify_in_progress; } ++ static void set_verify_in_progress(bool b) { _verify_in_progress = b; } + static void verify(bool silent, VerifyOption option); + static void verify(bool silent) { + verify(silent, VerifyOption_Default /* option */); +diff --git a/src/share/vm/oops/cpCacheOop.cpp b/src/share/vm/oops/cpCacheOop.cpp +index ad62921..1cd422c 100644 +--- a/src/share/vm/oops/cpCacheOop.cpp ++++ b/src/share/vm/oops/cpCacheOop.cpp +@@ -40,6 +40,11 @@ + void ConstantPoolCacheEntry::initialize_entry(int index) { + assert(0 < index && index < 0x10000, "sanity check"); + _indices = index; ++// (DCEVM) Should put something else to force JVM to fail if these invalid entries are accessed! 
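// (Annotation, not part of the patch: the nulling of _f1/_f2 below makes any
// use of a stale entry fail fast; together with adjust_entries() further
// down, which simply re-initializes every entry, this replaces the selective
// adjust_method_entries() machinery that the patch removes.)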
++//#ifdef ASSERT ++ _f1 = NULL; ++ _f2 = 0; ++//#endif + assert(constant_pool_index() == index, ""); + } + +@@ -162,7 +167,8 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, + int vtable_index) { + assert(!is_secondary_entry(), ""); + assert(method->interpreter_entry() != NULL, "should have been set at this point"); +- assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); ++ // (tw) No longer valid assert ++ //assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); + + int byte_no = -1; + bool change_to_virtual = false; +@@ -516,116 +522,6 @@ void ConstantPoolCacheEntry::update_pointers() { + } + #endif // SERIALGC + +-// RedefineClasses() API support: +-// If this constantPoolCacheEntry refers to old_method then update it +-// to refer to new_method. +-bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, +- methodOop new_method, bool * trace_name_printed) { +- +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- if (f2_as_vfinal_method() == old_method) { +- // match old_method so need an update +- // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values +- _f2 = (intptr_t)new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- +- return true; +- } +- +- // f1() is not used with virtual entries so bail out +- return false; +- } +- +- if ((oop)_f1 == NULL) { +- // NULL f1() means this is a virtual entry so bail out +- // We are assuming that the vtable index does not need change. +- return false; +- } +- +- if ((oop)_f1 == old_method) { +- _f1 = new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- +- return true; +- } +- +- return false; +-} +- +-// a constant pool cache entry should never contain old or obsolete methods +-bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- methodOop m = (methodOop)_f2; +- // Return false if _f2 refers to an old or an obsolete method. +- // _f2 == NULL || !m->is_method() are just as unexpected here. 
+- return (m != NULL && m->is_method() && !m->is_old() && !m->is_obsolete()); +- } else if ((oop)_f1 == NULL || !((oop)_f1)->is_method()) { +- // _f1 == NULL || !_f1->is_method() are OK here +- return true; +- } +- +- methodOop m = (methodOop)_f1; +- // return false if _f1 refers to an old or an obsolete method +- return (!m->is_old() && !m->is_obsolete()); +-} +- +-bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { +- if (!is_method_entry()) { +- // not a method entry so not interesting by default +- return false; +- } +- +- methodOop m = NULL; +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- m = f2_as_vfinal_method(); +- } else if (is_f1_null()) { +- // NULL _f1 means this is a virtual entry so also not interesting +- return false; +- } else { +- oop f1 = _f1; // _f1 is volatile +- if (!f1->is_method()) { +- // _f1 can also contain a klassOop for an interface +- return false; +- } +- m = f1_as_method(); +- } +- +- assert(m != NULL && m->is_method(), "sanity check"); +- if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) { +- // robustness for above sanity checks or method is not in +- // the interesting class +- return false; +- } +- +- // the method is in the interesting class so the entry is interesting +- return true; +-} +- + void ConstantPoolCacheEntry::print(outputStream* st, int index) const { + // print separator + if (index == 0) st->print_cr(" -------------"); +@@ -663,60 +559,10 @@ void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) { + } + } + +-// RedefineClasses() API support: +-// If any entry of this constantPoolCache points to any of +-// old_methods, replace it with the corresponding new_method. +-void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed) { +- +- if (methods_length == 0) { +- // nothing to do if there are no methods +- return; +- } +- +- // get shorthand for the interesting class +- klassOop old_holder = old_methods[0]->method_holder(); ++void constantPoolCacheOopDesc::adjust_entries() { + + for (int i = 0; i < length(); i++) { +- if (!entry_at(i)->is_interesting_method_entry(old_holder)) { +- // skip uninteresting methods +- continue; +- } +- +- // The constantPoolCache contains entries for several different +- // things, but we only care about methods. In fact, we only care +- // about methods in the same class as the one that contains the +- // old_methods. At this point, we have an interesting entry. 
+- +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; +- methodOop new_method = new_methods[j]; +- +- if (entry_at(i)->adjust_method_entry(old_method, new_method, +- trace_name_printed)) { +- // current old_method matched this entry and we updated it so +- // break out and get to the next interesting entry if there one +- break; +- } +- } ++ entry_at(i)->initialize_entry(entry_at(i)->constant_pool_index()); + } + } + +-// the constant pool cache should never contain old or obsolete methods +-bool constantPoolCacheOopDesc::check_no_old_or_obsolete_entries() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL) && +- !entry_at(i)->check_no_old_or_obsolete_entries()) { +- return false; +- } +- } +- return true; +-} +- +-void constantPoolCacheOopDesc::dump_cache() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL)) { +- entry_at(i)->print(tty, i); +- } +- } +-} +diff --git a/src/share/vm/oops/cpCacheOop.hpp b/src/share/vm/oops/cpCacheOop.hpp +index ef26775..a270d0d 100644 +--- a/src/share/vm/oops/cpCacheOop.hpp ++++ b/src/share/vm/oops/cpCacheOop.hpp +@@ -355,17 +355,6 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { + + void update_pointers(); + +- // RedefineClasses() API support: +- // If this constantPoolCacheEntry refers to old_method then update it +- // to refer to new_method. +- // trace_name_printed is set to true if the current call has +- // printed the klass name so that other routines in the adjust_* +- // group don't print the klass name. +- bool adjust_method_entry(methodOop old_method, methodOop new_method, +- bool * trace_name_printed); +- bool check_no_old_or_obsolete_entries(); +- bool is_interesting_method_entry(klassOop k); +- + // Debugging & Printing + void print (outputStream* st, int index) const; + void verify(outputStream* st) const; +@@ -485,16 +474,8 @@ class constantPoolCacheOopDesc: public oopDesc { + return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index); + } + +- // RedefineClasses() API support: +- // If any entry of this constantPoolCache points to any of +- // old_methods, replace it with the corresponding new_method. +- // trace_name_printed is set to true if the current call has +- // printed the klass name so that other routines in the adjust_* +- // group don't print the klass name. +- void adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed); +- bool check_no_old_or_obsolete_entries(); +- void dump_cache(); ++ // (tw) Clear references to methods and fields from this cache. ++ void adjust_entries(); + }; + + #endif // SHARE_VM_OOPS_CPCACHEOOP_HPP +diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp +index cd3dce0..666ffdf 100644 +--- a/src/share/vm/oops/instanceKlass.cpp ++++ b/src/share/vm/oops/instanceKlass.cpp +@@ -255,7 +255,7 @@ bool instanceKlass::verify_code( + // 1) Verify the bytecodes + Verifier::Mode mode = + throw_verifyerror ? 
Verifier::ThrowException : Verifier::NoException; +- return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false); ++ return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), true, CHECK_false); + } + + +@@ -362,7 +362,13 @@ bool instanceKlass::link_class_impl( + jt->get_thread_stat()->perf_recursion_counts_addr(), + jt->get_thread_stat()->perf_timers_addr(), + PerfClassTraceTime::CLASS_VERIFY); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(true); ++ } + bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(false); ++ } + if (!verify_ok) { + return false; + } +@@ -400,7 +406,8 @@ bool instanceKlass::link_class_impl( + } + #endif + this_oop->set_init_state(linked); +- if (JvmtiExport::should_post_class_prepare()) { ++ // (tw) Must check for old version in order to prevent infinite loops. ++ if (JvmtiExport::should_post_class_prepare() && this_oop->old_version() == NULL /* JVMTI deadlock otherwise */) { + Thread *thread = THREAD; + assert(thread->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop()); +@@ -454,7 +461,9 @@ void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { + // If we were to use wait() instead of waitInterruptibly() then + // we might end up throwing IE from link/symbol resolution sites + // that aren't expected to throw. This would wreak havoc. See 6320309. +- while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) { ++ // (tw) Wait also for the old class version to be fully initialized. ++ while((this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) ++ || (this_oop->old_version() != NULL && ((instanceKlass*)this_oop->old_version()->klass_part())->is_being_initialized())) { + wait = true; + ol.waitUninterruptibly(CHECK); + } +@@ -673,6 +682,18 @@ bool instanceKlass::implements_interface(klassOop k) const { + return false; + } + ++bool instanceKlass::implements_interface_any_version(klassOop k) const { ++ k = k->klass_part()->newest_version(); ++ if (this->newest_version() == k) return true; ++ assert(Klass::cast(k)->is_interface(), "should be an interface class"); ++ for (int i = 0; i < transitive_interfaces()->length(); i++) { ++ if (((klassOop)transitive_interfaces()->obj_at(i))->klass_part()->newest_version() == k) { ++ return true; ++ } ++ } ++ return false; ++} ++ + objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) { + if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException()); + if (length > arrayOopDesc::max_array_length(T_OBJECT)) { +@@ -949,6 +970,18 @@ void instanceKlass::methods_do(void f(methodOop method)) { + } + } + ++void instanceKlass::store_update_information(GrowableArray<int> &values) { ++ int *arr = NEW_C_HEAP_ARRAY(int, values.length(), mtClass); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_update_information(arr); ++} ++ ++void instanceKlass::clear_update_information() { ++ FREE_C_HEAP_ARRAY(int, update_information(), mtClass); ++ set_update_information(NULL); ++} + + void instanceKlass::do_local_static_fields(FieldClosure* cl) { + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { +@@ -1368,6 +1401,20 @@ jmethodID instanceKlass::jmethod_id_or_null(methodOop method) { + return id; + } + ++bool instanceKlass::update_jmethod_id(methodOop method, jmethodID 
newMethodID) { ++ size_t idnum = (size_t)method->method_idnum(); ++ jmethodID* jmeths = methods_jmethod_ids_acquire(); ++ size_t length; // length assigned as debugging crumb ++ jmethodID id = NULL; ++ if (jmeths != NULL && // If there is a cache ++ (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, ++ jmeths[idnum+1] = newMethodID; // Set the id (may be NULL) ++ return true; ++ } ++ ++ return false; ++} ++ + + // Cache an itable index + void instanceKlass::set_cached_itable_index(size_t idnum, int index) { +@@ -1527,6 +1574,13 @@ void instanceKlass::remove_dependent_nmethod(nmethod* nm) { + last = b; + b = b->next(); + } ++ ++ // (tw) Hack as dependencies get wrong version of klassOop ++ if(this->old_version() != NULL) { ++ ((instanceKlass *)this->old_version()->klass_part())->remove_dependent_nmethod(nm); ++ return; ++ } ++ + #ifdef ASSERT + tty->print_cr("### %s can't find dependent nmethod:", this->external_name()); + nm->print(); +@@ -1922,16 +1976,6 @@ void instanceKlass::release_C_heap_structures() { + assert(breakpoints() == 0x0, "should have cleared breakpoints"); + } + +- // deallocate information about previous versions +- if (_previous_versions != NULL) { +- for (int i = _previous_versions->length() - 1; i >= 0; i--) { +- PreviousVersionNode * pv_node = _previous_versions->at(i); +- delete pv_node; +- } +- delete _previous_versions; +- _previous_versions = NULL; +- } +- + // deallocate the cached class file + if (_cached_class_file_bytes != NULL) { + os::free(_cached_class_file_bytes, mtClass); +@@ -2545,275 +2589,10 @@ void instanceKlass::set_init_state(ClassState state) { + } + #endif + +- +-// RedefineClasses() support for previous versions: +- +-// Add an information node that contains weak references to the +-// interesting parts of the previous version of the_class. +-// This is also where we clean out any unused weak references. +-// Note that while we delete nodes from the _previous_versions +-// array, we never delete the array itself until the klass is +-// unloaded. The has_been_redefined() query depends on that fact. +-// +-void instanceKlass::add_previous_version(instanceKlassHandle ikh, +- BitMap* emcp_methods, int emcp_method_count) { +- assert(Thread::current()->is_VM_thread(), +- "only VMThread can add previous versions"); +- +- if (_previous_versions == NULL) { +- // This is the first previous version so make some space. +- // Start with 2 elements under the assumption that the class +- // won't be redefined much. 
+- _previous_versions = new (ResourceObj::C_HEAP, mtClass) +- GrowableArray<PreviousVersionNode *>(2, true); +- } +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d", +- ikh->external_name(), _previous_versions->length(), emcp_method_count)); +- constantPoolHandle cp_h(ikh->constants()); +- jobject cp_ref; +- if (cp_h->is_shared()) { +- // a shared ConstantPool requires a regular reference; a weak +- // reference would be collectible +- cp_ref = JNIHandles::make_global(cp_h); +- } else { +- cp_ref = JNIHandles::make_weak_global(cp_h); +- } +- PreviousVersionNode * pv_node = NULL; +- objArrayOop old_methods = ikh->methods(); +- +- if (emcp_method_count == 0) { +- // non-shared ConstantPool gets a weak reference +- pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL); +- RC_TRACE(0x00000400, +- ("add: all methods are obsolete; flushing any EMCP weak refs")); +- } else { +- int local_count = 0; +- GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP, mtClass) +- GrowableArray<jweak>(emcp_method_count, true); +- for (int i = 0; i < old_methods->length(); i++) { +- if (emcp_methods->at(i)) { +- // this old method is EMCP so save a weak ref +- methodOop old_method = (methodOop) old_methods->obj_at(i); +- methodHandle old_method_h(old_method); +- jweak method_ref = JNIHandles::make_weak_global(old_method_h); +- method_refs->append(method_ref); +- if (++local_count >= emcp_method_count) { +- // no more EMCP methods so bail out now +- break; +- } +- } +- } +- // non-shared ConstantPool gets a weak reference +- pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs); +- } +- +- _previous_versions->append(pv_node); +- +- // Using weak references allows the interesting parts of previous +- // classes to be GC'ed when they are no longer needed. Since the +- // caller is the VMThread and we are at a safepoint, this is a good +- // time to clear out unused weak references. +- +- RC_TRACE(0x00000400, ("add: previous version length=%d", +- _previous_versions->length())); +- +- // skip the last entry since we just added it +- for (int i = _previous_versions->length() - 2; i >= 0; i--) { +- // check the previous versions array for a GC'ed weak refs +- pv_node = _previous_versions->at(i); +- cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- delete pv_node; +- _previous_versions->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp == NULL) { +- // this entry has been GC'ed so remove it +- delete pv_node; +- _previous_versions->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. 
+- continue; +- } else { +- RC_TRACE(0x00000400, ("add: previous version @%d is alive", i)); +- } +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs != NULL) { +- RC_TRACE(0x00000400, ("add: previous methods length=%d", +- method_refs->length())); +- for (int j = method_refs->length() - 1; j >= 0; j--) { +- jweak method_ref = method_refs->at(j); +- assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- method_refs->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL || emcp_method_count == 0) { +- // This method entry has been GC'ed or the current +- // RedefineClasses() call has made all methods obsolete +- // so remove it. +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(j); +- } else { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000400, +- ("add: %s(%s): previous method @%d in version @%d is alive", +- method->name()->as_C_string(), method->signature()->as_C_string(), +- j, i)); +- } +- } +- } +- } +- +- int obsolete_method_count = old_methods->length() - emcp_method_count; +- +- if (emcp_method_count != 0 && obsolete_method_count != 0 && +- _previous_versions->length() > 1) { +- // We have a mix of obsolete and EMCP methods. If there is more +- // than the previous version that we just added, then we have to +- // clear out any matching EMCP method entries the hard way. +- int local_count = 0; +- for (int i = 0; i < old_methods->length(); i++) { +- if (!emcp_methods->at(i)) { +- // only obsolete methods are interesting +- methodOop old_method = (methodOop) old_methods->obj_at(i); +- Symbol* m_name = old_method->name(); +- Symbol* m_signature = old_method->signature(); +- +- // skip the last entry since we just added it +- for (int j = _previous_versions->length() - 2; j >= 0; j--) { +- // check the previous versions array for a GC'ed weak refs +- pv_node = _previous_versions->at(j); +- cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- delete pv_node; +- _previous_versions->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp == NULL) { +- // this entry has been GC'ed so remove it +- delete pv_node; +- _previous_versions->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; +- } +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs == NULL) { +- // We have run into a PreviousVersion generation where +- // all methods were made obsolete during that generation's +- // RedefineClasses() operation. At the time of that +- // operation, all EMCP methods were flushed so we don't +- // have to go back any further. +- // +- // A NULL method_refs is different than an empty method_refs. +- // We cannot infer any optimizations about older generations +- // from an empty method_refs for the current generation. 
+- break; +- } +- +- for (int k = method_refs->length() - 1; k >= 0; k--) { +- jweak method_ref = method_refs->at(k); +- assert(method_ref != NULL, +- "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- method_refs->remove_at(k); +- // Since we are traversing the array backwards, we don't +- // have to do anything special with the index. +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL) { +- // this method entry has been GC'ed so skip it +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(k); +- continue; +- } +- +- if (method->name() == m_name && +- method->signature() == m_signature) { +- // The current RedefineClasses() call has made all EMCP +- // versions of this method obsolete so mark it as obsolete +- // and remove the weak ref. +- RC_TRACE(0x00000400, +- ("add: %s(%s): flush obsolete method @%d in version @%d", +- m_name->as_C_string(), m_signature->as_C_string(), k, j)); +- +- method->set_is_obsolete(); +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(k); +- break; +- } +- } +- +- // The previous loop may not find a matching EMCP method, but +- // that doesn't mean that we can optimize and not go any +- // further back in the PreviousVersion generations. The EMCP +- // method for this generation could have already been GC'ed, +- // but there still may be an older EMCP method that has not +- // been GC'ed. +- } +- +- if (++local_count >= obsolete_method_count) { +- // no more obsolete methods so bail out now +- break; +- } +- } +- } +- } +-} // end add_previous_version() +- +- + // Determine if instanceKlass has a previous version. + bool instanceKlass::has_previous_version() const { +- if (_previous_versions == NULL) { +- // no previous versions array so answer is easy +- return false; +- } +- +- for (int i = _previous_versions->length() - 1; i >= 0; i--) { +- // Check the previous versions array for an info node that hasn't +- // been GC'ed +- PreviousVersionNode * pv_node = _previous_versions->at(i); +- +- jobject cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp reference was unexpectedly cleared"); +- if (cp_ref == NULL) { +- continue; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp != NULL) { +- // we have at least one previous version +- return true; +- } +- +- // We don't have to check the method refs. If the constant pool has +- // been GC'ed then so have the methods. +- } +- +- // all of the underlying nodes' info has been GC'ed +- return false; +-} // end has_previous_version() ++ return _old_version != NULL; ++} + + methodOop instanceKlass::method_with_idnum(int idnum) { + methodOop m = NULL; +@@ -2854,153 +2633,3 @@ void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, obj + } // if no array and idnum isn't included there is nothing to do + } + +-// Construct a PreviousVersionNode entry for the array hung off +-// the instanceKlass. 
+-PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool, +- bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) { +- +- _prev_constant_pool = prev_constant_pool; +- _prev_cp_is_weak = prev_cp_is_weak; +- _prev_EMCP_methods = prev_EMCP_methods; +-} +- +- +-// Destroy a PreviousVersionNode +-PreviousVersionNode::~PreviousVersionNode() { +- if (_prev_constant_pool != NULL) { +- if (_prev_cp_is_weak) { +- JNIHandles::destroy_weak_global(_prev_constant_pool); +- } else { +- JNIHandles::destroy_global(_prev_constant_pool); +- } +- } +- +- if (_prev_EMCP_methods != NULL) { +- for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) { +- jweak method_ref = _prev_EMCP_methods->at(i); +- if (method_ref != NULL) { +- JNIHandles::destroy_weak_global(method_ref); +- } +- } +- delete _prev_EMCP_methods; +- } +-} +- +- +-// Construct a PreviousVersionInfo entry +-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) { +- _prev_constant_pool_handle = constantPoolHandle(); // NULL handle +- _prev_EMCP_method_handles = NULL; +- +- jobject cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- return; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp == NULL) { +- // Weak reference has been GC'ed. Since the constant pool has been +- // GC'ed, the methods have also been GC'ed. +- return; +- } +- +- // make the constantPoolOop safe to return +- _prev_constant_pool_handle = constantPoolHandle(cp); +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs == NULL) { +- // the instanceKlass did not have any EMCP methods +- return; +- } +- +- _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10); +- +- int n_methods = method_refs->length(); +- for (int i = 0; i < n_methods; i++) { +- jweak method_ref = method_refs->at(i); +- assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL) { +- // this entry has been GC'ed so skip it +- continue; +- } +- +- // make the methodOop safe to return +- _prev_EMCP_method_handles->append(methodHandle(method)); +- } +-} +- +- +-// Destroy a PreviousVersionInfo +-PreviousVersionInfo::~PreviousVersionInfo() { +- // Since _prev_EMCP_method_handles is not C-heap allocated, we +- // don't have to delete it. +-} +- +- +-// Construct a helper for walking the previous versions array +-PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) { +- _previous_versions = ik->previous_versions(); +- _current_index = 0; +- // _hm needs no initialization +- _current_p = NULL; +-} +- +- +-// Destroy a PreviousVersionWalker +-PreviousVersionWalker::~PreviousVersionWalker() { +- // Delete the current info just in case the caller didn't walk to +- // the end of the previous versions list. No harm if _current_p is +- // already NULL. +- delete _current_p; +- +- // When _hm is destroyed, all the Handles returned in +- // PreviousVersionInfo objects will be destroyed. +- // Also, after this destructor is finished it will be +- // safe to delete the GrowableArray allocated in the +- // PreviousVersionInfo objects. +-} +- +- +-// Return the interesting information for the next previous version +-// of the klass. Returns NULL if there are no more previous versions. 
+-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() { +- if (_previous_versions == NULL) { +- // no previous versions so nothing to return +- return NULL; +- } +- +- delete _current_p; // cleanup the previous info for the caller +- _current_p = NULL; // reset to NULL so we don't delete same object twice +- +- int length = _previous_versions->length(); +- +- while (_current_index < length) { +- PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); +- PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass) +- PreviousVersionInfo(pv_node); +- +- constantPoolHandle cp_h = pv_info->prev_constant_pool_handle(); +- if (cp_h.is_null()) { +- delete pv_info; +- +- // The underlying node's info has been GC'ed so try the next one. +- // We don't have to check the methods. If the constant pool has +- // GC'ed then so have the methods. +- continue; +- } +- +- // Found a node with non GC'ed info so return it. The caller will +- // need to delete pv_info when they are done with it. +- _current_p = pv_info; +- return pv_info; +- } +- +- // all of the underlying nodes' info has been GC'ed +- return NULL; +-} // end next_previous_version() +diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp +index 8a849cb..53163b6 100644 +--- a/src/share/vm/oops/instanceKlass.hpp ++++ b/src/share/vm/oops/instanceKlass.hpp +@@ -271,9 +271,6 @@ class instanceKlass: public Klass { + nmethodBucket* _dependencies; // list of dependent nmethods + nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class + BreakpointInfo* _breakpoints; // bpt lists, managed by methodOop +- // Array of interesting part(s) of the previous version(s) of this +- // instanceKlass. See PreviousVersionWalker below. +- GrowableArray<PreviousVersionNode *>* _previous_versions; + // JVMTI fields can be moved to their own structure - see 6315920 + unsigned char * _cached_class_file_bytes; // JVMTI: cached class file, before retransformable agent modified it in CFLH + jint _cached_class_file_len; // JVMTI: length of above +@@ -571,20 +568,11 @@ class instanceKlass: public Klass { + _nonstatic_oop_map_size = words; + } + +- // RedefineClasses() support for previous versions: +- void add_previous_version(instanceKlassHandle ikh, BitMap *emcp_methods, +- int emcp_method_count); + // If the _previous_versions array is non-NULL, then this klass + // has been redefined at least once even if we aren't currently + // tracking a previous version. 
+- bool has_been_redefined() const { return _previous_versions != NULL; } ++ bool has_been_redefined() const { return _old_version != NULL; } + bool has_previous_version() const; +- void init_previous_versions() { +- _previous_versions = NULL; +- } +- GrowableArray<PreviousVersionNode *>* previous_versions() const { +- return _previous_versions; +- } + + // JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation + void set_cached_class_file(unsigned char *class_file_bytes, +@@ -629,6 +617,7 @@ class instanceKlass: public Klass { + static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum, + size_t *length_p, jmethodID* id_p); + jmethodID jmethod_id_or_null(methodOop method); ++ bool update_jmethod_id(methodOop method, jmethodID newMethodID); + + // cached itable index support + void set_cached_itable_index(size_t idnum, int index); +@@ -711,6 +700,7 @@ class instanceKlass: public Klass { + + // subclass/subinterface checks + bool implements_interface(klassOop k) const; ++ bool implements_interface_any_version(klassOop k) const; + + // Access to the implementor of an interface. + klassOop implementor() const +@@ -760,6 +750,9 @@ class instanceKlass: public Klass { + void do_local_static_fields(FieldClosure* cl); + void do_nonstatic_fields(FieldClosure* cl); // including inherited fields + void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS); ++ void store_update_information(GrowableArray<int> &values); ++ void clear_update_information(); ++ + + void methods_do(void f(methodOop method)); + void array_klasses_do(void f(klassOop k)); +@@ -895,7 +888,6 @@ class instanceKlass: public Klass { + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + #endif // !SERIALGC + +-private: + // initialization state + #ifdef ASSERT + void set_init_state(ClassState state); +@@ -1057,106 +1049,6 @@ class JNIid: public CHeapObj<mtClass> { + void verify(klassOop holder); + }; + +- +-// If breakpoints are more numerous than just JVMTI breakpoints, +-// consider compressing this data structure. +-// It is currently a simple linked list defined in methodOop.hpp. +- +-class BreakpointInfo; +- +- +-// A collection point for interesting information about the previous +-// version(s) of an instanceKlass. This class uses weak references to +-// the information so that the information may be collected as needed +-// by the system. If the information is shared, then a regular +-// reference must be used because a weak reference would be seen as +-// collectible. A GrowableArray of PreviousVersionNodes is attached +-// to the instanceKlass as needed. See PreviousVersionWalker below. +-class PreviousVersionNode : public CHeapObj<mtClass> { +- private: +- // A shared ConstantPool is never collected so we'll always have +- // a reference to it so we can update items in the cache. We'll +- // have a weak reference to a non-shared ConstantPool until all +- // of the methods (EMCP or obsolete) have been collected; the +- // non-shared ConstantPool becomes collectible at that point. +- jobject _prev_constant_pool; // regular or weak reference +- bool _prev_cp_is_weak; // true if not a shared ConstantPool +- +- // If the previous version of the instanceKlass doesn't have any +- // EMCP methods, then _prev_EMCP_methods will be NULL. If all the +- // EMCP methods have been collected, then _prev_EMCP_methods can +- // have a length of zero. 
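An illustrative aside, not part of the patch: the weak-reference machinery being deleted above and below (PreviousVersionNode and friends) is replaced by the two direct links this patch adds to Klass, _old_version and _new_version, which chain all revisions of a class together. A minimal stand-alone sketch of that model follows; VersionNode and its fields are hypothetical stand-ins, not the actual HotSpot types:

    #include <cstddef>

    // Illustrative stand-in for the version links the patch adds to Klass.
    struct VersionNode {
      VersionNode* old_version;     // previous revision, NULL for the originally loaded class
      VersionNode* new_version;     // next revision, NULL for the newest version
      int          revision_number; // -1 until the class is first redefined
    };

    // Mirrors the patched instanceKlass::has_previous_version(): the former
    // scan over weak references collapses to a single pointer check.
    inline bool has_previous_version(const VersionNode* k) {
      return k->old_version != NULL;
    }

    // Mirrors the spirit of Klass::newest_version(): walk the chain to its head.
    inline VersionNode* newest_version(VersionNode* k) {
      while (k->new_version != NULL) k = k->new_version;
      return k;
    }

Because every revision stays strongly reachable through these links, the lifetime juggling with JNI weak globals, and all of its "robustness" NULL checks, disappears.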
+- GrowableArray<jweak>* _prev_EMCP_methods; +- +-public: +- PreviousVersionNode(jobject prev_constant_pool, bool prev_cp_is_weak, +- GrowableArray<jweak>* prev_EMCP_methods); +- ~PreviousVersionNode(); +- jobject prev_constant_pool() const { +- return _prev_constant_pool; +- } +- GrowableArray<jweak>* prev_EMCP_methods() const { +- return _prev_EMCP_methods; +- } +-}; +- +- +-// A Handle-ized version of PreviousVersionNode. +-class PreviousVersionInfo : public ResourceObj { +- private: +- constantPoolHandle _prev_constant_pool_handle; +- // If the previous version of the instanceKlass doesn't have any +- // EMCP methods, then _prev_EMCP_methods will be NULL. Since the +- // methods cannot be collected while we hold a handle, +- // _prev_EMCP_methods should never have a length of zero. +- GrowableArray<methodHandle>* _prev_EMCP_method_handles; +- +-public: +- PreviousVersionInfo(PreviousVersionNode *pv_node); +- ~PreviousVersionInfo(); +- constantPoolHandle prev_constant_pool_handle() const { +- return _prev_constant_pool_handle; +- } +- GrowableArray<methodHandle>* prev_EMCP_method_handles() const { +- return _prev_EMCP_method_handles; +- } +-}; +- +- +-// Helper object for walking previous versions. This helper cleans up +-// the Handles that it allocates when the helper object is destroyed. +-// The PreviousVersionInfo object returned by next_previous_version() +-// is only valid until a subsequent call to next_previous_version() or +-// the helper object is destroyed. +-class PreviousVersionWalker : public StackObj { +- private: +- GrowableArray<PreviousVersionNode *>* _previous_versions; +- int _current_index; +- // Fields for cleaning up when we are done walking the previous versions: +- // A HandleMark for the PreviousVersionInfo handles: +- HandleMark _hm; +- +- // It would be nice to have a ResourceMark field in this helper also, +- // but the ResourceMark code says to be careful to delete handles held +- // in GrowableArrays _before_ deleting the GrowableArray. Since we +- // can't guarantee the order in which the fields are destroyed, we +- // have to let the creator of the PreviousVersionWalker object do +- // the right thing. Also, adding a ResourceMark here causes an +- // include loop. +- +- // A pointer to the current info object so we can handle the deletes. +- PreviousVersionInfo * _current_p; +- +- public: +- PreviousVersionWalker(instanceKlass *ik); +- ~PreviousVersionWalker(); +- +- // Return the interesting information for the next previous version +- // of the klass. Returns NULL if there are no more previous versions. +- PreviousVersionInfo* next_previous_version(); +-}; +- +- + // + // nmethodBucket is used to record dependent nmethods for + // deoptimization. 
nmethod dependencies are actually <klass, method> +diff --git a/src/share/vm/oops/instanceKlassKlass.cpp b/src/share/vm/oops/instanceKlassKlass.cpp +index 8e7dc12..5b9b266 100644 +--- a/src/share/vm/oops/instanceKlassKlass.cpp ++++ b/src/share/vm/oops/instanceKlassKlass.cpp +@@ -358,7 +358,7 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it + unsigned nonstatic_oop_map_count, + AccessFlags access_flags, + ReferenceType rt, +- KlassHandle host_klass, TRAPS) { ++ KlassHandle host_klass, KlassHandle old_klass, TRAPS) { + + const int nonstatic_oop_map_size = + instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count); +@@ -435,7 +435,6 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it + ik->set_jni_ids(NULL); + ik->set_osr_nmethods_head(NULL); + ik->set_breakpoints(NULL); +- ik->init_previous_versions(); + ik->set_generic_signature(NULL); + ik->release_set_methods_jmethod_ids(NULL); + ik->release_set_methods_cached_itable_indices(NULL); +@@ -480,6 +479,28 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) { + instanceKlass* ik = instanceKlass::cast(klassOop(obj)); + klassKlass::oop_print_on(obj, st); + ++ // (tw) Output revision number and revision numbers of older / newer and oldest / newest version of this class. ++ ++ st->print(BULLET"revision: %d", ik->revision_number()); ++ ++ if (ik->new_version() != NULL) { ++ st->print(" (newer=%d)", ik->new_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->newest_version() != ik->new_version() && ik->newest_version() != obj) { ++ st->print(" (newest=%d)", ik->newest_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->old_version() != NULL) { ++ st->print(" (old=%d)", ik->old_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->oldest_version() != ik->old_version() && ik->oldest_version() != obj) { ++ st->print(" (oldest=%d)", ik->oldest_version()->klass_part()->revision_number()); ++ } ++ ++ st->cr(); ++ + st->print(BULLET"instance size: %d", ik->size_helper()); st->cr(); + st->print(BULLET"klass size: %d", ik->object_size()); st->cr(); + st->print(BULLET"access: "); ik->access_flags().print_on(st); st->cr(); +@@ -537,26 +558,6 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) { + st->cr(); + } + +- { +- ResourceMark rm; +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. 
+- { +- bool have_pv = false; +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- if (!have_pv) +- st->print(BULLET"previous version: "); +- have_pv = true; +- pv_info->prev_constant_pool_handle()()->print_value_on(st); +- } +- if (have_pv) st->cr(); +- } // pvw is cleaned up +- } // rm is cleaned up +- + if (ik->generic_signature() != NULL) { + st->print(BULLET"generic signature: "); + ik->generic_signature()->print_value_on(st); +@@ -663,7 +664,7 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) { + } + guarantee(sib->as_klassOop()->is_klass(), "should be klass"); + guarantee(sib->as_klassOop()->is_perm(), "should be in permspace"); +- guarantee(sib->super() == super, "siblings should have same superklass"); ++ guarantee(sib->super() == super || super->klass_part()->newest_version() == SystemDictionary::Object_klass(), "siblings should have same superklass"); + sib = sib->next_sibling(); + } + +diff --git a/src/share/vm/oops/instanceKlassKlass.hpp b/src/share/vm/oops/instanceKlassKlass.hpp +index df674a9..45d0b66 100644 +--- a/src/share/vm/oops/instanceKlassKlass.hpp ++++ b/src/share/vm/oops/instanceKlassKlass.hpp +@@ -50,6 +50,7 @@ class instanceKlassKlass : public klassKlass { + AccessFlags access_flags, + ReferenceType rt, + KlassHandle host_klass, ++ KlassHandle old_klass, + TRAPS); + + // Casting from klassOop +diff --git a/src/share/vm/oops/instanceMirrorKlass.cpp b/src/share/vm/oops/instanceMirrorKlass.cpp +index e0dd7d7..a7eec08 100644 +--- a/src/share/vm/oops/instanceMirrorKlass.cpp ++++ b/src/share/vm/oops/instanceMirrorKlass.cpp +@@ -156,6 +156,13 @@ void instanceMirrorKlass::oop_follow_contents(oop obj) { + assert_is_in_closed_subset) + } + ++void instanceMirrorKlass::oop_fields_iterate(oop obj, OopClosure* blk) { ++ InstanceMirrorKlass_OOP_ITERATE( \ ++ start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ ++ blk->do_oop(p), \ ++ assert_is_in_closed_subset) ++} ++ + #ifndef SERIALGC + void instanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm, + oop obj) { +diff --git a/src/share/vm/oops/instanceMirrorKlass.hpp b/src/share/vm/oops/instanceMirrorKlass.hpp +index 2b8b2f4..31969c7 100644 +--- a/src/share/vm/oops/instanceMirrorKlass.hpp ++++ b/src/share/vm/oops/instanceMirrorKlass.hpp +@@ -79,6 +79,9 @@ class instanceMirrorKlass: public instanceKlass { + DEFINE_ALLOCATE_PERMANENT(instanceMirrorKlass); + instanceOop allocate_instance(KlassHandle k, TRAPS); + ++ // Class redefinition, iterate static fields ++ static void oop_fields_iterate(oop obj, OopClosure* blk); ++ + // Garbage collection + int oop_adjust_pointers(oop obj); + void oop_follow_contents(oop obj); +diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp +index 7db4f03..1171487 100644 +--- a/src/share/vm/oops/instanceRefKlass.cpp ++++ b/src/share/vm/oops/instanceRefKlass.cpp +@@ -455,10 +455,13 @@ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) { + instanceKlass* ik = instanceKlass::cast(k); + + // Check that we have the right class +- debug_only(static bool first_time = true); +- assert(k == SystemDictionary::Reference_klass() && first_time, +- "Invalid update of maps"); +- debug_only(first_time = false); ++ ++ // (tw) Asserts no longer valid for class redefinition ++ // debug_only(static bool first_time = true); ++ ++ //assert(k == SystemDictionary::Reference_klass() && first_time, ++ // "Invalid 
update of maps"); ++ //debug_only(first_time = false); + assert(ik->nonstatic_oop_map_count() == 1, "just checking"); + + OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); +diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp +index 596d5ad..a928777 100644 +--- a/src/share/vm/oops/klass.cpp ++++ b/src/share/vm/oops/klass.cpp +@@ -161,6 +161,13 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size, + kl->set_alloc_size(0); + TRACE_INIT_ID(kl); + ++ kl->set_redefinition_flags(Klass::NoRedefinition); ++ kl->set_redefining(false); ++ kl->set_new_version(NULL); ++ kl->set_old_version(NULL); ++ kl->set_redefinition_index(-1); ++ kl->set_revision_number(-1); ++ + kl->set_prototype_header(markOopDesc::prototype()); + kl->set_biased_lock_revocation_count(0); + kl->set_last_biased_lock_bulk_revocation_time(0); +@@ -232,7 +239,7 @@ void Klass::initialize_supers(klassOop k, TRAPS) { + set_super(NULL); + oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop()); + assert(super_depth() == 0, "Object must already be initialized properly"); +- } else if (k != super() || k == SystemDictionary::Object_klass()) { ++ } else if (k != super() || k->klass_part()->super() == NULL) { + assert(super() == NULL || super() == SystemDictionary::Object_klass(), + "initialize this only once to a non-trivial value"); + set_super(k); +@@ -385,7 +392,7 @@ void Klass::append_to_sibling_list() { + void Klass::remove_from_sibling_list() { + // remove receiver from sibling list + instanceKlass* super = superklass(); +- assert(super != NULL || as_klassOop() == SystemDictionary::Object_klass(), "should have super"); ++ assert(super != NULL || as_klassOop()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version(), "should have super"); + if (super == NULL) return; // special case: class Object + if (super->subklass() == this) { + // first subklass +diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp +index bcbd4e7..bf242d9 100644 +--- a/src/share/vm/oops/klass.hpp ++++ b/src/share/vm/oops/klass.hpp +@@ -170,6 +170,7 @@ class Klass_vtbl { + void* operator new(size_t ignored, KlassHandle& klass, int size, TRAPS); + }; + ++template<class L, class R> class Pair; + + class Klass : public Klass_vtbl { + friend class VMStructs; +@@ -222,6 +223,31 @@ class Klass : public Klass_vtbl { + oop* oop_block_beg() const { return adr_secondary_super_cache(); } + oop* oop_block_end() const { return adr_next_sibling() + 1; } + ++ // (tw) Different class redefinition flags of code evolution. ++ enum RedefinitionFlags { ++ ++ // This class is not redefined at all! ++ NoRedefinition, ++ ++ // There are changes to the class meta data. ++ ModifyClass = 1, ++ ++ // The size of the class meta data changes. ++ ModifyClassSize = ModifyClass << 1, ++ ++ // There are change to the instance format. ++ ModifyInstances = ModifyClassSize << 1, ++ ++ // The size of instances changes. ++ ModifyInstanceSize = ModifyInstances << 1, ++ ++ // A super type of this class is removed. ++ RemoveSuperType = ModifyInstanceSize << 1, ++ ++ // This class has been marked as an affected class. ++ MarkedAsAffected = RemoveSuperType << 1 ++ }; ++ + protected: + // + // The oop block. 
All oop fields must be declared here and only oop fields +@@ -241,6 +267,10 @@ class Klass : public Klass_vtbl { + oop _java_mirror; + // Superclass + klassOop _super; ++ // Old class ++ klassOop _old_version; ++ // New class ++ klassOop _new_version; + // First subclass (NULL if none); _subklass->next_sibling() is next one + klassOop _subklass; + // Sibling link (or NULL); links all subklasses of a klass +@@ -253,6 +283,16 @@ class Klass : public Klass_vtbl { + jint _modifier_flags; // Processed access flags, for use by Class.getModifiers. + AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here. + ++ // (tw) Non-oop fields for enhanced class redefinition ++ jint _revision_number; // The revision number for redefined classes ++ jint _redefinition_index; // Index of this class when performing the redefinition ++ bool _subtype_changed; ++ int _redefinition_flags; // Level of class redefinition ++ bool _is_copying_backwards; // Does the class need to copy fields backwards? => possibly overwrite itself? ++ bool _original_field_offsets_changed; // Did the original field offsets of this class change during class redefinition? ++ int * _update_information; // Update information ++ bool _is_redefining; ++ + #ifndef PRODUCT + int _verify_count; // to avoid redundant verifies + #endif +@@ -301,6 +341,75 @@ class Klass : public Klass_vtbl { + klassOop secondary_super_cache() const { return _secondary_super_cache; } + void set_secondary_super_cache(klassOop k) { oop_store_without_check((oop*) &_secondary_super_cache, (oop) k); } + ++ // BEGIN class redefinition utilities ++ ++ // double links between new and old version of a class ++ klassOop old_version() const { return _old_version; } ++ void set_old_version(klassOop klass) { assert(_old_version == NULL || klass == NULL, "Can only be set once!"); _old_version = klass; } ++ klassOop new_version() const { return _new_version; } ++ void set_new_version(klassOop klass) { assert(_new_version == NULL || klass == NULL, "Can only be set once!"); _new_version = klass; } ++ ++ // A subtype of this class is no longer a subtype ++ bool has_subtype_changed() const { return _subtype_changed; } ++ void set_subtype_changed(bool b) { assert(is_newest_version() || new_version()->klass_part()->is_newest_version(), "must be newest or second newest version"); ++ _subtype_changed = b; } ++ // state of being redefined ++ int redefinition_index() const { return _redefinition_index; } ++ void set_redefinition_index(int index) { _redefinition_index = index; } ++ void set_redefining(bool b) { _is_redefining = b; } ++ bool is_redefining() const { return _is_redefining; } ++ int redefinition_flags() const { return _redefinition_flags; } ++ bool check_redefinition_flag(int flags) const { return (_redefinition_flags & flags) != 0; } ++ void set_redefinition_flags(int flags) { _redefinition_flags = flags; } ++ void set_redefinition_flag(int flag) { _redefinition_flags |= flag; } ++ void clear_redefinition_flag(int flag) { _redefinition_flags &= ~flag; } ++ bool is_copying_backwards() const { return _is_copying_backwards; } ++ void set_copying_backwards(bool b) { _is_copying_backwards = b; } ++ ++ // update information ++ int *update_information() const { return _update_information; } ++ void set_update_information(int *info) { _update_information = info; } ++ ++ bool is_same_or_older_version(klassOop klass) const { ++ if (Klass::cast(klass) == this) { return true; } ++ else if (_old_version == NULL) { return false; } ++ else { return 
_old_version->klass_part()->is_same_or_older_version(klass); }
++ }
++
++ // Revision number for redefined classes, -1 for originally loaded classes
++ jint revision_number() const {
++ return _revision_number;
++ }
++
++ bool was_redefined() const {
++ return _revision_number != -1;
++ }
++
++ void set_revision_number(jint number) {
++ _revision_number = number;
++ }
++
++ klassOop oldest_version() const {
++ if (_old_version == NULL) { return this->as_klassOop(); }
++ else { return _old_version->klass_part()->oldest_version(); }
++ }
++
++ klassOop newest_version() const {
++ if (_new_version == NULL) { return this->as_klassOop(); }
++ else { return _new_version->klass_part()->newest_version(); }
++ }
++
++ klassOop active_version() const {
++ if (_new_version == NULL || _new_version->klass_part()->is_redefining()) { assert(!this->is_redefining(), "just checking"); return this->as_klassOop(); }
++ else { return _new_version->klass_part()->active_version(); }
++ }
++
++ bool is_newest_version() const {
++ return _new_version == NULL;
++ }
++
++ // END class redefinition utilities
++
+ objArrayOop secondary_supers() const { return _secondary_supers; }
+ void set_secondary_supers(objArrayOop k) { oop_store_without_check((oop*) &_secondary_supers, (oop) k); }
+
+@@ -361,6 +470,8 @@ class Klass : public Klass_vtbl {
+ void set_next_sibling(klassOop s);
+
+ oop* adr_super() const { return (oop*)&_super; }
++ oop* adr_old_version() const { return (oop*)&_old_version; }
++ oop* adr_new_version() const { return (oop*)&_new_version; }
+ oop* adr_primary_supers() const { return (oop*)&_primary_supers[0]; }
+ oop* adr_secondary_super_cache() const { return (oop*)&_secondary_super_cache; }
+ oop* adr_secondary_supers()const { return (oop*)&_secondary_supers; }
+diff --git a/src/share/vm/oops/klassKlass.cpp b/src/share/vm/oops/klassKlass.cpp
+index 06809d5..1050eda 100644
+--- a/src/share/vm/oops/klassKlass.cpp
++++ b/src/share/vm/oops/klassKlass.cpp
+@@ -68,6 +68,8 @@ void klassKlass::oop_follow_contents(oop obj) {
+ Klass* k = Klass::cast(klassOop(obj));
+ // If we are alive it is valid to keep our superclass and subtype caches alive
+ MarkSweep::mark_and_push(k->adr_super());
++ MarkSweep::mark_and_push(k->adr_old_version());
++ MarkSweep::mark_and_push(k->adr_new_version());
+ for (juint i = 0; i < Klass::primary_super_limit(); i++)
+ MarkSweep::mark_and_push(k->adr_primary_supers()+i);
+ MarkSweep::mark_and_push(k->adr_secondary_super_cache());
+@@ -87,6 +89,8 @@ void klassKlass::oop_follow_contents(ParCompactionManager* cm,
+ Klass* k = Klass::cast(klassOop(obj));
+ // If we are alive it is valid to keep our superclass and subtype caches alive
+ PSParallelCompact::mark_and_push(cm, k->adr_super());
++ PSParallelCompact::mark_and_push(cm, k->adr_old_version());
++ PSParallelCompact::mark_and_push(cm, k->adr_new_version());
+ for (juint i = 0; i < Klass::primary_super_limit(); i++)
+ PSParallelCompact::mark_and_push(cm, k->adr_primary_supers()+i);
+ PSParallelCompact::mark_and_push(cm, k->adr_secondary_super_cache());
+@@ -106,6 +110,8 @@ int klassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
+ int size = oop_size(obj);
+ Klass* k = Klass::cast(klassOop(obj));
+ blk->do_oop(k->adr_super());
++ blk->do_oop(k->adr_old_version());
++ blk->do_oop(k->adr_new_version());
+ for (juint i = 0; i < Klass::primary_super_limit(); i++)
+ blk->do_oop(k->adr_primary_supers()+i);
+ blk->do_oop(k->adr_secondary_super_cache());
+@@ -134,6 +140,10 @@ int klassKlass::oop_oop_iterate_m(oop obj, OopClosure* 
blk, MemRegion mr) { + oop* adr; + adr = k->adr_super(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + for (juint i = 0; i < Klass::primary_super_limit(); i++) { + adr = k->adr_primary_supers()+i; + if (mr.contains(adr)) blk->do_oop(adr); +@@ -167,6 +177,8 @@ int klassKlass::oop_adjust_pointers(oop obj) { + Klass* k = Klass::cast(klassOop(obj)); + + MarkSweep::adjust_pointer(k->adr_super()); ++ MarkSweep::adjust_pointer(k->adr_new_version()); ++ MarkSweep::adjust_pointer(k->adr_old_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::adjust_pointer(k->adr_primary_supers()+i); + MarkSweep::adjust_pointer(k->adr_secondary_super_cache()); +diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp +index 94e2e04..a683a4b 100644 +--- a/src/share/vm/oops/klassVtable.cpp ++++ b/src/share/vm/oops/klassVtable.cpp +@@ -628,17 +628,13 @@ void klassVtable::adjust_method_entries(methodOop* old_methods, methodOop* new_m + if (unchecked_method_at(index) == old_method) { + put_method_at(new_method, index); + +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { ++ IF_TRACE_RC4 { + if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); ++ TRACE_RC4("adjust: name=%s", Klass::cast(old_method->method_holder())->external_name()); + *trace_name_printed = true; + } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00100000, ("vtable method update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); ++ TRACE_RC4("vtable method update: %s(%s)", new_method->name()->as_C_string(), ++ new_method->signature()->as_C_string()); + } + // cannot 'break' here; see for-loop comment above. + } +@@ -1008,17 +1004,13 @@ void klassItable::adjust_method_entries(methodOop* old_methods, methodOop* new_m + if (ime->method() == old_method) { + ime->initialize(new_method); + +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { ++ IF_TRACE_RC4 { + if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); ++ TRACE_RC4("adjust: name=%s", Klass::cast(old_method->method_holder())->external_name()); + *trace_name_printed = true; + } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00200000, ("itable method update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); ++ TRACE_RC4("itable method update: %s(%s)", new_method->name()->as_C_string(), ++ new_method->signature()->as_C_string()); + } + // cannot 'break' here; see for-loop comment above. 
+ } +@@ -1241,6 +1233,7 @@ void klassVtable::verify(outputStream* st, bool forced) { + + void klassVtable::verify_against(outputStream* st, klassVtable* vt, int index) { + vtableEntry* vte = &vt->table()[index]; ++ if (vte->method() == NULL || table()[index].method() == NULL) return; + if (vte->method()->name() != table()[index].method()->name() || + vte->method()->signature() != table()[index].method()->signature()) { + fatal("mismatched name/signature of vtable entries"); +@@ -1260,6 +1253,8 @@ void klassVtable::print() { + + void vtableEntry::verify(klassVtable* vt, outputStream* st) { + NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true)); ++ // (tw) TODO: Check: Does not hold? ++ if (method() != NULL) { + assert(method() != NULL, "must have set method"); + method()->verify(); + // we sub_type, because it could be a miranda method +@@ -1267,7 +1262,13 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { + #ifndef PRODUCT + print(); + #endif +- fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this)); ++ klassOop first_klass = vt->klass()(); ++ klassOop second_klass = method()->method_holder(); ++ // (tw) the following fatal does not work for old versions of classes ++ if (first_klass->klass_part()->is_newest_version()) { ++ //fatal1("vtableEntry %#lx: method is from subclass", this); ++ } ++ } + } + } + +@@ -1275,8 +1276,8 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { + + void vtableEntry::print() { + ResourceMark rm; +- tty->print("vtableEntry %s: ", method()->name()->as_C_string()); + if (Verbose) { ++ tty->print("vtableEntry %s: ", (method() == NULL) ? "null" : method()->name()->as_C_string()); + tty->print("m %#lx ", (address)method()); + } + } +diff --git a/src/share/vm/oops/methodKlass.cpp b/src/share/vm/oops/methodKlass.cpp +index 75d0b09..c4be146 100644 +--- a/src/share/vm/oops/methodKlass.cpp ++++ b/src/share/vm/oops/methodKlass.cpp +@@ -93,6 +93,9 @@ methodOop methodKlass::allocate(constMethodHandle xconst, + m->set_adapter_entry(NULL); + m->clear_code(); // from_c/from_i get set to c2i/i2i + ++ m->set_new_version(NULL); ++ m->set_old_version(NULL); ++ + if (access_flags.is_native()) { + m->clear_native_function(); + m->set_signature_handler(NULL); +@@ -122,6 +125,8 @@ void methodKlass::oop_follow_contents(oop obj) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::mark_and_push(m->adr_constMethod()); ++ MarkSweep::mark_and_push(m->adr_new_version()); ++ MarkSweep::mark_and_push(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::mark_and_push(m->adr_method_data()); + } +@@ -135,6 +140,8 @@ void methodKlass::oop_follow_contents(ParCompactionManager* cm, + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. 
+ PSParallelCompact::mark_and_push(cm, m->adr_constMethod()); ++ PSParallelCompact::mark_and_push(cm, m->adr_new_version()); ++ PSParallelCompact::mark_and_push(cm, m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::mark_and_push(cm, m->adr_method_data()); +@@ -152,6 +159,8 @@ int methodKlass::oop_oop_iterate(oop obj, OopClosure* blk) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves + blk->do_oop(m->adr_constMethod()); ++ blk->do_oop(m->adr_new_version()); ++ blk->do_oop(m->adr_old_version()); + if (m->method_data() != NULL) { + blk->do_oop(m->adr_method_data()); + } +@@ -170,6 +179,10 @@ int methodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { + oop* adr; + adr = m->adr_constMethod(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + if (m->method_data() != NULL) { + adr = m->adr_method_data(); + if (mr.contains(adr)) blk->do_oop(adr); +@@ -187,6 +200,8 @@ int methodKlass::oop_adjust_pointers(oop obj) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::adjust_pointer(m->adr_constMethod()); ++ MarkSweep::adjust_pointer(m->adr_new_version()); ++ MarkSweep::adjust_pointer(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::adjust_pointer(m->adr_method_data()); + } +@@ -202,6 +217,8 @@ int methodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { + assert(obj->is_method(), "should be method"); + methodOop m = methodOop(obj); + PSParallelCompact::adjust_pointer(m->adr_constMethod()); ++ PSParallelCompact::adjust_pointer(m->adr_new_version()); ++ PSParallelCompact::adjust_pointer(m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::adjust_pointer(m->adr_method_data()); +@@ -222,7 +239,18 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) { + methodOop m = methodOop(obj); + // get the effect of PrintOopAddress, always, for methods: + st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m); +- st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr(); ++ st->print (" - method holder: "); m->method_holder()->print_value_on(st); ++ ++ if (m->method_holder()->klass_part()->new_version() != NULL) { ++ st->print(" (old)"); ++ } ++ st->cr(); ++ ++ st->print_cr(" - is obsolete: %d", (int)(m->is_obsolete())); ++ st->print_cr(" - is old: %d", (int)(m->is_old())); ++ st->print_cr(" - new version: "INTPTR_FORMAT, (address)m->new_version()); ++ st->print_cr(" - old version: "INTPTR_FORMAT, (address)m->old_version()); ++ st->print_cr(" - holder revision: %d", m->method_holder()->klass_part()->revision_number()); + st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants()); + m->constants()->print_value_on(st); st->cr(); + st->print (" - access: 0x%x ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr(); +diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp +index 4f59d3a..32cb4cf 100644 +--- a/src/share/vm/oops/methodOop.cpp ++++ b/src/share/vm/oops/methodOop.cpp +@@ -1061,6 +1061,8 @@ methodHandle methodOopDesc::clone_with_new_data(methodHandle m, u_char* new_code + + // Reset correct method/const method, method size, and parameter info + newm->set_constMethod(newcm); ++ 
newm->set_new_version(newm->new_version()); ++ newm->set_old_version(newm->old_version()); + newm->constMethod()->set_code_size(new_code_length); + newm->constMethod()->set_constMethod_size(new_const_method_size); + newm->set_method_size(new_method_size); +diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp +index 486e106..e35d5ed 100644 +--- a/src/share/vm/oops/methodOop.hpp ++++ b/src/share/vm/oops/methodOop.hpp +@@ -114,6 +114,10 @@ class methodOopDesc : public oopDesc { + AccessFlags _access_flags; // Access flags + int _vtable_index; // vtable index of this method (see VtableIndexFlag) + // note: can have vtables with >2**16 elements (because of inheritance) ++ // (tw) Newer version of method available? ++ methodOop _new_version; ++ methodOop _old_version; ++ + #ifdef CC_INTERP + int _result_index; // C++ interpreter needs for converting results to/from stack + #endif +@@ -175,6 +179,29 @@ class methodOopDesc : public oopDesc { + int name_index() const { return constMethod()->name_index(); } + void set_name_index(int index) { constMethod()->set_name_index(index); } + ++ methodOop new_version() const {return _new_version; } ++ void set_new_version(methodOop m) { _new_version = m; } ++ methodOop newest_version() { if(_new_version == NULL) return this; else return new_version()->newest_version(); } ++ ++ methodOop old_version() const {return _old_version; }; ++ void set_old_version(methodOop m) { ++ if (m == NULL) { ++ _old_version = NULL; ++ return; ++ } ++ ++ assert(_old_version == NULL, "may only be set once"); ++ assert(this->code_size() == m->code_size(), "must have same code length"); ++ _old_version = m; ++ } ++ ++ methodOop oldest_version() const { ++ if(_old_version == NULL) return (methodOop)this; ++ else { ++ return old_version()->oldest_version(); ++ } ++ } ++ + // signature + Symbol* signature() const { return constants()->symbol_at(signature_index()); } + int signature_index() const { return constMethod()->signature_index(); } +@@ -734,6 +761,8 @@ class methodOopDesc : public oopDesc { + + // Garbage collection support + oop* adr_constMethod() const { return (oop*)&_constMethod; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } + oop* adr_method_data() const { return (oop*)&_method_data; } + }; + +diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp +index 5982c88..4873fca 100644 +--- a/src/share/vm/oops/oop.hpp ++++ b/src/share/vm/oops/oop.hpp +@@ -95,6 +95,7 @@ class oopDesc { + narrowOop* compressed_klass_addr(); + + void set_klass(klassOop k); ++ void set_klass_no_check(klassOop k); + + // For klass field compression + int klass_gap() const; +@@ -135,6 +136,7 @@ class oopDesc { + bool is_array() const; + bool is_objArray() const; + bool is_klass() const; ++ bool is_instanceKlass() const; + bool is_thread() const; + bool is_method() const; + bool is_constMethod() const; +diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp +index f4eb2f7..0acb346 100644 +--- a/src/share/vm/oops/oop.inline.hpp ++++ b/src/share/vm/oops/oop.inline.hpp +@@ -123,6 +123,14 @@ inline void oopDesc::set_klass(klassOop k) { + } + } + ++inline void oopDesc::set_klass_no_check(klassOop k) { ++ if (UseCompressedOops) { ++ oop_store_without_check(compressed_klass_addr(), (oop)k); ++ } else { ++ oop_store_without_check(klass_addr(), (oop) k); ++ } ++} ++ + inline int oopDesc::klass_gap() const { + return *(int*)(((intptr_t)this) + 
klass_gap_offset_in_bytes()); + } +@@ -156,6 +164,7 @@ inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_ + inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); } + inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); } + inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); } ++inline bool oopDesc::is_instanceKlass() const { return blueprint()->oop_is_instanceKlass(); } + inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); } + inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); } + inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); } +diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp +index 2123991..6cbd78c 100644 +--- a/src/share/vm/prims/jni.cpp ++++ b/src/share/vm/prims/jni.cpp +@@ -406,7 +406,7 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR + } + } + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, +- Handle(), &st, true, ++ Handle(), &st, true, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp +index 7dcd968..d59052f 100644 +--- a/src/share/vm/prims/jvm.cpp ++++ b/src/share/vm/prims/jvm.cpp +@@ -872,7 +872,7 @@ static jclass jvm_define_class_common(JNIEnv *env, const char *name, + Handle protection_domain (THREAD, JNIHandles::resolve(pd)); + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, + protection_domain, &st, +- verify != 0, ++ verify != 0, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvm_misc.hpp b/src/share/vm/prims/jvm_misc.hpp +index 2b46e36..549e949 100644 +--- a/src/share/vm/prims/jvm_misc.hpp ++++ b/src/share/vm/prims/jvm_misc.hpp +@@ -84,6 +84,7 @@ extern "C" { + (JNIEnv *env, jobject obj, jfieldID fieldID); + } + ++// TODO(tw): Check if we need to "unquicken" because of class redefinition. 
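An illustrative aside, not part of the patch: the set_klass_no_check() added to oop.inline.hpp above performs the same raw store as set_klass(), but without the debug-build sanity assertions; presumably, while instances are being migrated, an object may briefly point at a class version that those assertions would reject. A toy sketch of the distinction — Obj, Clazz, and the asserts shown here are hypothetical, not HotSpot's actual checks:

    #include <cassert>
    #include <cstddef>

    struct Clazz { bool is_redefining; };
    struct Obj   { Clazz* klass; };

    // Checked store: a set_klass()-style setter with sanity assertions.
    inline void set_klass(Obj* o, Clazz* k) {
      assert(k != NULL && !k->is_redefining);  // hypothetical sanity checks
      o->klass = k;
    }

    // Unchecked store: used while objects are switched to a class version
    // that is still flagged as redefining; the caller guarantees consistency.
    inline void set_klass_no_check(Obj* o, Clazz* k) {
      o->klass = k;
    }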
+ void quicken_jni_functions(); + address jni_GetBooleanField_addr(); + address jni_GetByteField_addr(); +diff --git a/src/share/vm/prims/jvmtiEnv.cpp b/src/share/vm/prims/jvmtiEnv.cpp +index 4ac6b82..30b8e84 100644 +--- a/src/share/vm/prims/jvmtiEnv.cpp ++++ b/src/share/vm/prims/jvmtiEnv.cpp +@@ -290,7 +290,10 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { + class_definitions[index].klass = jcls; + } + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RetransformClasses */ + +@@ -299,9 +302,12 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { + // class_definitions - pre-checked for NULL + jvmtiError + JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) { +-//TODO: add locking ++ + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RedefineClasses */ + +diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp +index ec8ede3..2bd5983 100644 +--- a/src/share/vm/prims/jvmtiExport.cpp ++++ b/src/share/vm/prims/jvmtiExport.cpp +@@ -2296,7 +2296,7 @@ JvmtiDynamicCodeEventCollector::JvmtiDynamicCodeEventCollector() : _code_blobs(N + // iterate over any code blob descriptors collected and post a + // DYNAMIC_CODE_GENERATED event to the profiler. + JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() { +- assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events"); ++ assert(!JavaThread::current()->owns_locks_but_redefine_classes_lock(), "all locks must be released to post deferred events"); + // iterate over any code blob descriptors that we collected + if (_code_blobs != NULL) { + for (int i=0; i<_code_blobs->length(); i++) { +diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp +index d3fa140..31a8a19 100644 +--- a/src/share/vm/prims/jvmtiImpl.cpp ++++ b/src/share/vm/prims/jvmtiImpl.cpp +@@ -284,60 +284,11 @@ address JvmtiBreakpoint::getBcp() { + } + + void JvmtiBreakpoint::each_method_version_do(method_action meth_act) { +- ((methodOopDesc*)_method->*meth_act)(_bci); +- +- // add/remove breakpoint to/from versions of the method that +- // are EMCP. Directly or transitively obsolete methods are +- // not saved in the PreviousVersionInfo. +- Thread *thread = Thread::current(); +- instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder()); +- Symbol* m_name = _method->name(); +- Symbol* m_signature = _method->signature(); +- +- { +- ResourceMark rm(thread); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. 
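An illustrative aside, not part of the patch: the removal that begins here guts the walker-based JvmtiBreakpoint::each_method_version_do(); its replacement, visible at the end of this hunk, simply follows the method's old_version() chain. A stand-alone sketch of that loop — VersionedMethod is a hypothetical stand-in for methodOopDesc:

    #include <cstddef>

    struct VersionedMethod {
      VersionedMethod* old_version;  // NULL for the originally loaded method
      int breakpoint_bci;
      void set_breakpoint(int bci)   { breakpoint_bci = bci; }
      void clear_breakpoint(int bci) { if (breakpoint_bci == bci) breakpoint_bci = -1; }
    };

    typedef void (VersionedMethod::*method_action)(int);

    // Apply the breakpoint action to every revision of the method, newest first.
    void each_method_version_do(VersionedMethod* m, method_action act, int bci) {
      while (m != NULL) {
        (m->*act)(bci);
        m = m->old_version;
      }
    }

Compared with the deleted code, no name/signature matching is needed: the chain links identify the same logical method across revisions by construction.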
+- { +- // search previous versions if they exist +- PreviousVersionWalker pvw((instanceKlass *)ikh()->klass_part()); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- GrowableArray<methodHandle>* methods = +- pv_info->prev_EMCP_method_handles(); +- +- if (methods == NULL) { +- // We have run into a PreviousVersion generation where +- // all methods were made obsolete during that generation's +- // RedefineClasses() operation. At the time of that +- // operation, all EMCP methods were flushed so we don't +- // have to go back any further. +- // +- // A NULL methods array is different than an empty methods +- // array. We cannot infer any optimizations about older +- // generations from an empty methods array for the current +- // generation. +- break; +- } +- +- for (int i = methods->length() - 1; i >= 0; i--) { +- methodHandle method = methods->at(i); +- if (method->name() == m_name && method->signature() == m_signature) { +- RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)", +- meth_act == &methodOopDesc::set_breakpoint ? "sett" : "clear", +- method->name()->as_C_string(), +- method->signature()->as_C_string())); +- assert(!method->is_obsolete(), "only EMCP methods here"); +- +- ((methodOopDesc*)method()->*meth_act)(_bci); +- break; +- } +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up ++ methodOop method = _method; ++ while (method != NULL) { ++ ((methodOopDesc*)method->*meth_act)(_bci); ++ method = method->old_version(); ++ } + } + + void JvmtiBreakpoint::set() { +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp +index eb52388..432e15a 100644 +--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -38,490 +38,669 @@ + #include "runtime/deoptimization.hpp" + #include "runtime/relocator.hpp" + #include "utilities/bitMap.inline.hpp" ++#include "prims/jvmtiClassFileReconstituter.hpp" ++#include "compiler/compileBroker.hpp" ++#include "oops/instanceMirrorKlass.hpp" + + + objArrayOop VM_RedefineClasses::_old_methods = NULL; + objArrayOop VM_RedefineClasses::_new_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_old_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_new_methods = NULL; +-methodOop* VM_RedefineClasses::_deleted_methods = NULL; +-methodOop* VM_RedefineClasses::_added_methods = NULL; ++int* VM_RedefineClasses::_matching_old_methods = NULL; ++int* VM_RedefineClasses::_matching_new_methods = NULL; ++int* VM_RedefineClasses::_deleted_methods = NULL; ++int* VM_RedefineClasses::_added_methods = NULL; + int VM_RedefineClasses::_matching_methods_length = 0; + int VM_RedefineClasses::_deleted_methods_length = 0; + int VM_RedefineClasses::_added_methods_length = 0; +-klassOop VM_RedefineClasses::_the_class_oop = NULL; ++GrowableArray<instanceKlassHandle>* VM_RedefineClasses::_affected_klasses = NULL; + + +-VM_RedefineClasses::VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind) { ++// Holds the revision number of the current class redefinition ++int VM_RedefineClasses::_revision_number = -1; ++ ++VM_RedefineClasses::VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind) ++ : VM_GC_Operation(Universe::heap()->total_full_collections(), GCCause::_heap_inspection) { ++ RC_TIMER_START(_timer_total); + _class_count = class_count; + _class_defs = class_defs; + _class_load_kind = class_load_kind; +- _res = JVMTI_ERROR_NONE; ++ _result = JVMTI_ERROR_NONE; + } + +-bool VM_RedefineClasses::doit_prologue() { +- if (_class_count == 0) { +- _res = JVMTI_ERROR_NONE; +- return false; ++VM_RedefineClasses::~VM_RedefineClasses() { ++ unlock_threads(); ++ RC_TIMER_STOP(_timer_total); ++ ++ if (TimeRedefineClasses) { ++ tty->print_cr(""); ++ tty->print_cr("Timing Prologue: %d", _timer_prologue.milliseconds()); ++ tty->print_cr("Timing Class Loading: %d", _timer_class_loading.milliseconds()); ++ tty->print_cr("Timing Waiting for Lock: %d", _timer_wait_for_locks.milliseconds()); ++ tty->print_cr("Timing Class Linking: %d", _timer_class_linking.milliseconds()); ++ tty->print_cr("Timing Prepare Redefinition: %d", _timer_prepare_redefinition.milliseconds()); ++ tty->print_cr("Timing Heap Iteration: %d", _timer_heap_iteration.milliseconds()); ++ tty->print_cr("Timing Redefinition GC: %d", _timer_redefinition.milliseconds()); ++ tty->print_cr("Timing Epilogue: %d", _timer_vm_op_epilogue.milliseconds()); ++ tty->print_cr("------------------------------------------------------------------"); ++ tty->print_cr("Total Time: %d", _timer_total.milliseconds()); ++ tty->print_cr(""); + } +- if (_class_defs == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++} ++ ++void VM_RedefineClasses::swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class) { ++ typeArrayOop save; ++ ++ save = scratch_class->get_method_annotations_of(i); ++ scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); ++ scratch_class->set_method_annotations_of(j, save); ++ ++ save = scratch_class->get_method_parameter_annotations_of(i); ++ 
scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); ++ scratch_class->set_method_parameter_annotations_of(j, save); ++ ++ save = scratch_class->get_method_default_annotations_of(i); ++ scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); ++ scratch_class->set_method_default_annotations_of(j, save); ++} ++ ++void VM_RedefineClasses::add_affected_klasses( klassOop klass ) ++{ ++ assert(!_affected_klasses->contains(klass), "must not occur more than once!"); ++ assert(klass->klass_part()->new_version() == NULL, "Only last version is valid entry in system dictionary"); ++ ++ Klass* k = klass->klass_part(); ++ ++ if (k->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ _affected_klasses->append(klass); ++ return; + } +- for (int i = 0; i < _class_count; i++) { +- if (_class_defs[i].klass == NULL) { +- _res = JVMTI_ERROR_INVALID_CLASS; +- return false; +- } +- if (_class_defs[i].class_byte_count == 0) { +- _res = JVMTI_ERROR_INVALID_CLASS_FORMAT; +- return false; ++ ++ for (juint i = 0; i < k->super_depth(); i++) { ++ klassOop primary_oop = k->primary_super_of_depth(i); ++ // super_depth returns "8" for interfaces, but they don't have primaries other than Object. ++ if (primary_oop == NULL) break; ++ Klass* primary = Klass::cast(primary_oop); ++ if (primary->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ TRACE_RC3("Found affected class: %s", k->name()->as_C_string()); ++ k->set_redefinition_flag(Klass::MarkedAsAffected); ++ _affected_klasses->append(klass); ++ return; + } +- if (_class_defs[i].class_bytes == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++ } ++ ++ // Check secondary supers ++ int cnt = k->secondary_supers()->length(); ++ for (int i = 0; i < cnt; i++) { ++ Klass* secondary = Klass::cast((klassOop) k->secondary_supers()->obj_at(i)); ++ if (secondary->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ TRACE_RC3("Found affected class: %s", k->name()->as_C_string()); ++ k->set_redefinition_flag(Klass::MarkedAsAffected); ++ _affected_klasses->append(klass); ++ return; + } + } ++} + +- // Start timer after all the sanity checks; not quite accurate, but +- // better than adding a bunch of stop() calls. +- RC_TIMER_START(_timer_vm_op_prologue); + +- // We first load new class versions in the prologue, because somewhere down the +- // call chain it is required that the current thread is a Java thread. +- _res = load_new_class_versions(Thread::current()); +- if (_res != JVMTI_ERROR_NONE) { +- // Free os::malloc allocated memory in load_new_class_version. +- os::free(_scratch_classes); +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return false; ++// Searches for all affected classes and performs a sorting such that a supertype is always before a subtype. 
++jvmtiError VM_RedefineClasses::find_sorted_affected_classes() { ++ ++ assert(_affected_klasses, ""); ++ for (int i = 0; i < _class_count; i++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); ++ instanceKlassHandle klass_handle(Thread::current(), java_lang_Class::as_klassOop(mirror)); ++ klass_handle->set_redefinition_flag(Klass::MarkedAsAffected); ++ assert(klass_handle->new_version() == NULL, "Must be new class"); + } + +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return true; ++ // Find classes not directly redefined, but affected by a redefinition (because one of its supertypes is redefined) ++ SystemDictionary::classes_do(VM_RedefineClasses::add_affected_klasses); ++ TRACE_RC1("%d classes affected", _affected_klasses->length()); ++ ++ // Sort the affected klasses such that a supertype is always on a smaller array index than its subtype. ++ jvmtiError result = do_topological_class_sorting(_class_defs, _class_count, Thread::current()); ++ IF_TRACE_RC2 { ++ TRACE_RC2("Redefine order: "); ++ for (int i = 0; i < _affected_klasses->length(); i++) { ++ TRACE_RC2("%s", _affected_klasses->at(i)->name()->as_C_string()); ++ } ++ } ++ ++ return result; + } + +-void VM_RedefineClasses::doit() { +- Thread *thread = Thread::current(); ++// Searches for the class bytes of the given class and returns them as a byte array. ++jvmtiError VM_RedefineClasses::find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed) { + +- if (UseSharedSpaces) { +- // Sharing is enabled so we remap the shared readonly space to +- // shared readwrite, private just in case we need to redefine +- // a shared class. We do the remap during the doit() phase of +- // the safepoint to be safer. +- if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { +- RC_TRACE_WITH_THREAD(0x00000001, thread, +- ("failed to remap shared readonly space to readwrite, private")); +- _res = JVMTI_ERROR_INTERNAL; +- return; ++ *not_changed = false; ++ ++ // Search for the index in the redefinition array that corresponds to the current class ++ int j; ++ for (j=0; j<_class_count; j++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[j].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ if (the_class_oop == the_class()) { ++ break; + } + } + +- for (int i = 0; i < _class_count; i++) { +- redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread); +- } +- // Disable any dependent concurrent compilations +- SystemDictionary::notice_modification(); ++ if (j == _class_count) { + +- // Set flag indicating that some invariants are no longer true. +- // See jvmtiExport.hpp for detailed explanation. +- JvmtiExport::set_has_redefined_a_class(); ++ *not_changed = true; + +-// check_class() is optionally called for product bits, but is +-// always called for non-product bits. +-#ifdef PRODUCT +- if (RC_TRACE_ENABLED(0x00004000)) { +-#endif +- RC_TRACE_WITH_THREAD(0x00004000, thread, ("calling check_class")); +- SystemDictionary::classes_do(check_class, thread); +-#ifdef PRODUCT ++ // Redefine with same bytecodes. This is a class that is only indirectly affected by redefinition, ++ // so the user did not specify a different bytecode for that class. 
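For a class that was not explicitly redefined, the surrounding find_class_bytes reuses the old bytes: from the JVMTI class-file cache if one was installed, otherwise by reconstituting a .class image from the VM's internal representation via JvmtiClassFileReconstituter. The decision itself reduces to (hypothetical helper with plain C++ types, not VM code):

    #include <vector>

    typedef std::vector<unsigned char> Bytes;

    // Reuse cached bytes when present, otherwise rebuild a .class image
    // from metadata (the role JvmtiClassFileReconstituter plays below).
    Bytes bytes_for_unchanged_class(const Bytes& cached, Bytes (*reconstitute)()) {
      return cached.empty() ? reconstitute() : cached;
    }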
++ ++ if (the_class->get_cached_class_file_bytes() == NULL) { ++ // not cached, we need to reconstitute the class file from VM representation ++ constantPoolHandle constants(Thread::current(), the_class->constants()); ++ ObjectLocker ol(constants, Thread::current()); // lock constant pool while we query it ++ ++ JvmtiClassFileReconstituter reconstituter(the_class); ++ if (reconstituter.get_error() != JVMTI_ERROR_NONE) { ++ return reconstituter.get_error(); ++ } ++ ++ *class_byte_count = (jint)reconstituter.class_file_size(); ++ *class_bytes = (unsigned char*)reconstituter.class_file_bytes(); ++ ++ TRACE_RC3("Reconstituted class bytes"); ++ ++ } else { ++ ++ // it is cached, get it from the cache ++ *class_byte_count = the_class->get_cached_class_file_len(); ++ *class_bytes = the_class->get_cached_class_file_bytes(); ++ ++ ++ TRACE_RC3("Retrieved cached class bytes"); ++ } ++ ++ } else { ++ ++ // Redefine with bytecodes at index j ++ *class_bytes = _class_defs[j].class_bytes; ++ *class_byte_count = _class_defs[j].class_byte_count; + } +-#endif ++ ++ return JVMTI_ERROR_NONE; + } + +-void VM_RedefineClasses::doit_epilogue() { +- // Free os::malloc allocated memory. +- // The memory allocated in redefine will be free'ed in next VM operation. +- os::free(_scratch_classes); +- +- if (RC_TRACE_ENABLED(0x00000004)) { +- // Used to have separate timers for "doit" and "all", but the timer +- // overhead skewed the measurements. +- jlong doit_time = _timer_rsc_phase1.milliseconds() + +- _timer_rsc_phase2.milliseconds(); +- jlong all_time = _timer_vm_op_prologue.milliseconds() + doit_time; +- +- RC_TRACE(0x00000004, ("vm_op: all=" UINT64_FORMAT +- " prologue=" UINT64_FORMAT " doit=" UINT64_FORMAT, all_time, +- _timer_vm_op_prologue.milliseconds(), doit_time)); +- RC_TRACE(0x00000004, +- ("redefine_single_class: phase1=" UINT64_FORMAT " phase2=" UINT64_FORMAT, +- _timer_rsc_phase1.milliseconds(), _timer_rsc_phase2.milliseconds())); ++// Prologue of the VM operation, called on the Java thread in parallel to normal program execution ++bool VM_RedefineClasses::doit_prologue() { ++ ++ _revision_number++; ++ TRACE_RC1("Redefinition with revision number %d started!", _revision_number); ++ lock_threads(); ++ ++ assert(Thread::current()->is_Java_thread(), "must be Java thread"); ++ RC_TIMER_START(_timer_prologue); ++ ++ if (!check_arguments()) { ++ RC_TIMER_STOP(_timer_prologue); ++ return false; + } +-} + +-bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { +- // classes for primitives cannot be redefined +- if (java_lang_Class::is_primitive(klass_mirror)) { ++ // We first load new class versions in the prologue, because somewhere down the ++ // call chain it is required that the current thread is a Java thread. 
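Loading is done in the prologue because code down the call chain requires a Java thread; only the destructive swap runs later at a safepoint. The phase split of the operation, as a skeleton (hook names as they appear in HotSpot VM operations and in this patch; everything else omitted):

    // Skeleton of the operation's phases (illustrative, not the real class):
    class VM_RedefineClassesSketch {
     public:
      // Runs on the requesting JavaThread, before the safepoint: parse,
      // load and link the new class versions; bail out cheaply on error.
      bool doit_prologue();
      // Runs in the VMThread at a safepoint: swap class metadata and
      // update existing instances while no Java code executes.
      void doit();
      // Back on the JavaThread: release resources, report timing.
      void doit_epilogue();
    };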
++ _new_classes = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceKlassHandle>(5, true);
++
++ assert(_affected_klasses == NULL, "");
++ _affected_klasses = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceKlassHandle>(_class_count, true);
++
++
++ _result = load_new_class_versions(Thread::current());
++
++ TRACE_RC1("Loaded new class versions!");
++ if (_result != JVMTI_ERROR_NONE) {
++ TRACE_RC1("error occurred: %d!", _result);
++ delete _new_classes;
++ _new_classes = NULL;
++ delete _affected_klasses;
++ _affected_klasses = NULL;
++ RC_TIMER_STOP(_timer_prologue);
+ return false;
+ }
+- klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror);
+- // classes for arrays cannot be redefined
+- if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) {
++
++ TRACE_RC2("nearly finished");
++ VM_GC_Operation::doit_prologue();
++ RC_TIMER_STOP(_timer_prologue);
++ TRACE_RC2("doit_prologue finished!");
++ return true;
++}
++
++// Checks basic properties of the arguments of the redefinition command.
++jvmtiError VM_RedefineClasses::check_arguments_error() {
++ if (_class_defs == NULL) return JVMTI_ERROR_NULL_POINTER;
++ for (int i = 0; i < _class_count; i++) {
++ if (_class_defs[i].klass == NULL) return JVMTI_ERROR_INVALID_CLASS;
++ if (_class_defs[i].class_byte_count == 0) return JVMTI_ERROR_INVALID_CLASS_FORMAT;
++ if (_class_defs[i].class_bytes == NULL) return JVMTI_ERROR_NULL_POINTER;
++ }
++ return JVMTI_ERROR_NONE;
++}
++
++// Returns false and sets a result error code if the redefinition should be aborted.
++bool VM_RedefineClasses::check_arguments() {
++ jvmtiError error = check_arguments_error();
++ if (error != JVMTI_ERROR_NONE || _class_count == 0) {
++ _result = error;
+ return false;
+ }
+ return true;
+ }
+
+-// Append the current entry at scratch_i in scratch_cp to *merge_cp_p
+-// where the end of *merge_cp_p is specified by *merge_cp_length_p. For
+-// direct CP entries, there is just the current entry to append. For
+-// indirect and double-indirect CP entries, there are zero or more
+-// referenced CP entries along with the current entry to append.
+-// Indirect and double-indirect CP entries are handled by recursive
+-// calls to append_entry() as needed. The referenced CP entries are
+-// always appended to *merge_cp_p before the referee CP entry. These
+-// referenced CP entries may already exist in *merge_cp_p in which case
+-// there is nothing extra to append and only the current entry is
+-// appended.
+-void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
+- int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p,
+- TRAPS) {
+-
+- // append is different depending on entry tag type
+- switch (scratch_cp->tag_at(scratch_i).value()) {
+-
+- // The old verifier is implemented outside the VM. It loads classes,
+- // but does not resolve constant pool entries directly so we never
+- // see Class entries here with the old verifier. Similarly the old
+- // verifier does not like Class entries in the input constant pool.
+- // The split-verifier is implemented in the VM so it can optionally
+- // and directly resolve constant pool entries to load classes. The
+- // split-verifier can accept either Class entries or UnresolvedClass
+- // entries in the input constant pool. We revert the appended copy
+- // back to UnresolvedClass so that either verifier will be happy
+- // with the constant pool entry.
+- case JVM_CONSTANT_Class: +- { +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- (*merge_cp_p)->unresolved_klass_at_put(*merge_cp_length_p, +- scratch_cp->klass_name_at(scratch_i)); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++jvmtiError VM_RedefineClasses::check_exception() const { ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ TRACE_RC1("parse_stream exception: '%s'", ex_name->as_C_string()); ++ if (TraceRedefineClasses >= 1) { ++ java_lang_Throwable::print(PENDING_EXCEPTION, tty); ++ tty->print_cr(""); ++ } ++ CLEAR_PENDING_EXCEPTION; ++ ++ if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { ++ return JVMTI_ERROR_UNSUPPORTED_VERSION; ++ } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { ++ return JVMTI_ERROR_INVALID_CLASS_FORMAT; ++ } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; ++ } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { ++ // The message will be "XXX (wrong name: YYY)" ++ return JVMTI_ERROR_NAMES_DONT_MATCH; ++ } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { ++ return JVMTI_ERROR_OUT_OF_MEMORY; ++ } else { ++ // Just in case more exceptions can be thrown.. ++ return JVMTI_ERROR_FAILS_VERIFICATION; ++ } ++ } + +- // these are direct CP entries so they can be directly appended, +- // but double and long take two constant pool entries +- case JVM_CONSTANT_Double: // fall through +- case JVM_CONSTANT_Long: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); ++ return JVMTI_ERROR_NONE; ++} + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p) += 2; +- } break; +- +- // these are direct CP entries so they can be directly appended +- case JVM_CONSTANT_Float: // fall through +- case JVM_CONSTANT_Integer: // fall through +- case JVM_CONSTANT_Utf8: // fall through +- +- // This was an indirect CP entry, but it has been changed into +- // an interned string so this entry can be directly appended. +- case JVM_CONSTANT_String: // fall through +- +- // These were indirect CP entries, but they have been changed into +- // Symbol*s so these entries can be directly appended. +- case JVM_CONSTANT_UnresolvedClass: // fall through +- case JVM_CONSTANT_UnresolvedString: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); ++// Loads all new class versions and stores the instanceKlass handles in an array. ++jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. 
+- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++ ResourceMark rm(THREAD); + +- // this is an indirect CP entry so it needs special handling +- case JVM_CONSTANT_NameAndType: +- { +- int name_ref_i = scratch_cp->name_ref_index_at(scratch_i); +- int new_name_ref_i = 0; +- bool match = (name_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_ref_i, *merge_cp_p, name_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_ref_i = found_i; +- map_index(scratch_cp, name_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_name_ref_i = *merge_cp_length_p - 1; ++ TRACE_RC1("==================================================================="); ++ TRACE_RC1("redefinition started by thread \"%s\"", THREAD->name()); ++ TRACE_RC1("load new class versions (%d)", _class_count); ++ ++ // Retrieve an array of all classes that need to be redefined ++ jvmtiError err = find_sorted_affected_classes(); ++ if (err != JVMTI_ERROR_NONE) { ++ TRACE_RC1("Error finding sorted affected classes: %d", (int)err); ++ return err; ++ } ++ ++ ++ JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); ++ ++ _max_redefinition_flags = Klass::NoRedefinition; ++ jvmtiError result = JVMTI_ERROR_NONE; ++ ++ for (int i=0; i<_affected_klasses->length(); i++) { ++ TRACE_RC2("Processing affected class %d of %d", i+1, _affected_klasses->length()); ++ ++ instanceKlassHandle the_class = _affected_klasses->at(i); ++ TRACE_RC2("name=%s", the_class->name()->as_C_string()); ++ ++ the_class->link_class(THREAD); ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ // Find new class bytes ++ const unsigned char* class_bytes; ++ jint class_byte_count; ++ jvmtiError error; ++ jboolean not_changed; ++ if ((error = find_class_bytes(the_class, &class_bytes, &class_byte_count, ¬_changed)) != JVMTI_ERROR_NONE) { ++ TRACE_RC1("Error finding class bytes: %d", (int)error); ++ result = error; ++ break; ++ } ++ assert(class_bytes != NULL && class_byte_count != 0, "Class bytes defined at this point!"); ++ ++ ++ // Set redefined class handle in JvmtiThreadState class. ++ // This redefined class is sent to agent event handler for class file ++ // load hook event. ++ state->set_class_being_redefined(&the_class, _class_load_kind); ++ ++ TRACE_RC2("Before resolving from stream"); ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_loading); ++ ++ ++ // Parse the stream. 
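The parse below is bracketed by set_class_being_redefined / clear_class_being_redefined so the class-file load hook sees the redefinition; the patch clears the mark explicitly afterwards. A guard object is one way to make the clear unconditional on every exit path; a minimal sketch with hypothetical types (not the VM's JvmtiThreadState):

    // Hypothetical minimal state holder.
    struct RedefineState {
      const void* class_being_redefined;
      RedefineState() : class_being_redefined(0) {}
    };

    // RAII guard: the mark is cleared on every path out of the scope.
    struct RedefineMark {
      RedefineState& state;
      RedefineMark(RedefineState& s, const void* klass) : state(s) {
        state.class_being_redefined = klass;
      }
      ~RedefineMark() { state.class_being_redefined = 0; }
    };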
++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ClassFileStream st((u1*) class_bytes, class_byte_count, (char *)"__VM_RedefineClasses__"); ++ instanceKlassHandle new_class(THREAD, SystemDictionary::resolve_from_stream(the_class->name(), ++ the_class_loader, ++ protection_domain, ++ &st, ++ true, ++ the_class, ++ THREAD)); ++ ++ RC_TIMER_STOP(_timer_class_loading); ++ RC_TIMER_START(_timer_prologue); ++ ++ TRACE_RC2("After resolving class from stream!"); ++ // Clear class_being_redefined just to be sure. ++ state->clear_class_being_redefined(); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ not_changed = false; ++ ++#ifdef ASSERT ++ ++ assert(new_class() != NULL, "Class could not be loaded!"); ++ assert(new_class() != the_class(), "must be different"); ++ assert(new_class->new_version() == NULL && new_class->old_version() != NULL, ""); ++ ++ ++ objArrayOop k_interfaces = new_class->local_interfaces(); ++ for (int j=0; j<k_interfaces->length(); j++) { ++ assert(((klassOop)k_interfaces->obj_at(j))->klass_part()->is_newest_version(), "just checking"); ++ } ++ ++ if (!THREAD->is_Compiler_thread()) { ++ ++ TRACE_RC2("name=%s loader="INTPTR_FORMAT" protection_domain="INTPTR_FORMAT, the_class->name()->as_C_string(), the_class->class_loader(), the_class->protection_domain()); ++ // If we are on the compiler thread, we must not try to resolve a class. ++ klassOop systemLookup = SystemDictionary::resolve_or_null(the_class->name(), the_class->class_loader(), the_class->protection_domain(), THREAD); ++ ++ if (systemLookup != NULL) { ++ assert(systemLookup == new_class->old_version(), "Old class must be in system dictionary!"); ++ ++ ++ Klass *subklass = new_class()->klass_part()->subklass(); ++ while (subklass != NULL) { ++ assert(subklass->new_version() == NULL, "Most recent version of class!"); ++ subklass = subklass->next_sibling(); + } ++ } else { ++ // This can happen for reflection generated classes.. ? ++ CLEAR_PENDING_EXCEPTION; + } ++ } + +- int signature_ref_i = scratch_cp->signature_ref_index_at(scratch_i); +- int new_signature_ref_i = 0; +- match = (signature_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(signature_ref_i, *merge_cp_p, +- signature_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(signature_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != signature_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_signature_ref_i = found_i; +- map_index(scratch_cp, signature_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, signature_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. 
+- new_signature_ref_i = *merge_cp_length_p - 1; +- } ++#endif ++ ++ IF_TRACE_RC1 { ++ if (new_class->layout_helper() != the_class->layout_helper()) { ++ TRACE_RC1("Instance size change for class %s: new=%d old=%d", new_class->name()->as_C_string(), new_class->layout_helper(), the_class->layout_helper()); + } ++ } + +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_name_ref_i and new_signature_ref_i will both be 0. +- // In that case, all we are appending is the current entry. +- if (new_name_ref_i == 0) { +- new_name_ref_i = name_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d name_ref_index change: %d to %d", +- *merge_cp_length_p, name_ref_i, new_name_ref_i)); ++ // Set the new version of the class ++ new_class->set_revision_number(_revision_number); ++ new_class->set_redefinition_index(i); ++ the_class->set_new_version(new_class()); ++ _new_classes->append(new_class); ++ ++ assert(new_class->new_version() == NULL, ""); ++ ++ int redefinition_flags = Klass::NoRedefinition; ++ ++ if (not_changed) { ++ redefinition_flags = Klass::NoRedefinition; ++ } else if (AllowAdvancedClassRedefinition) { ++ redefinition_flags = calculate_redefinition_flags(new_class); ++ if (redefinition_flags >= Klass::RemoveSuperType) { ++ TRACE_RC1("Remove super type is not allowed"); ++ result = JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ break; + } +- if (new_signature_ref_i == 0) { +- new_signature_ref_i = signature_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d signature_ref_index change: %d to %d", +- *merge_cp_length_p, signature_ref_i, new_signature_ref_i)); ++ } else { ++ jvmtiError allowed = check_redefinition_allowed(new_class); ++ if (allowed != JVMTI_ERROR_NONE) { ++ TRACE_RC1("Error redefinition not allowed!"); ++ result = allowed; ++ break; + } ++ redefinition_flags = Klass::ModifyClass; ++ } + +- (*merge_cp_p)->name_and_type_at_put(*merge_cp_length_p, +- new_name_ref_i, new_signature_ref_i); +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); ++ if (new_class->super() != NULL) { ++ redefinition_flags = redefinition_flags | new_class->super()->klass_part()->redefinition_flags(); ++ } ++ ++ for (int j=0; j<new_class->local_interfaces()->length(); j++) { ++ redefinition_flags = redefinition_flags | ((klassOop)new_class->local_interfaces()->obj_at(j))->klass_part()->redefinition_flags(); ++ } ++ ++ new_class->set_redefinition_flags(redefinition_flags); ++ ++ _max_redefinition_flags = _max_redefinition_flags | redefinition_flags; ++ ++ if ((redefinition_flags & Klass::ModifyInstances) != 0) { ++ // TODO: Check if watch access flags of static fields are updated correctly. 
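redefinition_flags is a bitmask: a class inherits, by OR, the effects computed for its superclass and each local interface, and _max_redefinition_flags accumulates the union over all classes (both combinations are visible above). In miniature, with made-up flag values (the real constants live on Klass):

    // Illustrative flag bits only; the patch defines the real ones on Klass.
    enum {
      NoRedefinition  = 0,
      ModifyClass     = 1,  // vtable/metadata layout changed
      ModifyInstances = 2,  // field layout changed, instances must be updated
      RemoveSuperType = 4   // a supertype was dropped (rejected elsewhere)
    };

    // Fold in the effects inherited from the superclass and the interfaces.
    int combined_flags(int own, int super_flags,
                       const int* iface_flags, int iface_count) {
      int flags = own | super_flags;
      for (int i = 0; i < iface_count; i++)
        flags |= iface_flags[i];
      return flags;
    }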
++ calculate_instance_update_information(_new_classes->at(i)()); ++ } else { ++ // Fields were not changed, transfer special flags only ++ assert(new_class->layout_helper() >> 1 == new_class->old_version()->klass_part()->layout_helper() >> 1, "must be equal"); ++ assert(new_class->fields()->length() == ((instanceKlass*)new_class->old_version()->klass_part())->fields()->length(), "must be equal"); ++ ++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ AccessFlags flags = new_fs.access_flags(); ++ flags.set_is_field_modification_watched(old_fs.access_flags().is_field_modification_watched()); ++ flags.set_is_field_access_watched(old_fs.access_flags().is_field_access_watched()); ++ new_fs.set_access_flags(flags); + } +- (*merge_cp_length_p)++; +- } break; ++ } + +- // this is a double-indirect CP entry so it needs special handling +- case JVM_CONSTANT_Fieldref: // fall through +- case JVM_CONSTANT_InterfaceMethodref: // fall through +- case JVM_CONSTANT_Methodref: +- { +- int klass_ref_i = scratch_cp->uncached_klass_ref_index_at(scratch_i); +- int new_klass_ref_i = 0; +- bool match = (klass_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(klass_ref_i, *merge_cp_p, klass_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(klass_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != klass_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_klass_ref_i = found_i; +- map_index(scratch_cp, klass_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, klass_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. Without the optimization where we +- // use JVM_CONSTANT_UnresolvedClass, then up to two entries +- // could be appended. +- new_klass_ref_i = *merge_cp_length_p - 1; +- } ++ IF_TRACE_RC3 { ++ if (new_class->super() != NULL) { ++ TRACE_RC3("Super class is %s", new_class->super()->klass_part()->name()->as_C_string()); + } ++ } + +- int name_and_type_ref_i = +- scratch_cp->uncached_name_and_type_ref_index_at(scratch_i); +- int new_name_and_type_ref_i = 0; +- match = (name_and_type_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_and_type_ref_i, *merge_cp_p, +- name_and_type_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_and_type_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_and_type_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. 
+- new_name_and_type_ref_i = found_i; +- map_index(scratch_cp, name_and_type_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_and_type_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can append more than +- // one entry so the post call query of *merge_cp_length_p +- // is required in order to get the right index for the +- // JVM_CONSTANT_NameAndType entry. +- new_name_and_type_ref_i = *merge_cp_length_p - 1; ++#ifdef ASSERT ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->new_version() == NULL, "Super klass must be newest version!"); ++ ++ the_class->vtable()->verify(tty); ++ new_class->vtable()->verify(tty); ++#endif ++ ++ TRACE_RC2("Verification done!"); ++ ++ if (i == _affected_klasses->length() - 1) { ++ ++ // This was the last class processed => check if additional classes have been loaded in the meantime ++ ++ for (int j=0; j<_affected_klasses->length(); j++) { ++ ++ klassOop initial_klass = _affected_klasses->at(j)(); ++ Klass *initial_subklass = initial_klass->klass_part()->subklass(); ++ Klass *cur_klass = initial_subklass; ++ while(cur_klass != NULL) { ++ ++ if(cur_klass->oop_is_instance() && cur_klass->is_newest_version() && !cur_klass->is_redefining()) { ++ instanceKlassHandle handle(THREAD, cur_klass->as_klassOop()); ++ if (!_affected_klasses->contains(handle)) { ++ ++ int k = i + 1; ++ for (; k<_affected_klasses->length(); k++) { ++ if (_affected_klasses->at(k)->is_subtype_of(cur_klass->as_klassOop())) { ++ break; ++ } ++ } ++ _affected_klasses->insert_before(k, handle); ++ TRACE_RC2("Adding newly loaded class to affected classes: %s", cur_klass->name()->as_C_string()); ++ } ++ } ++ ++ cur_klass = cur_klass->next_sibling(); + } + } + +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_klass_ref_i and new_name_and_type_ref_i will both be +- // 0. In that case, all we are appending is the current entry. 
+- if (new_klass_ref_i == 0) { +- new_klass_ref_i = klass_ref_i; +- } +- if (new_name_and_type_ref_i == 0) { +- new_name_and_type_ref_i = name_and_type_ref_i; +- } ++ int new_count = _affected_klasses->length() - 1 - i; ++ if (new_count != 0) { + +- const char *entry_name; +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Fieldref: +- entry_name = "Fieldref"; +- (*merge_cp_p)->field_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_InterfaceMethodref: +- entry_name = "IFMethodref"; +- (*merge_cp_p)->interface_method_at_put(*merge_cp_length_p, +- new_klass_ref_i, new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_Methodref: +- entry_name = "Methodref"; +- (*merge_cp_p)->method_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- default: +- guarantee(false, "bad switch"); +- break; ++ TRACE_RC1("Found new number of affected classes: %d", new_count); + } ++ } ++ } + +- if (klass_ref_i != new_klass_ref_i) { +- RC_TRACE(0x00080000, ("%s entry@%d class_index changed: %d to %d", +- entry_name, *merge_cp_length_p, klass_ref_i, new_klass_ref_i)); +- } +- if (name_and_type_ref_i != new_name_and_type_ref_i) { +- RC_TRACE(0x00080000, +- ("%s entry@%d name_and_type_index changed: %d to %d", +- entry_name, *merge_cp_length_p, name_and_type_ref_i, +- new_name_and_type_ref_i)); +- } ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_linking); ++ // Link and verify new classes _after_ all classes have been updated in the system dictionary! ++ for (int i=0; i<_affected_klasses->length(); i++) { ++ instanceKlassHandle the_class = _affected_klasses->at(i); ++ instanceKlassHandle new_class(the_class->new_version()); + +- // At this stage, Class or UnresolvedClass could be here, but not +- // ClassIndex +- case JVM_CONSTANT_ClassIndex: // fall through ++ TRACE_RC2("Linking class %d/%d %s", i, _affected_klasses->length(), the_class->name()->as_C_string()); ++ new_class->link_class(THREAD); + +- // Invalid is used as the tag for the second constant pool entry +- // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should +- // not be seen by itself. 
+- case JVM_CONSTANT_Invalid: // fall through ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ } ++ RC_TIMER_STOP(_timer_class_linking); ++ RC_TIMER_START(_timer_prologue); + +- // At this stage, String or UnresolvedString could be here, but not +- // StringIndex +- case JVM_CONSTANT_StringIndex: // fall through ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } + +- // At this stage JVM_CONSTANT_UnresolvedClassInError should not be +- // here +- case JVM_CONSTANT_UnresolvedClassInError: // fall through ++ TRACE_RC2("All classes loaded!"); + +- default: +- { +- // leave a breadcrumb +- jbyte bad_value = scratch_cp->tag_at(scratch_i).value(); +- ShouldNotReachHere(); +- } break; +- } // end switch tag value +-} // end append_entry() ++#ifdef ASSERT ++ for (int i=0; i<_affected_klasses->length(); i++) { ++ instanceKlassHandle the_class = _affected_klasses->at(i); ++ assert(the_class->new_version() != NULL, "Must have been redefined"); ++ instanceKlassHandle new_version = instanceKlassHandle(THREAD, the_class->new_version()); ++ assert(new_version->new_version() == NULL, "Must be newest version"); + ++ if (!(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL)) { ++ new_version()->print(); ++ new_version->super()->print(); ++ } ++ assert(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL, "Super class must be newest version"); ++ } + +-void VM_RedefineClasses::swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class) { +- typeArrayOop save; ++ SystemDictionary::classes_do(check_class, THREAD); + +- save = scratch_class->get_method_annotations_of(i); +- scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); +- scratch_class->set_method_annotations_of(j, save); ++#endif + +- save = scratch_class->get_method_parameter_annotations_of(i); +- scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); +- scratch_class->set_method_parameter_annotations_of(j, save); ++ TRACE_RC1("Finished verification!"); ++ return JVMTI_ERROR_NONE; ++} + +- save = scratch_class->get_method_default_annotations_of(i); +- scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); +- scratch_class->set_method_default_annotations_of(j, save); ++void VM_RedefineClasses::lock_threads() { ++ ++ RC_TIMER_START(_timer_wait_for_locks); ++ ++ ++ JavaThread *javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->set_should_bailout(true); ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ int cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->compilation_mutex()->lock(); ++ cnt++; ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ TRACE_RC2("Locked %d compiler threads", cnt); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ javaThread->redefine_classes_mutex()->lock(); ++ cnt++; ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ ++ TRACE_RC2("Locked %d threads", cnt); ++ ++ RC_TIMER_STOP(_timer_wait_for_locks); ++} ++ ++void 
VM_RedefineClasses::unlock_threads() { ++ ++ int cnt = 0; ++ JavaThread *javaThread = Threads::first(); ++ Thread *thread = Thread::current(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ if (compilerThread->compilation_mutex()->owned_by_self()) { ++ compilerThread->compilation_mutex()->unlock(); ++ cnt++; ++ } ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ TRACE_RC2("Unlocked %d compiler threads", cnt); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ if (javaThread->redefine_classes_mutex()->owned_by_self()) { ++ javaThread->redefine_classes_mutex()->unlock(); ++ cnt++; ++ } ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ TRACE_RC2("Unlocked %d threads", cnt); + } + ++jvmtiError VM_RedefineClasses::check_redefinition_allowed(instanceKlassHandle scratch_class) { ++ ++ ++ ++ // Compatibility mode => check for unsupported modification ++ ++ ++ assert(scratch_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(scratch_class->old_version()); + +-jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( +- instanceKlassHandle the_class, +- instanceKlassHandle scratch_class) { + int i; + + // Check superclasses, or rather their names, since superclasses themselves can be + // requested to replace. + // Check for NULL superclass first since this might be java.lang.Object + if (the_class->super() != scratch_class->super() && +- (the_class->super() == NULL || scratch_class->super() == NULL || +- Klass::cast(the_class->super())->name() != +- Klass::cast(scratch_class->super())->name())) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ (the_class->super() == NULL || scratch_class->super() == NULL || ++ Klass::cast(the_class->super())->name() != ++ Klass::cast(scratch_class->super())->name())) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + + // Check if the number, names and order of directly implemented interfaces are the same. +@@ -539,8 +718,8 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + } + for (i = 0; i < n_intfs; i++) { + if (Klass::cast((klassOop) k_interfaces->obj_at(i))->name() != +- Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + } + +@@ -578,14 +757,283 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + Symbol* name_sym2 = scratch_class->constants()->symbol_at(new_fs.name_index()); + Symbol* sig_sym2 = scratch_class->constants()->symbol_at(new_fs.signature_index()); + if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ } ++ } ++ ++ // If both streams aren't done then we have a differing number of ++ // fields. ++ if (!old_fs.done() || !new_fs.done()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ } ++ ++ // Do a parallel walk through the old and new methods. Detect ++ // cases where they match (exist in both), have been added in ++ // the new methods, or have been deleted (exist only in the ++ // old methods). 
The class file parser places methods in order ++ // by method name, but does not order overloaded methods by ++ // signature. In order to determine what fate befell the methods, ++ // this code places the overloaded new methods that have matching ++ // old methods in the same order as the old methods and places ++ // new overloaded methods at the end of overloaded methods of ++ // that name. The code for this order normalization is adapted ++ // from the algorithm used in instanceKlass::find_method(). ++ // Since we are swapping out of order entries as we find them, ++ // we only have to search forward through the overloaded methods. ++ // Methods which are added and have the same name as an existing ++ // method (but different signature) will be put at the end of ++ // the methods with that name, and the name mismatch code will ++ // handle them. ++ objArrayHandle k_old_methods(the_class->methods()); ++ objArrayHandle k_new_methods(scratch_class->methods()); ++ int n_old_methods = k_old_methods->length(); ++ int n_new_methods = k_new_methods->length(); ++ ++ int ni = 0; ++ int oi = 0; ++ while (true) { ++ methodOop k_old_method; ++ methodOop k_new_method; ++ enum { matched, added, deleted, undetermined } method_was = undetermined; ++ ++ if (oi >= n_old_methods) { ++ if (ni >= n_new_methods) { ++ break; // we've looked at everything, done ++ } ++ // New method at the end ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ method_was = added; ++ } else if (ni >= n_new_methods) { ++ // Old method, at the end, is deleted ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ method_was = deleted; ++ } else { ++ // There are more methods in both the old and new lists ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ if (k_old_method->name() != k_new_method->name()) { ++ // Methods are sorted by method name, so a mismatch means added ++ // or deleted ++ if (k_old_method->name()->fast_compare(k_new_method->name()) > 0) { ++ method_was = added; ++ } else { ++ method_was = deleted; ++ } ++ } else if (k_old_method->signature() == k_new_method->signature()) { ++ // Both the name and signature match ++ method_was = matched; ++ } else { ++ // The name matches, but the signature doesn't, which means we have to ++ // search forward through the new overloaded methods. 
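The forward search announced by this comment, and the swap that realigns a matching overload, look as follows when reduced to plain C++ (lexicographic name order stands in for the Symbol fast_compare used by the VM; all types here are illustrative):

    #include <algorithm>
    #include <string>
    #include <vector>

    struct MethodRef { std::string name; std::string sig; };
    enum Fate { MATCHED, ADDED, DELETED };

    // Parallel walk over method lists sorted by name; a matching overload
    // found ahead of ni is swapped back to ni so the lists stay aligned.
    void classify(std::vector<MethodRef>& olds, std::vector<MethodRef>& news,
                  std::vector<Fate>& fates) {
      size_t oi = 0, ni = 0;
      while (oi < olds.size() || ni < news.size()) {
        if (oi == olds.size())      { fates.push_back(ADDED);   ni++; }
        else if (ni == news.size()) { fates.push_back(DELETED); oi++; }
        else if (olds[oi].name != news[ni].name) {
          // Sorted by name, so a mismatch means one side lacks this name.
          if (olds[oi].name > news[ni].name) { fates.push_back(ADDED);   ni++; }
          else                               { fates.push_back(DELETED); oi++; }
        } else if (olds[oi].sig == news[ni].sig) {
          fates.push_back(MATCHED); oi++; ni++;
        } else {
          // Same name, different signature: scan the remaining overloads.
          size_t nj = ni + 1;
          while (nj < news.size() && news[nj].name == olds[oi].name &&
                 news[nj].sig != olds[oi].sig) nj++;
          if (nj < news.size() && news[nj].name == olds[oi].name) {
            std::swap(news[ni], news[nj]);       // align the match at ni
            fates.push_back(MATCHED); oi++; ni++;
          } else {
            fates.push_back(DELETED); oi++;      // no overload matched
          }
        }
      }
    }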
++ int nj; // outside the loop for post-loop check ++ for (nj = ni + 1; nj < n_new_methods; nj++) { ++ methodOop m = (methodOop)k_new_methods->obj_at(nj); ++ if (k_old_method->name() != m->name()) { ++ // reached another method name so no more overloaded methods ++ method_was = deleted; ++ break; ++ } ++ if (k_old_method->signature() == m->signature()) { ++ // found a match so swap the methods ++ k_new_methods->obj_at_put(ni, m); ++ k_new_methods->obj_at_put(nj, k_new_method); ++ k_new_method = m; ++ method_was = matched; ++ break; ++ } ++ } ++ ++ if (nj >= n_new_methods) { ++ // reached the end without a match; so method was deleted ++ method_was = deleted; ++ } ++ } ++ } ++ ++ switch (method_was) { ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED; ++ } ++ { ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = scratch_class->method_with_idnum(old_num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(old_num); ++ } ++ } ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ } ++ { ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ } ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = scratch_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ } ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; ++ } ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ return JVMTI_ERROR_NONE; ++} ++ ++int VM_RedefineClasses::calculate_redefinition_flags(instanceKlassHandle new_class) { ++ ++ int result = Klass::NoRedefinition; ++ ++ ++ ++ TRACE_RC2("Comparing different class versions of class %s", new_class->name()->as_C_string()); ++ ++ assert(new_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(new_class->old_version()); ++ ++ // Check whether class is in the error init state. 
++ if (the_class->is_in_error_state()) { ++ // TBD #5057930: special error code is needed in 1.6 ++ //result = Klass::union_redefinition_level(result, Klass::Invalid); ++ } ++ ++ int i; ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check superclasses ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->is_newest_version(), ""); ++ if (the_class->super() != new_class->super()) { ++ // Super class changed ++ ++ klassOop cur_klass = the_class->super(); ++ while (cur_klass != NULL) { ++ if (!new_class->is_subclass_of(cur_klass->klass_part()->newest_version())) { ++ TRACE_RC2("Removed super class %s", cur_klass->klass_part()->name()->as_C_string()); ++ result = result | Klass::RemoveSuperType | Klass::ModifyInstances | Klass::ModifyClass; ++ ++ if (!cur_klass->klass_part()->has_subtype_changed()) { ++ TRACE_RC2("Subtype changed of class %s", cur_klass->klass_part()->name()->as_C_string()); ++ cur_klass->klass_part()->set_subtype_changed(true); ++ } ++ } ++ ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ ++ cur_klass = new_class->super(); ++ while (cur_klass != NULL) { ++ if (!the_class->is_subclass_of(cur_klass->klass_part()->old_version())) { ++ TRACE_RC2("Added super class %s", cur_klass->klass_part()->name()->as_C_string()); ++ result = result | Klass::ModifyClass | Klass::ModifyInstances; ++ } ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ } ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check interfaces ++ ++ // Interfaces removed? ++ objArrayOop old_interfaces = the_class->transitive_interfaces(); ++ for (i = 0; i<old_interfaces->length(); i++) { ++ instanceKlassHandle old_interface((klassOop)old_interfaces->obj_at(i)); ++ if (!new_class->implements_interface_any_version(old_interface())) { ++ result = result | Klass::RemoveSuperType | Klass::ModifyClass; ++ TRACE_RC2("Removed interface %s", old_interface->name()->as_C_string()); ++ ++ if (!old_interface->has_subtype_changed()) { ++ TRACE_RC2("Subtype changed of interface %s", old_interface->name()->as_C_string()); ++ old_interface->set_subtype_changed(true); ++ } ++ } ++ } ++ ++ // Interfaces added? ++ objArrayOop new_interfaces = new_class->transitive_interfaces(); ++ for (i = 0; i<new_interfaces->length(); i++) { ++ if (!the_class->implements_interface_any_version((klassOop)new_interfaces->obj_at(i))) { ++ result = result | Klass::ModifyClass; ++ TRACE_RC2("Added interface %s", ((klassOop)new_interfaces->obj_at(i))->klass_part()->name()->as_C_string()); ++ } ++ } ++ ++ ++ // Check whether class modifiers are the same. ++ jushort old_flags = (jushort) the_class->access_flags().get_flags(); ++ jushort new_flags = (jushort) new_class->access_flags().get_flags(); ++ if (old_flags != new_flags) { ++ // TODO (tw): Can this have any effects? ++ } ++ ++ // Check if the number, names, types and order of fields declared in these classes ++ // are the same. ++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ // access ++ old_flags = old_fs.access_flags().as_short(); ++ new_flags = new_fs.access_flags().as_short(); ++ if ((old_flags ^ new_flags) & JVM_RECOGNIZED_FIELD_MODIFIERS) { ++ // TODO (tw) can this have any effect? 
++ } ++ // offset ++ if (old_fs.offset() != new_fs.offset()) { ++ result = result | Klass::ModifyInstances; ++ } ++ // name and signature ++ Symbol* name_sym1 = the_class->constants()->symbol_at(old_fs.name_index()); ++ Symbol* sig_sym1 = the_class->constants()->symbol_at(old_fs.signature_index()); ++ Symbol* name_sym2 = new_class->constants()->symbol_at(new_fs.name_index()); ++ Symbol* sig_sym2 = new_class->constants()->symbol_at(new_fs.signature_index()); ++ if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { ++ result = result | Klass::ModifyInstances; + } + } + + // If both streams aren't done then we have a differing number of + // fields. + if (!old_fs.done() || !new_fs.done()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ result = result | Klass::ModifyInstances; + } + + // Do a parallel walk through the old and new methods. Detect +@@ -606,7 +1054,7 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + // the methods with that name, and the name mismatch code will + // handle them. + objArrayHandle k_old_methods(the_class->methods()); +- objArrayHandle k_new_methods(scratch_class->methods()); ++ objArrayHandle k_new_methods(new_class->methods()); + int n_old_methods = k_old_methods->length(); + int n_new_methods = k_new_methods->length(); + +@@ -672,2278 +1120,701 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + } + + switch (method_was) { +- case matched: +- // methods match, be sure modifiers do too +- old_flags = (jushort) k_old_method->access_flags().get_flags(); +- new_flags = (jushort) k_new_method->access_flags().get_flags(); +- if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED; +- } +- { +- u2 new_num = k_new_method->method_idnum(); +- u2 old_num = k_old_method->method_idnum(); +- if (new_num != old_num) { +- methodOop idnum_owner = scratch_class->method_with_idnum(old_num); +- if (idnum_owner != NULL) { +- // There is already a method assigned this idnum -- switch them +- idnum_owner->set_method_idnum(new_num); +- } +- k_new_method->set_method_idnum(old_num); +- swap_all_method_annotations(old_num, new_num, scratch_class); +- } +- } +- RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni, +- k_old_method->name_and_sig_as_C_string(), oi)); +- // advance to next pair of methods +- ++oi; +- ++ni; +- break; +- case added: +- // method added, see if it is OK +- new_flags = (jushort) k_new_method->access_flags().get_flags(); +- if ((new_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // new methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; +- } +- { +- u2 num = the_class->next_method_idnum(); +- if (num == constMethodOopDesc::UNSET_IDNUM) { +- // cannot add any more methods +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; +- } +- u2 new_num = k_new_method->method_idnum(); +- methodOop idnum_owner = scratch_class->method_with_idnum(num); ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ // (tw) Can this have any effects? Probably yes on vtables? 
++ result = result | Klass::ModifyClass; ++ } ++ { ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = new_class->method_with_idnum(old_num); + if (idnum_owner != NULL) { + // There is already a method assigned this idnum -- switch them + idnum_owner->set_method_idnum(new_num); + } +- k_new_method->set_method_idnum(num); +- swap_all_method_annotations(new_num, num, scratch_class); +- } +- RC_TRACE(0x00008000, ("Method added: new: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni)); +- ++ni; // advance to next new method +- break; +- case deleted: +- // method deleted, see if it is OK +- old_flags = (jushort) k_old_method->access_flags().get_flags(); +- if ((old_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // deleted methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; +- } +- RC_TRACE(0x00008000, ("Method deleted: old: %s [%d]", +- k_old_method->name_and_sig_as_C_string(), oi)); +- ++oi; // advance to next old method +- break; +- default: +- ShouldNotReachHere(); +- } +- } +- +- return JVMTI_ERROR_NONE; +-} +- +- +-// Find new constant pool index value for old constant pool index value +-// by seaching the index map. Returns zero (0) if there is no mapped +-// value for the old constant pool index. +-int VM_RedefineClasses::find_new_index(int old_index) { +- if (_index_map_count == 0) { +- // map is empty so nothing can be found +- return 0; +- } +- +- if (old_index < 1 || old_index >= _index_map_p->length()) { +- // The old_index is out of range so it is not mapped. This should +- // not happen in regular constant pool merging use, but it can +- // happen if a corrupt annotation is processed. +- return 0; +- } +- +- int value = _index_map_p->at(old_index); +- if (value == -1) { +- // the old_index is not mapped +- return 0; +- } +- +- return value; +-} // end find_new_index() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// class pair. Otherwise, returns false. +-bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_Class && t1 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_Class && t2 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->klass_name_at(index1)->as_C_string(); +- char *s2 = cp2->klass_name_at(index2)->as_C_string(); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_class_mismatch() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// string pair. Otherwise, returns false. 
+-bool VM_RedefineClasses::is_unresolved_string_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_String && t1 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_String && t2 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->string_at_noresolve(index1); +- char *s2 = cp2->string_at_noresolve(index2); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_string_mismatch() +- +- +-jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { +- // For consistency allocate memory using os::malloc wrapper. +- _scratch_classes = (instanceKlassHandle *) +- os::malloc(sizeof(instanceKlassHandle) * _class_count, mtInternal); +- if (_scratch_classes == NULL) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } +- +- ResourceMark rm(THREAD); +- +- JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); +- // state can only be NULL if the current thread is exiting which +- // should not happen since we're trying to do a RedefineClasses +- guarantee(state != NULL, "exiting thread calling load_new_class_versions"); +- for (int i = 0; i < _class_count; i++) { +- oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); +- // classes for primitives cannot be redefined +- if (!is_modifiable_class(mirror)) { +- return JVMTI_ERROR_UNMODIFIABLE_CLASS; +- } +- klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- Symbol* the_class_sym = the_class->name(); +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loading name=%s kind=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), _class_load_kind, +- os::available_memory() >> 10)); +- +- ClassFileStream st((u1*) _class_defs[i].class_bytes, +- _class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); +- +- // Parse the stream. +- Handle the_class_loader(THREAD, the_class->class_loader()); +- Handle protection_domain(THREAD, the_class->protection_domain()); +- // Set redefined class handle in JvmtiThreadState class. +- // This redefined class is sent to agent event handler for class file +- // load hook event. +- state->set_class_being_redefined(&the_class, _class_load_kind); +- +- klassOop k = SystemDictionary::parse_stream(the_class_sym, +- the_class_loader, +- protection_domain, +- &st, +- THREAD); +- // Clear class_being_redefined just to be sure. 
+- state->clear_class_being_redefined(); +- +- // TODO: if this is retransform, and nothing changed we can skip it +- +- instanceKlassHandle scratch_class (THREAD, k); +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("parse_stream exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- +- if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { +- return JVMTI_ERROR_UNSUPPORTED_VERSION; +- } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { +- return JVMTI_ERROR_INVALID_CLASS_FORMAT; +- } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { +- return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; +- } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { +- // The message will be "XXX (wrong name: YYY)" +- return JVMTI_ERROR_NAMES_DONT_MATCH; +- } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { // Just in case more exceptions can be thrown.. +- return JVMTI_ERROR_FAILS_VERIFICATION; +- } +- } +- +- // Ensure class is linked before redefine +- if (!the_class->is_linked()) { +- the_class->link_class(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("link_class exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- return JVMTI_ERROR_INTERNAL; +- } ++ k_new_method->set_method_idnum(old_num); ++ TRACE_RC2("swapping idnum of new and old method %d / %d!", new_num, old_num); ++ swap_all_method_annotations(old_num, new_num, new_class); + } + } +- +- // Do the validity checks in compare_and_normalize_class_versions() +- // before verifying the byte codes. By doing these checks first, we +- // limit the number of functions that require redirection from +- // the_class to scratch_class. In particular, we don't have to +- // modify JNI GetSuperclass() and thus won't change its performance. +- jvmtiError res = compare_and_normalize_class_versions(the_class, +- scratch_class); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ TRACE_RC3("Method matched: new: %s [%d] == old: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni, ++ k_old_method->name_and_sig_as_C_string(), oi); ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ result = result | Klass::ModifyClass; + } +- +- // verify what the caller passed us + { +- // The bug 6214132 caused the verification to fail. +- // Information about the_class and scratch_class is temporarily +- // recorded into jvmtiThreadState. This data is used to redirect +- // the_class to scratch_class in the JVM_* functions called by the +- // verifier. Please, refer to jvmtiThreadState.hpp for the detailed +- // description. 
+- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify( +- scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes exception: '%s'", ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller the bytecodes are bad +- return JVMTI_ERROR_FAILS_VERIFICATION; +- } +- } +- +- res = merge_cp_and_rewrite(the_class, scratch_class, THREAD); +- if (res != JVMTI_ERROR_NONE) { +- return res; +- } +- +- if (VerifyMergedCPBytecodes) { +- // verify what we have done during constant pool merging +- { +- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify(scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes post merge-CP exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller that constant pool merging screwed up +- return JVMTI_ERROR_INTERNAL; +- } +- } +- } +- +- Rewriter::rewrite(scratch_class, THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- Rewriter::relocate_and_link(scratch_class, THREAD); +- } +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- return JVMTI_ERROR_INTERNAL; +- } +- } +- +- _scratch_classes[i] = scratch_class; +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loaded name=%s (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), os::available_memory() >> 10)); +- } +- +- return JVMTI_ERROR_NONE; +-} +- +- +-// Map old_index to new_index as needed. scratch_cp is only needed +-// for RC_TRACE() calls. +-void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp, +- int old_index, int new_index) { +- if (find_new_index(old_index) != 0) { +- // old_index is already mapped +- return; +- } +- +- if (old_index == new_index) { +- // no mapping is needed +- return; +- } +- +- _index_map_p->at_put(old_index, new_index); +- _index_map_count++; +- +- RC_TRACE(0x00040000, ("mapped tag %d at index %d to %d", +- scratch_cp->tag_at(old_index).value(), old_index, new_index)); +-} // end map_index() +- +- +-// Merge old_cp and scratch_cp and return the results of the merge via +-// merge_cp_p. The number of entries in *merge_cp_p is returned via +-// merge_cp_length_p. The entries in old_cp occupy the same locations +-// in *merge_cp_p. Also creates a map of indices from entries in +-// scratch_cp to the corresponding entry in *merge_cp_p. Index map +-// entries are only created for entries in scratch_cp that occupy a +-// different location in *merged_cp_p. 
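The contract described in the comment above (old entries keep their indices, scratch entries are either found or appended, and only relocated entries get map entries) can be seen in a toy model; the string pool below is a stand-in for real constant pool entries, not the VM's data structures:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Toy model of merge_constant_pools()/map_index(). Index 0 is unused,
    // as in real constant pools.
    int main() {
      std::vector<std::string> old_cp     = {"", "A", "B", "C"};
      std::vector<std::string> scratch_cp = {"", "B", "X", "A"};

      std::vector<std::string> merged = old_cp;  // pass 0: copy old_cp verbatim
      std::map<int, int> index_map;              // scratch index -> merged index

      for (int si = 1; si < (int)scratch_cp.size(); si++) {
        int found = 0;
        for (int mi = 1; mi < (int)merged.size(); mi++) {
          if (merged[mi] == scratch_cp[si]) { found = mi; break; }
        }
        if (found == 0) {                        // no match anywhere: append
          merged.push_back(scratch_cp[si]);
          found = (int)merged.size() - 1;
        }
        if (found != si) index_map[si] = found;  // only relocated entries map
      }
      for (auto& e : index_map)
        std::printf("scratch[%d] -> merged[%d]\n", e.first, e.second);
      // scratch[1]=B -> merged[2], scratch[2]=X -> merged[4], scratch[3]=A -> merged[1]
    }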
+-bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS) { +- +- if (merge_cp_p == NULL) { +- assert(false, "caller must provide scatch constantPool"); +- return false; // robustness +- } +- if (merge_cp_length_p == NULL) { +- assert(false, "caller must provide scatch CP length"); +- return false; // robustness +- } +- // Worst case we need old_cp->length() + scratch_cp()->length(), +- // but the caller might be smart so make sure we have at least +- // the minimum. +- if ((*merge_cp_p)->length() < old_cp->length()) { +- assert(false, "merge area too small"); +- return false; // robustness +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("old_cp_len=%d, scratch_cp_len=%d", old_cp->length(), +- scratch_cp->length())); +- +- { +- // Pass 0: +- // The old_cp is copied to *merge_cp_p; this means that any code +- // using old_cp does not have to change. This work looks like a +- // perfect fit for constantPoolOop::copy_cp_to(), but we need to +- // handle one special case: +- // - revert JVM_CONSTANT_Class to JVM_CONSTANT_UnresolvedClass +- // This will make verification happy. +- +- int old_i; // index into old_cp +- +- // index zero (0) is not used in constantPools +- for (old_i = 1; old_i < old_cp->length(); old_i++) { +- // leave debugging crumb +- jbyte old_tag = old_cp->tag_at(old_i).value(); +- switch (old_tag) { +- case JVM_CONSTANT_Class: +- case JVM_CONSTANT_UnresolvedClass: +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- // May be resolving while calling this so do the same for +- // JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition) +- (*merge_cp_p)->unresolved_klass_at_put(old_i, +- old_cp->klass_name_at(old_i)); +- break; +- +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // just copy the entry to *merge_cp_p, but double and long take +- // two constant pool entries +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- old_i++; +- break; +- +- default: +- // just copy the entry to *merge_cp_p +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- break; +- } +- } // end for each old_cp entry +- +- // We don't need to sanity check that *merge_cp_length_p is within +- // *merge_cp_p bounds since we have the minimum on-entry check above. +- (*merge_cp_length_p) = old_i; +- } +- +- // merge_cp_len should be the same as old_cp->length() at this point +- // so this trace message is really a "warm-and-breathing" message. +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 0: merge_cp_len=%d", *merge_cp_length_p)); +- +- int scratch_i; // index into scratch_cp +- { +- // Pass 1a: +- // Compare scratch_cp entries to the old_cp entries that we have +- // already copied to *merge_cp_p. In this pass, we are eliminating +- // exact duplicates (matching entry at same index) so we only +- // compare entries in the common indice range. 
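The increment bookkeeping in pass 1a (and pass 1b below) exists because CONSTANT_Long and CONSTANT_Double entries occupy two constant pool slots per the class file format, so any index-based traversal must skip the phantom second slot. A minimal sketch of that rule, with hypothetical tag data:

    #include <cstdio>

    // JVM constant pool tags (subset), per the class file format.
    enum { CONSTANT_Utf8 = 1, CONSTANT_Integer = 3, CONSTANT_Long = 5,
           CONSTANT_Double = 6, CONSTANT_Class = 7 };

    int main() {
      // tags[0] unused: constant pool indices start at 1.
      int tags[] = {0, CONSTANT_Class, CONSTANT_Long, 0 /* phantom slot */,
                    CONSTANT_Utf8};
      int len = sizeof(tags) / sizeof(tags[0]);
      for (int i = 1; i < len; ) {
        int increment = (tags[i] == CONSTANT_Long || tags[i] == CONSTANT_Double)
                        ? 2 : 1;  // wide entries own the next slot too
        std::printf("entry %d: tag %d (advance %d)\n", i, tags[i], increment);
        i += increment;
      }
    }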
+- int increment = 1; +- int pass1a_length = MIN2(old_cp->length(), scratch_cp->length()); +- for (scratch_i = 1; scratch_i < pass1a_length; scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- bool match = scratch_cp->compare_entry_to(scratch_i, *merge_cp_p, +- scratch_i, CHECK_0); +- if (match) { +- // found a match at the same index so nothing more to do +- continue; +- } else if (is_unresolved_class_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved class entry at the same index +- // with the same string value. Since Pass 0 reverted any +- // class entries to unresolved class entries in *merge_cp_p, +- // we go with the unresolved class entry. +- continue; +- } else if (is_unresolved_string_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved string entry at the same index +- // with the same string value. We can live with whichever +- // happens to be at scratch_i in *merge_cp_p. +- continue; +- } +- +- int found_i = scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, +- CHECK_0); +- if (found_i != 0) { +- guarantee(found_i != scratch_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // The find_matching_entry() call above could fail to find a match +- // due to a resolved versus unresolved class or string entry situation +- // like we solved above with the is_unresolved_*_mismatch() calls. +- // However, we would have to call is_unresolved_*_mismatch() over +- // all of *merge_cp_p (potentially) and that doesn't seem to be +- // worth the time. +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1a: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- +- if (scratch_i < scratch_cp->length()) { +- // Pass 1b: +- // old_cp is smaller than scratch_cp so there are entries in +- // scratch_cp that we have not yet processed. We take care of +- // those now. +- int increment = 1; +- for (; scratch_i < scratch_cp->length(); scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- int found_i = +- scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, CHECK_0); +- if (found_i != 0) { +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. 
+- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- } +- +- return true; +-} // end merge_constant_pools() +- +- +-// Merge constant pools between the_class and scratch_class and +-// potentially rewrite bytecodes in scratch_class to use the merged +-// constant pool. +-jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class, +- TRAPS) { +- // worst case merged constant pool length is old and new combined +- int merge_cp_length = the_class->constants()->length() +- + scratch_class->constants()->length(); +- +- constantPoolHandle old_cp(THREAD, the_class->constants()); +- constantPoolHandle scratch_cp(THREAD, scratch_class->constants()); +- +- // Constant pools are not easily reused so we allocate a new one +- // each time. +- // merge_cp is created unsafe for concurrent GC processing. It +- // should be marked safe before discarding it. Even though +- // garbage, if it crosses a card boundary, it may be scanned +- // in order to find the start of the first complete object on the card. +- constantPoolHandle merge_cp(THREAD, +- oopFactory::new_constantPool(merge_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- int orig_length = old_cp->orig_length(); +- if (orig_length == 0) { +- // This old_cp is an actual original constant pool. We save +- // the original length in the merged constant pool so that +- // merge_constant_pools() can be more efficient. If a constant +- // pool has a non-zero orig_length() value, then that constant +- // pool was created by a merge operation in RedefineClasses. +- merge_cp->set_orig_length(old_cp->length()); +- } else { +- // This old_cp is a merged constant pool from a previous +- // RedefineClasses() calls so just copy the orig_length() +- // value. +- merge_cp->set_orig_length(old_cp->orig_length()); +- } +- +- ResourceMark rm(THREAD); +- _index_map_count = 0; +- _index_map_p = new intArray(scratch_cp->length(), -1); +- +- bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp, +- &merge_cp_length, THREAD); +- if (!result) { +- // The merge can fail due to memory allocation failure or due +- // to robustness checks. +- return JVMTI_ERROR_INTERNAL; +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("merge_cp_len=%d, index_map_len=%d", merge_cp_length, _index_map_count)); +- +- if (_index_map_count == 0) { +- // there is nothing to map between the new and merged constant pools +- +- if (old_cp->length() == scratch_cp->length()) { +- // The old and new constant pools are the same length and the +- // index map is empty. This means that the three constant pools +- // are equivalent (but not the same). Unfortunately, the new +- // constant pool has not gone through link resolution nor have +- // the new class bytecodes gone through constant pool cache +- // rewriting so we can't use the old constant pool with the new +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else if (old_cp->length() < scratch_cp->length()) { +- // The old constant pool has fewer entries than the new constant +- // pool and the index map is empty. This means the new constant +- // pool is a superset of the old constant pool. 
However, the old +- // class bytecodes have already gone through constant pool cache +- // rewriting so we can't use the new constant pool with the old +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else { +- // The old constant pool has more entries than the new constant +- // pool and the index map is empty. This means that both the old +- // and merged constant pools are supersets of the new constant +- // pool. +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool; the previous new constant pool will +- // get GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- // drop local ref to the merged constant pool +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); +- } +- } else { +- if (RC_TRACE_ENABLED(0x00040000)) { +- // don't want to loop unless we are tracing +- int count = 0; +- for (int i = 1; i < _index_map_p->length(); i++) { +- int value = _index_map_p->at(i); +- +- if (value != -1) { +- RC_TRACE_WITH_THREAD(0x00040000, THREAD, +- ("index_map[%d]: old=%d new=%d", count, i, value)); +- count++; +- } +- } +- } +- +- // We have entries mapped between the new and merged constant pools +- // so we have to rewrite some constant pool references. +- if (!rewrite_cp_refs(scratch_class, THREAD)) { +- return JVMTI_ERROR_INTERNAL; +- } +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool so now the rewritten bytecodes have +- // valid references; the previous new constant pool will get +- // GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- merge_cp()->set_is_conc_safe(true); +- } +- assert(old_cp()->is_conc_safe(), "Just checking"); +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +- +- return JVMTI_ERROR_NONE; +-} // end merge_cp_and_rewrite() +- +- +-// Rewrite constant pool references in klass scratch_class. +-bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class, +- TRAPS) { +- +- // rewrite constant pool references in the methods: +- if (!rewrite_cp_refs_in_methods(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the class_annotations: +- if (!rewrite_cp_refs_in_class_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the fields_annotations: +- if (!rewrite_cp_refs_in_fields_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the methods_annotations: +- if (!rewrite_cp_refs_in_methods_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the methods_parameter_annotations: +- if (!rewrite_cp_refs_in_methods_parameter_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the methods_default_annotations: +- if (!rewrite_cp_refs_in_methods_default_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- return true; +-} // end rewrite_cp_refs() +- +- +-// Rewrite constant pool references in the methods. 
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods(THREAD, scratch_class->methods()); +- +- if (methods.is_null() || methods->length() == 0) { +- // no methods so nothing to do +- return true; +- } +- +- // rewrite constant pool references in the methods: +- for (int i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- methodHandle new_method; +- rewrite_cp_refs_in_method(method, &new_method, CHECK_false); +- if (!new_method.is_null()) { +- // the method has been replaced so save the new method version +- methods->obj_at_put(i, new_method()); +- } +- } +- +- return true; +-} +- +- +-// Rewrite constant pool references in the specific method. This code +-// was adapted from Rewriter::rewrite_method(). +-void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method, +- methodHandle *new_method_p, TRAPS) { +- +- *new_method_p = methodHandle(); // default is no new method +- +- // We cache a pointer to the bytecodes here in code_base. If GC +- // moves the methodOop, then the bytecodes will also move which +- // will likely cause a crash. We create a No_Safepoint_Verifier +- // object to detect whether we pass a possible safepoint in this +- // code block. +- No_Safepoint_Verifier nsv; +- +- // Bytecodes and their length +- address code_base = method->code_base(); +- int code_length = method->code_size(); +- +- int bc_length; +- for (int bci = 0; bci < code_length; bci += bc_length) { +- address bcp = code_base + bci; +- Bytecodes::Code c = (Bytecodes::Code)(*bcp); +- +- bc_length = Bytecodes::length_for(c); +- if (bc_length == 0) { +- // More complicated bytecodes report a length of zero so +- // we have to try again a slightly different way. +- bc_length = Bytecodes::length_at(method(), bcp); +- } +- +- assert(bc_length != 0, "impossible bytecode length"); +- +- switch (c) { +- case Bytecodes::_ldc: +- { +- int cp_index = *(bcp + 1); +- int new_index = find_new_index(cp_index); +- +- if (StressLdcRewrite && new_index == 0) { +- // If we are stressing ldc -> ldc_w rewriting, then we +- // always need a new_index value. +- new_index = cp_index; +- } +- if (new_index != 0) { +- // the original index is mapped so we have more work to do +- if (!StressLdcRewrite && new_index <= max_jubyte) { +- // The new value can still use ldc instead of ldc_w +- // unless we are trying to stress ldc -> ldc_w rewriting +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- *(bcp + 1) = new_index; +- } else { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s->ldc_w@" INTPTR_FORMAT " old=%d, new=%d", +- Bytecodes::name(c), bcp, cp_index, new_index)); +- // the new value needs ldc_w instead of ldc +- u_char inst_buffer[4]; // max instruction size is 4 bytes +- bcp = (address)inst_buffer; +- // construct new instruction sequence +- *bcp = Bytecodes::_ldc_w; +- bcp++; +- // Rewriter::rewrite_method() does not rewrite ldc -> ldc_w. +- // See comment below for difference between put_Java_u2() +- // and put_native_u2(). 
+- Bytes::put_Java_u2(bcp, new_index); +- +- Relocator rc(method, NULL /* no RelocatorListener needed */); +- methodHandle m; +- { +- Pause_No_Safepoint_Verifier pnsv(&nsv); +- +- // ldc is 2 bytes and ldc_w is 3 bytes +- m = rc.insert_space_at(bci, 3, inst_buffer, THREAD); +- if (m.is_null() || HAS_PENDING_EXCEPTION) { +- guarantee(false, "insert_space_at() failed"); +- } +- } +- +- // return the new method so that the caller can update +- // the containing class +- *new_method_p = method = m; +- // switch our bytecode processing loop from the old method +- // to the new method +- code_base = method->code_base(); +- code_length = method->code_size(); +- bcp = code_base + bci; +- c = (Bytecodes::Code)(*bcp); +- bc_length = Bytecodes::length_for(c); +- assert(bc_length != 0, "sanity check"); +- } // end we need ldc_w instead of ldc +- } // end if there is a mapped index +- } break; +- +- // these bytecodes have a two-byte constant pool index +- case Bytecodes::_anewarray : // fall through +- case Bytecodes::_checkcast : // fall through +- case Bytecodes::_getfield : // fall through +- case Bytecodes::_getstatic : // fall through +- case Bytecodes::_instanceof : // fall through +- case Bytecodes::_invokeinterface: // fall through +- case Bytecodes::_invokespecial : // fall through +- case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokevirtual : // fall through +- case Bytecodes::_ldc_w : // fall through +- case Bytecodes::_ldc2_w : // fall through +- case Bytecodes::_multianewarray : // fall through +- case Bytecodes::_new : // fall through +- case Bytecodes::_putfield : // fall through +- case Bytecodes::_putstatic : +- { +- address p = bcp + 1; +- int cp_index = Bytes::get_Java_u2(p); +- int new_index = find_new_index(cp_index); +- if (new_index != 0) { +- // the original index is mapped so update w/ new value +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- // Rewriter::rewrite_method() uses put_native_u2() in this +- // situation because it is reusing the constant pool index +- // location for a native index into the constantPoolCache. +- // Since we are updating the constant pool index prior to +- // verification and constantPoolCache initialization, we +- // need to keep the new index in Java byte order. +- Bytes::put_Java_u2(p, new_index); +- } +- } break; ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ result = result | Klass::ModifyClass; ++ } ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = new_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ swap_all_method_annotations(new_num, num, new_class); + } +- } // end for each bytecode +-} // end rewrite_cp_refs_in_method() +- +- +-// Rewrite constant pool references in the class_annotations field. 
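The ldc handling above is the one case where index rewriting can grow an instruction: ldc carries a one-byte index, so a mapped index above 255 forces a widening to ldc_w and a Relocator pass to make room for the extra byte. A minimal sketch of building the widened instruction (opcodes 0x12/0x13 per the JVM specification; the buffer handling is simplified and no relocation is modeled):

    #include <stdint.h>
    #include <stdio.h>

    // Big-endian u2 write, like Bytes::put_Java_u2(): operands stay in Java
    // byte order because the constant pool cache has not been built yet.
    static void put_java_u2(uint8_t* p, uint16_t v) {
      p[0] = (uint8_t)(v >> 8);
      p[1] = (uint8_t)(v & 0xff);
    }

    int main() {
      const uint8_t LDC_W = 0x13;  // ldc is 0x12 and takes a one-byte index
      uint16_t new_index = 300;    // > 255, so ldc can no longer encode it
      uint8_t inst[3];
      inst[0] = LDC_W;             // widened opcode
      put_java_u2(&inst[1], new_index);
      printf("%02x %02x %02x\n", inst[0], inst[1], inst[2]);  // prints: 13 01 2c
      return 0;
    }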
+-bool VM_RedefineClasses::rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ TRACE_RC1("Method added: new: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni); ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ result = result | Klass::ModifyClass; ++ } ++ TRACE_RC1("Method deleted: old: %s [%d]", ++ k_old_method->name_and_sig_as_C_string(), oi); ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } + +- typeArrayHandle class_annotations(THREAD, +- scratch_class->class_annotations()); +- if (class_annotations.is_null() || class_annotations->length() == 0) { +- // no class_annotations so nothing to do +- return true; ++ if (new_class()->size() != new_class->old_version()->size()) { ++ result |= Klass::ModifyClassSize; + } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_annotations length=%d", class_annotations->length())); ++ if (new_class->size_helper() != ((instanceKlass*)(new_class->old_version()->klass_part()))->size_helper()) { ++ result |= Klass::ModifyInstanceSize; ++ } + +- int byte_i = 0; // byte index into class_annotations +- return rewrite_cp_refs_in_annotations_typeArray(class_annotations, byte_i, +- THREAD); ++ // (tw) Check method bodies to be able to return NoChange? ++ return result; + } + ++void VM_RedefineClasses::calculate_instance_update_information(klassOop new_version) { + +-// Rewrite constant pool references in an annotations typeArray. 
This +-// "structure" is adapted from the RuntimeVisibleAnnotations_attribute +-// that is described in section 4.8.15 of the 2nd-edition of the VM spec: +-// +-// annotations_typeArray { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for num_annotations field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for num_annotations field")); +- return false; +- } ++ class CalculateFieldUpdates : public FieldClosure { + +- u2 num_annotations = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; ++ private: ++ instanceKlass* _old_ik; ++ GrowableArray<int> _update_info; ++ int _position; ++ bool _copy_backwards; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_annotations=%d", num_annotations)); ++ public: + +- int calc_num_annotations = 0; +- for (; calc_num_annotations < num_annotations; calc_num_annotations++) { +- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad annotation_struct at %d", calc_num_annotations)); +- // propagate failure back to caller +- return false; ++ bool does_copy_backwards() { ++ return _copy_backwards; + } +- } +- assert(num_annotations == calc_num_annotations, "sanity check"); + +- return true; +-} // end rewrite_cp_refs_in_annotations_typeArray() ++ CalculateFieldUpdates(instanceKlass* old_ik) : ++ _old_ik(old_ik), _position(instanceOopDesc::base_offset_in_bytes()), _copy_backwards(false) { ++ _update_info.append(_position); ++ _update_info.append(0); ++ } + ++ GrowableArray<int> &finish() { ++ _update_info.append(0); ++ return _update_info; ++ } + +-// Rewrite constant pool references in the annotation struct portion of +-// an annotations_typeArray. 
This "structure" is from section 4.8.15 of +-// the 2nd-edition of the VM spec: +-// +-// struct annotation { +-// u2 type_index; +-// u2 num_element_value_pairs; +-// { +-// u2 element_name_index; +-// element_value value; +-// } element_value_pairs[num_element_value_pairs]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- if ((byte_i_ref + 2 + 2) > annotations_typeArray->length()) { +- // not enough room for smallest annotation_struct +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for annotation_struct")); +- return false; +- } ++ void do_field(fieldDescriptor* fd) { ++ int alignment = fd->offset() - _position; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } + +- u2 type_index = rewrite_cp_ref_in_annotation_data(annotations_typeArray, +- byte_i_ref, "mapped old type_index=%d", THREAD); ++ assert(_position == fd->offset(), "must be correct offset!"); + +- u2 num_element_value_pairs = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr( +- byte_i_ref)); +- byte_i_ref += 2; ++ fieldDescriptor old_fd; ++ if (_old_ik->find_field(fd->name(), fd->signature(), false, &old_fd) != NULL) { ++ // Found field in the old class, copy ++ copy(old_fd.offset(), type2aelembytes(fd->field_type())); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_index=%d num_element_value_pairs=%d", type_index, +- num_element_value_pairs)); ++ if (old_fd.offset() < fd->offset()) { ++ _copy_backwards = true; ++ } + +- int calc_num_element_value_pairs = 0; +- for (; calc_num_element_value_pairs < num_element_value_pairs; +- calc_num_element_value_pairs++) { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for another element_name_index, let alone +- // the rest of another component +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for element_name_index")); +- return false; ++ // Transfer special flags ++ fd->set_is_field_modification_watched(old_fd.is_field_modification_watched()); ++ fd->set_is_field_access_watched(old_fd.is_field_access_watched()); ++ } else { ++ // New field, fill ++ fill(type2aelembytes(fd->field_type())); ++ } + } + +- u2 element_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old element_name_index=%d", THREAD); ++ private: + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("element_name_index=%d", element_name_index)); +- +- if (!rewrite_cp_refs_in_element_value(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad element_value at %d", calc_num_element_value_pairs)); +- // propagate failure back to caller +- return false; ++ void fill(int size) { ++ if (_update_info.length() > 0 && _update_info.at(_update_info.length() - 1) < 0) { ++ (*_update_info.adr_at(_update_info.length() - 1)) -= size; ++ } else { ++ _update_info.append(-size); ++ } ++ _position += size; + } +- } // end for each component +- assert(num_element_value_pairs == calc_num_element_value_pairs, +- "sanity check"); +- +- return true; +-} // end rewrite_cp_refs_in_annotation_struct() +- +- +-// Rewrite a constant pool reference at the current position in +-// annotations_typeArray if needed. Returns the original constant +-// pool reference if a rewrite was not needed or the new constant +-// pool reference if a rewrite was needed. 
+-u2 VM_RedefineClasses::rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS) { +- +- address cp_index_addr = (address) +- annotations_typeArray->byte_at_addr(byte_i_ref); +- u2 old_cp_index = Bytes::get_Java_u2(cp_index_addr); +- u2 new_cp_index = find_new_index(old_cp_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, (trace_mesg, old_cp_index)); +- Bytes::put_Java_u2(cp_index_addr, new_cp_index); +- old_cp_index = new_cp_index; +- } +- byte_i_ref += 2; +- return old_cp_index; +-} +- +- +-// Rewrite constant pool references in the element_value portion of an +-// annotations_typeArray. This "structure" is from section 4.8.15.1 of +-// the 2nd-edition of the VM spec: +-// +-// struct element_value { +-// u1 tag; +-// union { +-// u2 const_value_index; +-// { +-// u2 type_name_index; +-// u2 const_name_index; +-// } enum_const_value; +-// u2 class_info_index; +-// annotation annotation_value; +-// struct { +-// u2 num_values; +-// element_value values[num_values]; +-// } array_value; +-// } value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_element_value( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { + +- if ((byte_i_ref + 1) > annotations_typeArray->length()) { +- // not enough room for a tag let alone the rest of an element_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a tag")); +- return false; +- } ++ void copy(int offset, int size) { ++ int prev_end = -1; ++ if (_update_info.length() > 0 && _update_info.at(_update_info.length() - 1) > 0) { ++ prev_end = _update_info.at(_update_info.length() - 2) + _update_info.at(_update_info.length() - 1); ++ } + +- u1 tag = annotations_typeArray->byte_at(byte_i_ref); +- byte_i_ref++; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("tag='%c'", tag)); +- +- switch (tag) { +- // These BaseType tag values are from Table 4.2 in VM spec: +- case 'B': // byte +- case 'C': // char +- case 'D': // double +- case 'F': // float +- case 'I': // int +- case 'J': // long +- case 'S': // short +- case 'Z': // boolean +- +- // The remaining tag values are from Table 4.8 in the 2nd-edition of +- // the VM spec: +- case 's': +- { +- // For the above tag values (including the BaseType values), +- // value.const_value_index is right union field. 
+- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a const_value_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a const_value_index")); +- return false; ++ if (prev_end == offset) { ++ (*_update_info.adr_at(_update_info.length() - 2)) += size; ++ } else { ++ _update_info.append(size); ++ _update_info.append(offset); + } + +- u2 const_value_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_value_index=%d", THREAD); ++ _position += size; ++ } ++ }; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("const_value_index=%d", const_value_index)); +- } break; ++ instanceKlass* ik = instanceKlass::cast(new_version); ++ instanceKlass* old_ik = instanceKlass::cast(new_version->klass_part()->old_version()); ++ CalculateFieldUpdates cl(old_ik); ++ ik->do_nonstatic_fields(&cl); + +- case 'e': +- { +- // for the above tag value, value.enum_const_value is right union field ++ GrowableArray<int> result = cl.finish(); ++ ik->store_update_information(result); ++ ik->set_copying_backwards(cl.does_copy_backwards()); + +- if ((byte_i_ref + 4) > annotations_typeArray->length()) { +- // not enough room for a enum_const_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a enum_const_value")); +- return false; ++ IF_TRACE_RC2 { ++ TRACE_RC2("Instance update information for %s:", new_version->klass_part()->name()->as_C_string()); ++ if (cl.does_copy_backwards()) { ++ TRACE_RC2("\tDoes copy backwards!"); ++ } ++ for (int i=0; i<result.length(); i++) { ++ int curNum = result.at(i); ++ if (curNum < 0) { ++ TRACE_RC2("\t%d CLEAN", curNum); ++ } else if (curNum > 0) { ++ TRACE_RC2("\t%d COPY from %d", curNum, result.at(i + 1)); ++ i++; ++ } else { ++ TRACE_RC2("\tEND"); + } ++ } ++ } ++} + +- u2 type_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old type_name_index=%d", THREAD); +- +- u2 const_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_name_index=%d", THREAD); ++void VM_RedefineClasses::rollback() { ++ TRACE_RC1("Rolling back redefinition!"); ++ SystemDictionary::rollback_redefinition(); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_name_index=%d const_name_index=%d", type_name_index, +- const_name_index)); +- } break; ++ TRACE_RC1("After rolling back system dictionary!"); ++ for (int i=0; i<_new_classes->length(); i++) { ++ SystemDictionary::remove_from_hierarchy(_new_classes->at(i)); ++ } + +- case 'c': +- { +- // for the above tag value, value.class_info_index is right union field ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle new_class = _new_classes->at(i); ++ new_class->set_redefining(false); ++ new_class->old_version()->klass_part()->set_new_version(NULL); ++ new_class->set_old_version(NULL); ++ } + +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a class_info_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a class_info_index")); +- return false; +- } ++} + +- u2 class_info_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old class_info_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_info_index=%d", class_info_index)); +- } break; +- +- case '@': +- // For the above tag value, value.attr_value is the right union +- // field. This is a nested annotation. 
+- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- break; ++void VM_RedefineClasses::swap_marks(oop first, oop second) { ++ markOop first_mark = first->mark(); ++ markOop second_mark = second->mark(); ++ first->set_mark(second_mark); ++ second->set_mark(first_mark); ++} + +- case '[': +- { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a num_values field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_values field")); +- return false; +- } + +- // For the above tag value, value.array_value is the right union +- // field. This is an array of nested element_value. +- u2 num_values = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("num_values=%d", num_values)); +- +- int calc_num_values = 0; +- for (; calc_num_values < num_values; calc_num_values++) { +- if (!rewrite_cp_refs_in_element_value( +- annotations_typeArray, byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad nested element_value at %d", calc_num_values)); +- // propagate failure back to caller +- return false; ++class FieldCopier : public FieldClosure { ++ public: ++ void do_field(fieldDescriptor* fd) { ++ instanceKlass* cur = instanceKlass::cast(fd->field_holder()); ++ oop cur_oop = cur->java_mirror(); ++ ++ instanceKlass* old = instanceKlass::cast(cur->old_version()); ++ oop old_oop = old->java_mirror(); ++ ++ fieldDescriptor result; ++ bool found = old->find_local_field(fd->name(), fd->signature(), &result); ++ if (found && result.is_static()) { ++ TRACE_RC3("Copying static field value for field %s old_offset=%d new_offset=%d", ++ fd->name()->as_C_string(), result.offset(), fd->offset()); ++ memcpy(cur_oop->obj_field_addr<HeapWord>(fd->offset()), ++ old_oop->obj_field_addr<HeapWord>(result.offset()), ++ type2aelembytes(fd->field_type())); ++ ++ // Static fields may have references to java.lang.Class ++ if (fd->field_type() == T_OBJECT) { ++ oop oop = cur_oop->obj_field(fd->offset()); ++ if (oop != NULL && oop->is_instanceMirror()) { ++ klassOop klass = java_lang_Class::as_klassOop(oop); ++ if (klass != NULL && klass->klass_part()->oop_is_instance()) { ++ assert(oop == instanceKlass::cast(klass)->java_mirror(), "just checking"); ++ if (klass->klass_part()->new_version() != NULL) { ++ oop = instanceKlass::cast(klass->klass_part()->new_version())->java_mirror(); ++ ++ cur_oop->obj_field_put(fd->offset(), oop); ++ } ++ } + } + } +- assert(num_values == calc_num_values, "sanity check"); +- } break; +- +- default: +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("bad tag=0x%x", tag)); +- return false; +- } // end decode tag field +- +- return true; +-} // end rewrite_cp_refs_in_element_value() +- +- +-// Rewrite constant pool references in a fields_annotations field. 
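The update-information vector built by CalculateFieldUpdates earlier in this hunk is a flat run-length program: a positive value means copy that many bytes from the old offset that follows, a negative value means clear that many bytes, and 0 terminates; the leading pair copies the object header from offset 0. A standalone decoder matching the TRACE_RC2 loop above, with a hypothetical layout change as input:

    #include <stdio.h>

    // Decode one update-information program, as the trace loop does.
    static void decode(const int* info) {
      int i = 0;
      while (info[i] != 0) {
        if (info[i] < 0) {
          printf("clear %d bytes\n", -info[i]);  // new or realigned space
          i += 1;
        } else {
          printf("copy %d bytes from old offset %d\n", info[i], info[i + 1]);
          i += 2;                                // copy records carry an offset
        }
      }
      printf("end\n");
    }

    int main() {
      // Hypothetical layout change: 12-byte header; old fields {int a @12,
      // int b @16}; new fields {int b @12, int d @16}, with d newly added.
      int info[] = {12, 0,   // copy the header from offset 0
                    4, 16,   // old b (offset 16) lands at new offset 12
                    -4,      // new field d starts cleared
                    0};      // end marker
      decode(info);
      return 0;
    }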
+-bool VM_RedefineClasses::rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle fields_annotations(THREAD, +- scratch_class->fields_annotations()); ++ } ++ } ++}; + +- if (fields_annotations.is_null() || fields_annotations->length() == 0) { +- // no fields_annotations so nothing to do +- return true; ++void VM_RedefineClasses::mark_as_scavengable(nmethod* nm) { ++ if (!nm->on_scavenge_root_list()) { ++ CodeCache::add_scavenge_root_nmethod(nm); + } ++} + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("fields_annotations length=%d", fields_annotations->length())); ++struct StoreBarrier { ++ template <class T> static void oop_store(T* p, oop v) { ::oop_store(p, v); } ++}; + +- for (int i = 0; i < fields_annotations->length(); i++) { +- typeArrayHandle field_annotations(THREAD, +- (typeArrayOop)fields_annotations->obj_at(i)); +- if (field_annotations.is_null() || field_annotations->length() == 0) { +- // this field does not have any annotations so skip it +- continue; +- } ++struct StoreNoBarrier { ++ template <class T> static void oop_store(T* p, oop v) { oopDesc::encode_store_heap_oop_not_null(p, v); } ++}; + +- int byte_i = 0; // byte index into field_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(field_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad field_annotations at %d", i)); +- // propagate failure back to caller +- return false; ++template <class S> ++class ChangePointersOopClosure : public OopClosureNoHeader { ++ // Forward pointers to instanceKlass and mirror class to new versions ++ template <class T> ++ inline void do_oop_work(T* p) { ++ oop oop = oopDesc::load_decode_heap_oop(p); ++ if (oop == NULL) { ++ return; ++ } ++ if (oop->is_instanceKlass()) { ++ klassOop klass = (klassOop) oop; ++ if (klass->klass_part()->new_version() != NULL) { ++ oop = klass->klass_part()->new_version(); ++ S::oop_store(p, oop); ++ } ++ } else if (oop->is_instanceMirror()) { ++ klassOop klass = java_lang_Class::as_klassOop(oop); ++ if (klass != NULL && klass->klass_part()->oop_is_instance()) { ++ assert(oop == instanceKlass::cast(klass)->java_mirror(), "just checking"); ++ if (klass->klass_part()->new_version() != NULL) { ++ oop = instanceKlass::cast(klass->klass_part()->new_version())->java_mirror(); ++ S::oop_store(p, oop); ++ } ++ } + } + } + +- return true; +-} // end rewrite_cp_refs_in_fields_annotations() +- +- +-// Rewrite constant pool references in a methods_annotations field. 
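StoreBarrier/StoreNoBarrier above make the write-barrier policy a template parameter, so a single closure body serves both the heap walk (which needs card marking) and the root walk (which does not), with the choice resolved at compile time rather than by virtual dispatch. The same pattern in miniature, with hypothetical policies over plain ints:

    #include <stdio.h>

    struct LoggingStore {  // stands in for StoreBarrier
      static void store(int* slot, int v) {
        printf("barrier then store %d\n", v);
        *slot = v;
      }
    };

    struct RawStore {      // stands in for StoreNoBarrier
      static void store(int* slot, int v) { *slot = v; }
    };

    // One traversal body, two store policies: the compiler stamps out a
    // variant per policy, keeping the per-slot check in one place.
    template <class Policy>
    static void update_slot(int* slot, int new_value) {
      if (*slot != new_value) {
        Policy::store(slot, new_value);
      }
    }

    int main() {
      int heap_slot = 1, root_slot = 1;
      update_slot<LoggingStore>(&heap_slot, 2);  // like ChangePointersOopClosure<StoreBarrier>
      update_slot<RawStore>(&root_slot, 2);      // like ChangePointersOopClosure<StoreNoBarrier>
      return 0;
    }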
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_annotations(THREAD, +- scratch_class->methods_annotations()); ++ virtual void do_oop(oop* o) { ++ do_oop_work(o); ++ } + +- if (methods_annotations.is_null() || methods_annotations->length() == 0) { +- // no methods_annotations so nothing to do +- return true; ++ virtual void do_oop(narrowOop* o) { ++ do_oop_work(o); + } ++}; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_annotations length=%d", methods_annotations->length())); ++void VM_RedefineClasses::doit() { ++ Thread *thread = Thread::current(); ++ ++ TRACE_RC1("Entering doit!"); + +- for (int i = 0; i < methods_annotations->length(); i++) { +- typeArrayHandle method_annotations(THREAD, +- (typeArrayOop)methods_annotations->obj_at(i)); +- if (method_annotations.is_null() || method_annotations->length() == 0) { +- // this method does not have any annotations so skip it +- continue; +- } ++ assert((_max_redefinition_flags & Klass::RemoveSuperType) == 0, "removing super types not allowed"); + +- int byte_i = 0; // byte index into method_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(method_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_annotations at %d", i)); +- // propagate failure back to caller +- return false; ++ if (UseSharedSpaces) { ++ // Sharing is enabled so we remap the shared readonly space to ++ // shared readwrite, private just in case we need to redefine ++ // a shared class. We do the remap during the doit() phase of ++ // the safepoint to be safer. ++ if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { ++ TRACE_RC1("failed to remap shared readonly space to readwrite, private"); ++ _result = JVMTI_ERROR_INTERNAL; ++ return; + } + } ++ ++ RC_TIMER_START(_timer_prepare_redefinition); ++ for (int i = 0; i < _new_classes->length(); i++) { ++ redefine_single_class(_new_classes->at(i), thread); ++ } ++ ++ // Deoptimize all compiled code that depends on this class ++ flush_dependent_code(instanceKlassHandle(Thread::current(), (klassOop)NULL), Thread::current()); + +- return true; +-} // end rewrite_cp_refs_in_methods_annotations() ++ // Adjust constantpool caches and vtables for all classes ++ // that reference methods of the evolved class. ++ SystemDictionary::classes_do(adjust_cpool_cache, Thread::current()); + ++ RC_TIMER_STOP(_timer_prepare_redefinition); ++ RC_TIMER_START(_timer_heap_iteration); + +-// Rewrite constant pool references in a methods_parameter_annotations +-// field. 
This "structure" is adapted from the +-// RuntimeVisibleParameterAnnotations_attribute described in section +-// 4.8.17 of the 2nd-edition of the VM spec: +-// +-// methods_parameter_annotations_typeArray { +-// u1 num_parameters; +-// { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } parameter_annotations[num_parameters]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ class ChangePointersObjectClosure : public ObjectClosure { + +- objArrayHandle methods_parameter_annotations(THREAD, +- scratch_class->methods_parameter_annotations()); ++ private: + +- if (methods_parameter_annotations.is_null() +- || methods_parameter_annotations->length() == 0) { +- // no methods_parameter_annotations so nothing to do +- return true; +- } ++ OopClosureNoHeader *_closure; ++ bool _needs_instance_update; ++ oop _tmp_obj; ++ int _tmp_obj_size; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_parameter_annotations length=%d", +- methods_parameter_annotations->length())); ++ public: ++ ChangePointersObjectClosure(OopClosureNoHeader *closure) : _closure(closure), _needs_instance_update(false), _tmp_obj(NULL), _tmp_obj_size(0) {} + +- for (int i = 0; i < methods_parameter_annotations->length(); i++) { +- typeArrayHandle method_parameter_annotations(THREAD, +- (typeArrayOop)methods_parameter_annotations->obj_at(i)); +- if (method_parameter_annotations.is_null() +- || method_parameter_annotations->length() == 0) { +- // this method does not have any parameter annotations so skip it +- continue; +- } ++ bool needs_instance_update() { ++ return _needs_instance_update; ++ } + +- if (method_parameter_annotations->length() < 1) { +- // not enough room for a num_parameters field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_parameters field at %d", i)); +- return false; +- } ++ void copy_to_tmp(oop o) { ++ int size = o->size(); ++ if (_tmp_obj_size < size) { ++ _tmp_obj_size = size; ++ _tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ } ++ Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)_tmp_obj, size); ++ } + +- int byte_i = 0; // byte index into method_parameter_annotations ++ virtual void do_object(oop obj) { ++ if (obj->is_instanceKlass()) return; ++ if (obj->is_instanceMirror()) { ++ // static fields may have references to old java.lang.Class instances, update them ++ // at the same time, we don't want to update other oops in the java.lang.Class ++ // Causes SIGSEGV? 
++ //instanceMirrorKlass::oop_fields_iterate(obj, _closure); ++ } else { ++ obj->oop_iterate(_closure); ++ } + +- u1 num_parameters = method_parameter_annotations->byte_at(byte_i); +- byte_i++; ++ if (obj->blueprint()->new_version() != NULL) { ++ Klass* new_klass = obj->blueprint()->new_version()->klass_part(); ++ if (obj->is_perm()) { ++ _needs_instance_update = true; ++ } else if(new_klass->update_information() != NULL) { ++ int size_diff = obj->size() - obj->size_given_klass(new_klass); ++ ++ // Either new size is bigger or gap is to small to be filled ++ if (size_diff < 0 || (size_diff > 0 && (size_t) size_diff < CollectedHeap::min_fill_size())) { ++ // We need an instance update => set back to old klass ++ _needs_instance_update = true; ++ } else { ++ oop src = obj; ++ if (new_klass->is_copying_backwards()) { ++ copy_to_tmp(obj); ++ src = _tmp_obj; ++ } ++ src->set_klass_no_check(obj->blueprint()->new_version()); ++ MarkSweep::update_fields(obj, src, new_klass->update_information()); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_parameters=%d", num_parameters)); ++ if (size_diff > 0) { ++ HeapWord* dead_space = ((HeapWord *)obj) + obj->size(); ++ CollectedHeap::fill_with_object(dead_space, size_diff); ++ } ++ } ++ } else { ++ obj->set_klass_no_check(obj->blueprint()->new_version()); ++ } ++ } ++ } ++ }; ++ ++ ChangePointersOopClosure<StoreNoBarrier> oopClosureNoBarrier; ++ ChangePointersOopClosure<StoreBarrier> oopClosure; ++ ChangePointersObjectClosure objectClosure(&oopClosure); + +- int calc_num_parameters = 0; +- for (; calc_num_parameters < num_parameters; calc_num_parameters++) { +- if (!rewrite_cp_refs_in_annotations_typeArray( +- method_parameter_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_parameter_annotations at %d", calc_num_parameters)); +- // propagate failure back to caller +- return false; ++ { ++ // Since we may update oops inside nmethod's code blob to point to java.lang.Class in new generation, we need to ++ // make sure such references are properly recognized by GC. For that, If ScavengeRootsInCode is true, we need to ++ // mark such nmethod's as "scavengable". ++ // For now, mark all nmethod's as scavengable that are not scavengable already ++ if (ScavengeRootsInCode) { ++ CodeCache::nmethods_do(mark_as_scavengable); + } ++ ++ SharedHeap::heap()->gc_prologue(true); ++ Universe::heap()->object_iterate(&objectClosure); ++ Universe::root_oops_do(&oopClosureNoBarrier); ++ SharedHeap::heap()->gc_epilogue(false); + } +- assert(num_parameters == calc_num_parameters, "sanity check"); +- } + +- return true; +-} // end rewrite_cp_refs_in_methods_parameter_annotations() + ++ for (int i=0; i<_new_classes->length(); i++) { ++ klassOop cur_oop = _new_classes->at(i)(); ++ instanceKlass* cur = instanceKlass::cast(cur_oop); ++ klassOop old_oop = cur->old_version(); ++ instanceKlass* old = instanceKlass::cast(old_oop); + +-// Rewrite constant pool references in a methods_default_annotations +-// field. 
This "structure" is adapted from the AnnotationDefault_attribute +-// that is described in section 4.8.19 of the 2nd-edition of the VM spec: +-// +-// methods_default_annotations_typeArray { +-// element_value default_value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ // Swap marks to have same hashcodes ++ swap_marks(cur_oop, old_oop); ++ swap_marks(cur->java_mirror(), old->java_mirror()); + +- objArrayHandle methods_default_annotations(THREAD, +- scratch_class->methods_default_annotations()); ++ // Revert pool holder for old version of klass (it was updated by one of ours closure!) ++ old->constants()->set_pool_holder(old_oop); + +- if (methods_default_annotations.is_null() +- || methods_default_annotations->length() == 0) { +- // no methods_default_annotations so nothing to do +- return true; +- } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_default_annotations length=%d", +- methods_default_annotations->length())); ++ if (old->array_klasses() != NULL) { ++ // Transfer the array classes, otherwise we might get cast exceptions when casting array types. ++ assert(cur->array_klasses() == NULL, "just checking"); ++ cur->set_array_klasses(old->array_klasses()); ++ } + +- for (int i = 0; i < methods_default_annotations->length(); i++) { +- typeArrayHandle method_default_annotations(THREAD, +- (typeArrayOop)methods_default_annotations->obj_at(i)); +- if (method_default_annotations.is_null() +- || method_default_annotations->length() == 0) { +- // this method does not have any default annotations so skip it +- continue; ++ // Initialize the new class! Special static initialization that does not execute the ++ // static constructor but copies static field values from the old class if name ++ // and signature of a static field match. ++ FieldCopier copier; ++ cur->do_local_static_fields(&copier); // TODO (tw): What about internal static fields?? ++ old->set_java_mirror(cur->java_mirror()); ++ ++ // Transfer init state ++ instanceKlass::ClassState state = old->init_state(); ++ if (state > instanceKlass::linked) { ++ cur->set_init_state(state); ++ } + } + +- int byte_i = 0; // byte index into method_default_annotations ++ RC_TIMER_STOP(_timer_heap_iteration); ++ RC_TIMER_START(_timer_redefinition); ++ if (objectClosure.needs_instance_update()){ + +- if (!rewrite_cp_refs_in_element_value( +- method_default_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad default element_value at %d", i)); +- // propagate failure back to caller +- return false; +- } ++ // Do a full garbage collection to update the instance sizes accordingly ++ TRACE_RC1("Before performing full GC!"); ++ Universe::set_redefining_gc_run(true); ++ notify_gc_begin(true); ++ Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); ++ notify_gc_end(); ++ Universe::set_redefining_gc_run(false); ++ TRACE_RC1("GC done!"); + } + +- return true; +-} // end rewrite_cp_refs_in_methods_default_annotations() ++ // Unmark klassOops as "redefining" ++ for (int i=0; i<_new_classes->length(); i++) { ++ klassOop cur_klass = _new_classes->at(i)(); ++ instanceKlass* cur = (instanceKlass*)cur_klass->klass_part(); ++ cur->set_redefining(false); ++ cur->clear_update_information(); ++ } + ++ // Disable any dependent concurrent compilations ++ SystemDictionary::notice_modification(); + +-// Rewrite constant pool references in the method's stackmap table. 
+-// These "structures" are adapted from the StackMapTable_attribute that +-// is described in section 4.8.4 of the 6.0 version of the VM spec +-// (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// stack_map { +-// u2 number_of_entries; +-// stack_map_frame entries[number_of_entries]; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table( +- methodHandle method, TRAPS) { ++ // Set flag indicating that some invariants are no longer true. ++ // See jvmtiExport.hpp for detailed explanation. ++ JvmtiExport::set_has_redefined_a_class(); + +- if (!method->has_stackmap_table()) { +- return; +- } ++ // Clean up caches in the compiler interface and compiler threads ++ ciObjectFactory::resort_shared_ci_objects(); + +- typeArrayOop stackmap_data = method->stackmap_data(); +- address stackmap_p = (address)stackmap_data->byte_at_addr(0); +- address stackmap_end = stackmap_p + stackmap_data->length(); +- +- assert(stackmap_p + 2 <= stackmap_end, "no room for number_of_entries"); +- u2 number_of_entries = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("number_of_entries=%u", number_of_entries)); +- +- // walk through each stack_map_frame +- u2 calc_number_of_entries = 0; +- for (; calc_number_of_entries < number_of_entries; calc_number_of_entries++) { +- // The stack_map_frame structure is a u1 frame_type followed by +- // 0 or more bytes of data: +- // +- // union stack_map_frame { +- // same_frame; +- // same_locals_1_stack_item_frame; +- // same_locals_1_stack_item_frame_extended; +- // chop_frame; +- // same_frame_extended; +- // append_frame; +- // full_frame; +- // } +- +- assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type"); +- // The Linux compiler does not like frame_type to be u1 or u2. 
It +- // issues the following warning for the first if-statement below: +- // +- // "warning: comparison is always true due to limited range of data type" +- // +- u4 frame_type = *stackmap_p; +- stackmap_p++; +- +- // same_frame { +- // u1 frame_type = SAME; /* 0-63 */ +- // } +- if (frame_type >= 0 && frame_type <= 63) { +- // nothing more to do for same_frame +- } +- +- // same_locals_1_stack_item_frame { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM; /* 64-127 */ +- // verification_type_info stack[1]; +- // } +- else if (frame_type >= 64 && frame_type <= 127) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // reserved for future use +- else if (frame_type >= 128 && frame_type <= 246) { +- // nothing more to do for reserved frame_types +- } +- +- // same_locals_1_stack_item_frame_extended { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */ +- // u2 offset_delta; +- // verification_type_info stack[1]; +- // } +- else if (frame_type == 247) { +- stackmap_p += 2; +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // chop_frame { +- // u1 frame_type = CHOP; /* 248-250 */ +- // u2 offset_delta; +- // } +- else if (frame_type >= 248 && frame_type <= 250) { +- stackmap_p += 2; +- } +- +- // same_frame_extended { +- // u1 frame_type = SAME_FRAME_EXTENDED; /* 251*/ +- // u2 offset_delta; +- // } +- else if (frame_type == 251) { +- stackmap_p += 2; +- } +- +- // append_frame { +- // u1 frame_type = APPEND; /* 252-254 */ +- // u2 offset_delta; +- // verification_type_info locals[frame_type - 251]; +- // } +- else if (frame_type >= 252 && frame_type <= 254) { +- assert(stackmap_p + 2 <= stackmap_end, +- "no room for offset_delta"); +- stackmap_p += 2; +- u1 len = frame_type - 251; +- for (u1 i = 0; i < len; i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } ++#ifdef ASSERT + +- // full_frame { +- // u1 frame_type = FULL_FRAME; /* 255 */ +- // u2 offset_delta; +- // u2 number_of_locals; +- // verification_type_info locals[number_of_locals]; +- // u2 number_of_stack_items; +- // verification_type_info stack[number_of_stack_items]; +- // } +- else if (frame_type == 255) { +- assert(stackmap_p + 2 + 2 <= stackmap_end, +- "no room for smallest full_frame"); +- stackmap_p += 2; +- +- u2 number_of_locals = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 locals_i = 0; locals_i < number_of_locals; locals_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } ++ // Universe::verify(); ++ // JNIHandles::verify(); + +- // Use the largest size for the number_of_stack_items, but only get +- // the right number of bytes. 
+- u2 number_of_stack_items = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; ++ SystemDictionary::classes_do(check_class, thread); ++#endif + +- for (u2 stack_i = 0; stack_i < number_of_stack_items; stack_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- } // end while there is a stack_map_frame +- assert(number_of_entries == calc_number_of_entries, "sanity check"); +-} // end rewrite_cp_refs_in_stack_map_table() ++ RC_TIMER_STOP(_timer_redefinition); + ++ if (TraceRedefineClasses > 0) { ++ tty->flush(); ++ } ++} + +-// Rewrite constant pool references in the verification type info +-// portion of the method's stackmap table. These "structures" are +-// adapted from the StackMapTable_attribute that is described in +-// section 4.8.4 of the 6.0 version of the VM spec (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// The verification_type_info structure is a u1 tag followed by 0 or +-// more bytes of data: +-// +-// union verification_type_info { +-// Top_variable_info; +-// Integer_variable_info; +-// Float_variable_info; +-// Long_variable_info; +-// Double_variable_info; +-// Null_variable_info; +-// UninitializedThis_variable_info; +-// Object_variable_info; +-// Uninitialized_variable_info; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_verification_type_info( +- address& stackmap_p_ref, address stackmap_end, u2 frame_i, +- u1 frame_type, TRAPS) { +- +- assert(stackmap_p_ref + 1 <= stackmap_end, "no room for tag"); +- u1 tag = *stackmap_p_ref; +- stackmap_p_ref++; +- +- switch (tag) { +- // Top_variable_info { +- // u1 tag = ITEM_Top; /* 0 */ +- // } +- // verificationType.hpp has zero as ITEM_Bogus instead of ITEM_Top +- case 0: // fall through +- +- // Integer_variable_info { +- // u1 tag = ITEM_Integer; /* 1 */ +- // } +- case ITEM_Integer: // fall through +- +- // Float_variable_info { +- // u1 tag = ITEM_Float; /* 2 */ +- // } +- case ITEM_Float: // fall through +- +- // Double_variable_info { +- // u1 tag = ITEM_Double; /* 3 */ +- // } +- case ITEM_Double: // fall through +- +- // Long_variable_info { +- // u1 tag = ITEM_Long; /* 4 */ +- // } +- case ITEM_Long: // fall through +- +- // Null_variable_info { +- // u1 tag = ITEM_Null; /* 5 */ +- // } +- case ITEM_Null: // fall through +- +- // UninitializedThis_variable_info { +- // u1 tag = ITEM_UninitializedThis; /* 6 */ +- // } +- case ITEM_UninitializedThis: +- // nothing more to do for the above tag types +- break; ++void VM_RedefineClasses::doit_epilogue() { + +- // Object_variable_info { +- // u1 tag = ITEM_Object; /* 7 */ +- // u2 cpool_index; +- // } +- case ITEM_Object: +- { +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for cpool_index"); +- u2 cpool_index = Bytes::get_Java_u2(stackmap_p_ref); +- u2 new_cp_index = find_new_index(cpool_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("mapped old cpool_index=%d", cpool_index)); +- Bytes::put_Java_u2(stackmap_p_ref, new_cp_index); +- cpool_index = new_cp_index; +- } +- stackmap_p_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, cpool_index=%d", frame_i, +- frame_type, cpool_index)); +- } break; +- +- // Uninitialized_variable_info { +- // u1 tag = ITEM_Uninitialized; /* 8 */ +- // u2 offset; +- // } +- case ITEM_Uninitialized: +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for offset"); +- stackmap_p_ref += 2; +- break; ++ 
RC_TIMER_START(_timer_vm_op_epilogue); + +- default: +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, bad tag=0x%x", frame_i, frame_type, tag)); +- ShouldNotReachHere(); +- break; +- } // end switch (tag) +-} // end rewrite_cp_refs_in_verification_type_info() +- +- +-// Change the constant pool associated with klass scratch_class to +-// scratch_cp. If shrink is true, then scratch_cp_length elements +-// are copied from scratch_cp to a smaller constant pool and the +-// smaller constant pool is associated with scratch_class. +-void VM_RedefineClasses::set_new_constant_pool( +- instanceKlassHandle scratch_class, constantPoolHandle scratch_cp, +- int scratch_cp_length, bool shrink, TRAPS) { +- assert(!shrink || scratch_cp->length() >= scratch_cp_length, "sanity check"); +- +- if (shrink) { +- // scratch_cp is a merged constant pool and has enough space for a +- // worst case merge situation. We want to associate the minimum +- // sized constant pool with the klass to save space. +- constantPoolHandle smaller_cp(THREAD, +- oopFactory::new_constantPool(scratch_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- // preserve orig_length() value in the smaller copy +- int orig_length = scratch_cp->orig_length(); +- assert(orig_length != 0, "sanity check"); +- smaller_cp->set_orig_length(orig_length); +- scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD); +- scratch_cp = smaller_cp; +- smaller_cp()->set_is_conc_safe(true); +- } +- +- // attach new constant pool to klass +- scratch_cp->set_pool_holder(scratch_class()); +- +- // attach klass to new constant pool +- scratch_class->set_constants(scratch_cp()); +- +- int i; // for portability +- +- // update each field in klass to use new constant pool indices as needed +- for (JavaFieldStream fs(scratch_class); !fs.done(); fs.next()) { +- jshort cur_index = fs.name_index(); +- jshort new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-name_index change: %d to %d", cur_index, new_index)); +- fs.set_name_index(new_index); +- } +- cur_index = fs.signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-signature_index change: %d to %d", cur_index, new_index)); +- fs.set_signature_index(new_index); +- } +- cur_index = fs.initval_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-initval_index change: %d to %d", cur_index, new_index)); +- fs.set_initval_index(new_index); +- } +- cur_index = fs.generic_signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-generic_signature change: %d to %d", cur_index, new_index)); +- fs.set_generic_signature_index(new_index); +- } +- } // end for each field +- +- // Update constant pool indices in the inner classes info to use +- // new constant indices as needed. The inner classes info is a +- // quadruple: +- // (inner_class_info, outer_class_info, inner_name, inner_access_flags) +- InnerClassesIterator iter(scratch_class); +- for (; !iter.done(); iter.next()) { +- int cur_index = iter.inner_class_info_index(); +- if (cur_index == 0) { +- continue; // JVM spec. 
allows null inner class refs so skip it +- } +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_class_info change: %d to %d", cur_index, new_index)); +- iter.set_inner_class_info_index(new_index); +- } +- cur_index = iter.outer_class_info_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("outer_class_info change: %d to %d", cur_index, new_index)); +- iter.set_outer_class_info_index(new_index); +- } +- cur_index = iter.inner_name_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_name change: %d to %d", cur_index, new_index)); +- iter.set_inner_name_index(new_index); +- } +- } // end for each inner class +- +- // Attach each method in klass to the new constant pool and update +- // to use new constant pool indices as needed: +- objArrayHandle methods(THREAD, scratch_class->methods()); +- for (i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- method->set_constants(scratch_cp()); +- +- int new_index = find_new_index(method->name_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-name_index change: %d to %d", method->name_index(), +- new_index)); +- method->set_name_index(new_index); +- } +- new_index = find_new_index(method->signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-signature_index change: %d to %d", +- method->signature_index(), new_index)); +- method->set_signature_index(new_index); +- } +- new_index = find_new_index(method->generic_signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-generic_signature_index change: %d to %d", +- method->generic_signature_index(), new_index)); +- method->set_generic_signature_index(new_index); +- } +- +- // Update constant pool indices in the method's checked exception +- // table to use new constant indices as needed. +- int cext_length = method->checked_exceptions_length(); +- if (cext_length > 0) { +- CheckedExceptionElement * cext_table = +- method->checked_exceptions_start(); +- for (int j = 0; j < cext_length; j++) { +- int cur_index = cext_table[j].class_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("cext-class_cp_index change: %d to %d", cur_index, new_index)); +- cext_table[j].class_cp_index = (u2)new_index; +- } +- } // end for each checked exception table entry +- } // end if there are checked exception table entries +- +- // Update each catch type index in the method's exception table +- // to use new constant pool indices as needed. The exception table +- // holds quadruple entries of the form: +- // (beg_bci, end_bci, handler_bci, klass_index) +- +- ExceptionTable ex_table(method()); +- int ext_length = ex_table.length(); +- +- for (int j = 0; j < ext_length; j ++) { +- int cur_index = ex_table.catch_type_index(j); +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("ext-klass_index change: %d to %d", cur_index, new_index)); +- ex_table.set_catch_type_index(j, new_index); +- } +- } // end for each exception table entry +- +- // Update constant pool indices in the method's local variable +- // table to use new constant indices as needed. 
The local variable +- // table hold sextuple entries of the form: +- // (start_pc, length, name_index, descriptor_index, signature_index, slot) +- int lvt_length = method->localvariable_table_length(); +- if (lvt_length > 0) { +- LocalVariableTableElement * lv_table = +- method->localvariable_table_start(); +- for (int j = 0; j < lvt_length; j++) { +- int cur_index = lv_table[j].name_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-name_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].name_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].descriptor_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-descriptor_cp_index change: %d to %d", cur_index, +- new_index)); +- lv_table[j].descriptor_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].signature_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-signature_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].signature_cp_index = (u2)new_index; +- } +- } // end for each local variable table entry +- } // end if there are local variable table entries ++ //unlock_threads(); + +- rewrite_cp_refs_in_stack_map_table(method, THREAD); +- } // end for each method +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +-} // end set_new_constant_pool() ++ ResourceMark mark; + ++ VM_GC_Operation::doit_epilogue(); ++ TRACE_RC1("GC Operation epilogue finished! "); + +-// Unevolving classes may point to methods of the_class directly +-// from their constant pool caches, itables, and/or vtables. We +-// use the SystemDictionary::classes_do() facility and this helper +-// to fix up these pointers. +-// +-// Note: We currently don't support updating the vtable in +-// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. +-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(klassOop k_oop, +- oop initiating_loader, TRAPS) { +- Klass *k = k_oop->klass_part(); +- if (k->oop_is_instance()) { +- HandleMark hm(THREAD); +- instanceKlass *ik = (instanceKlass *) k; ++ // Free the array of scratch classes ++ delete _new_classes; ++ _new_classes = NULL; + +- // HotSpot specific optimization! HotSpot does not currently +- // support delegation from the bootstrap class loader to a +- // user-defined class loader. This means that if the bootstrap +- // class loader is the initiating class loader, then it will also +- // be the defining class loader. This also means that classes +- // loaded by the bootstrap class loader cannot refer to classes +- // loaded by a user-defined class loader. Note: a user-defined +- // class loader can delegate to the bootstrap class loader. +- // +- // If the current class being redefined has a user-defined class +- // loader as its defining class loader, then we can skip all +- // classes loaded by the bootstrap class loader. +- bool is_user_defined = +- instanceKlass::cast(_the_class_oop)->class_loader() != NULL; +- if (is_user_defined && ik->class_loader() == NULL) { +- return; +- } ++ // Free the array of affected classes ++ delete _affected_klasses; ++ _affected_klasses = NULL; + +- // This is a very busy routine. We don't want too much tracing +- // printed out. +- bool trace_name_printed = false; +- +- // Very noisy: only enable this call if you are trying to determine +- // that a specific class gets found by this routine. 
+- // RC_TRACE macro has an embedded ResourceMark +- // RC_TRACE_WITH_THREAD(0x00100000, THREAD, +- // ("adjust check: name=%s", ik->external_name())); +- // trace_name_printed = true; +- +- // Fix the vtable embedded in the_class and subclasses of the_class, +- // if one exists. We discard scratch_class and we don't keep an +- // instanceKlass around to hold obsolete methods so we don't have +- // any other instanceKlass embedded vtables to update. The vtable +- // holds the methodOops for virtual (but not final) methods. +- if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { +- // ik->vtable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->vtable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // If the current class has an itable and we are either redefining an +- // interface or if the current class is a subclass of the_class, then +- // we potentially have to fix the itable. If we are redefining an +- // interface, then we have to call adjust_method_entries() for +- // every instanceKlass that has an itable since there isn't a +- // subclass relationship between an interface and an instanceKlass. +- if (ik->itable_length() > 0 && (Klass::cast(_the_class_oop)->is_interface() +- || ik->is_subclass_of(_the_class_oop))) { +- // ik->itable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->itable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // The constant pools in other classes (other_cp) can refer to +- // methods in the_class. We have to update method information in +- // other_cp's cache. If other_cp has a previous version, then we +- // have to repeat the process for each previous version. The +- // constant pool cache holds the methodOops for non-virtual +- // methods and for virtual, final methods. +- // +- // Special case: if the current class is the_class, then new_cp +- // has already been attached to the_class and old_cp has already +- // been added as a previous version. The new_cp doesn't have any +- // cached references to old methods so it doesn't need to be +- // updated. We can simply start with the previous version(s) in +- // that case. +- constantPoolHandle other_cp; +- constantPoolCacheOop cp_cache; +- +- if (k_oop != _the_class_oop) { +- // this klass' constant pool cache may need adjustment +- other_cp = constantPoolHandle(ik->constants()); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- { +- ResourceMark rm(THREAD); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. 
+- { +- // the previous versions' constant pool caches may need adjustment +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- other_cp = pv_info->prev_constant_pool_handle(); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up +- } +-} ++ TRACE_RC1("Redefinition finished!"); + +-void VM_RedefineClasses::update_jmethod_ids() { +- for (int j = 0; j < _matching_methods_length; ++j) { +- methodOop old_method = _matching_old_methods[j]; +- jmethodID jmid = old_method->find_jmethod_id_or_null(); +- if (jmid != NULL) { +- // There is a jmethodID, change it to point to the new method +- methodHandle new_method_h(_matching_new_methods[j]); +- JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); +- assert(JNIHandles::resolve_jmethod_id(jmid) == _matching_new_methods[j], +- "should be replaced"); +- } +- } ++ RC_TIMER_STOP(_timer_vm_op_epilogue); + } + +-void VM_RedefineClasses::check_methods_and_mark_as_obsolete( +- BitMap *emcp_methods, int * emcp_method_count_p) { +- *emcp_method_count_p = 0; +- int obsolete_count = 0; +- int old_index = 0; +- for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { +- methodOop old_method = _matching_old_methods[j]; +- methodOop new_method = _matching_new_methods[j]; +- methodOop old_array_method; +- +- // Maintain an old_index into the _old_methods array by skipping +- // deleted methods +- while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) +- != old_method) { +- ++old_index; +- } +- +- if (MethodComparator::methods_EMCP(old_method, new_method)) { +- // The EMCP definition from JSR-163 requires the bytecodes to be +- // the same with the exception of constant pool indices which may +- // differ. However, the constants referred to by those indices +- // must be the same. +- // +- // We use methods_EMCP() for comparison since constant pool +- // merging can remove duplicate constant pool entries that were +- // present in the old method and removed from the rewritten new +- // method. A faster binary comparison function would consider the +- // old and new methods to be different when they are actually +- // EMCP. +- // +- // The old and new methods are EMCP and you would think that we +- // could get rid of one of them here and now and save some space. +- // However, the concept of EMCP only considers the bytecodes and +- // the constant pool entries in the comparison. Other things, +- // e.g., the line number table (LNT) or the local variable table +- // (LVT) don't count in the comparison. So the new (and EMCP) +- // method can have a new LNT that we need so we can't just +- // overwrite the new method with the old method. +- // +- // When this routine is called, we have already attached the new +- // methods to the_class so the old methods are effectively +- // overwritten. However, if an old method is still executing, +- // then the old method cannot be collected until sometime after +- // the old method call has returned. So the overwriting of old +- // methods by new methods will save us space except for those +- // (hopefully few) old methods that are still executing. +- // +- // A method refers to a constMethodOop and this presents another +- // possible avenue to space savings. 
The constMethodOop in the
+- // new method contains possibly new attributes (LNT, LVT, etc).
+- // At first glance, it seems possible to save space by replacing
+- // the constMethodOop in the old method with the constMethodOop
+- // from the new method. The old and new methods would share the
+- // same constMethodOop and we would save the space occupied by
+- // the old constMethodOop. However, the constMethodOop contains
+- // a back reference to the containing method. Sharing the
+- // constMethodOop between two methods could lead to confusion in
+- // the code that uses the back reference. This would lead to
+- // brittle code that could be broken in non-obvious ways now or
+- // in the future.
+- //
+- // Another possibility is to copy the constMethodOop from the new
+- // method to the old method and then overwrite the new method with
+- // the old method. Since the constMethodOop contains the bytecodes
+- // for the method embedded in the oop, this option would change
+- // the bytecodes out from under any threads executing the old
+- // method and make the thread's bcp invalid. Since EMCP requires
+- // that the bytecodes be the same modulo constant pool indices, it
+- // is straightforward to compute the correct new bcp in the new
+- // constMethodOop from the old bcp in the old constMethodOop. The
+- // time-consuming part would be searching all the frames in all
+- // of the threads to find all of the calls to the old method.
+- //
+- // It looks like we will have to live with the limited savings
+- // that we get from effectively overwriting the old methods
+- // when the new methods are attached to the_class.
+-
+- // track which methods are EMCP for add_previous_version() call
+- emcp_methods->set_bit(old_index);
+- (*emcp_method_count_p)++;
+-
+- // An EMCP method is _not_ obsolete. An obsolete method has a
+- // different jmethodID than the current method. An EMCP method
+- // has the same jmethodID as the current method. Having the
+- // same jmethodID for all EMCP versions of a method allows for
+- // a consistent view of the EMCP methods regardless of which
+- // EMCP method you happen to have in hand. For example, a
+- // breakpoint set in one EMCP method will work for all EMCP
+- // versions of the method including the current one.
+- } else {
+- // mark obsolete methods as such
+- old_method->set_is_obsolete();
+- obsolete_count++;
+-
+- // obsolete methods need a unique idnum
+- u2 num = instanceKlass::cast(_the_class_oop)->next_method_idnum();
+- if (num != constMethodOopDesc::UNSET_IDNUM) {
+-// u2 old_num = old_method->method_idnum();
+- old_method->set_method_idnum(num);
+-// TO DO: attach obsolete annotations to obsolete method's new idnum
+- }
+- // With tracing we try not to "yack" too much. The position of
+- // this trace assumes there are fewer obsolete methods than
+- // EMCP methods.
+- RC_TRACE(0x00000100, ("mark %s(%s) as obsolete",
+- old_method->name()->as_C_string(),
+- old_method->signature()->as_C_string()));
+- }
+- old_method->set_is_old();
+- }
+- for (int i = 0; i < _deleted_methods_length; ++i) {
+- methodOop old_method = _deleted_methods[i];
+-
+- assert(old_method->vtable_index() < 0,
+- "cannot delete methods with vtable entries");
+-
+- // Mark all deleted methods as old and obsolete
+- old_method->set_is_old();
+- old_method->set_is_obsolete();
+- ++obsolete_count;
+- // With tracing we try not to "yack" too much. The position of
+- // this trace assumes there are fewer obsolete methods than
+- // EMCP methods. 
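The EMCP rule used above — bytecodes identical except that constant-pool indices may differ, provided the indices refer to equal constants — is easy to picture with a toy comparator. The following is a minimal, self-contained sketch, not HotSpot's MethodComparator: the opcode set and the index-remapping table are invented for illustration.

#include <cstdint>
#include <map>
#include <vector>

// Toy opcode set: only TOY_LDC_W carries a two-byte constant-pool index.
enum ToyOpcode : uint8_t { TOY_NOP = 0x00, TOY_IADD = 0x60, TOY_LDC_W = 0x13 };

// Two streams are "EMCP" here when they are byte-identical except that a
// constant-pool index i in the old stream may appear as remap[i] in the new.
bool toy_methods_emcp(const std::vector<uint8_t>& old_bc,
                      const std::vector<uint8_t>& new_bc,
                      const std::map<uint16_t, uint16_t>& remap) {
  if (old_bc.size() != new_bc.size()) return false;
  for (size_t i = 0; i < old_bc.size(); ) {
    if (old_bc[i] != new_bc[i]) return false;      // opcodes must match
    if (old_bc[i] == TOY_LDC_W) {
      if (i + 2 >= old_bc.size()) return false;    // truncated operand
      uint16_t oi = uint16_t((old_bc[i + 1] << 8) | old_bc[i + 2]);
      uint16_t ni = uint16_t((new_bc[i + 1] << 8) | new_bc[i + 2]);
      auto it = remap.find(oi);
      uint16_t expected = (it == remap.end()) ? oi : it->second;
      if (ni != expected) return false;            // index must map across
      i += 3;                                      // skip opcode + operand
    } else {
      i += 1;                                      // operand-free opcode
    }
  }
  return true;
}

Real bytecode has many constant-pool-bearing opcodes of varying operand widths, and methods_EMCP() additionally requires that the constants reached through the two indices are themselves equal after constant-pool merging.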
+- RC_TRACE(0x00000100, ("mark deleted %s(%s) as obsolete", +- old_method->name()->as_C_string(), +- old_method->signature()->as_C_string())); +- } +- assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), +- "sanity check"); +- RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p, +- obsolete_count)); ++bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { ++ // classes for primitives cannot be redefined ++ if (java_lang_Class::is_primitive(klass_mirror)) { ++ return false; ++ } ++ klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror); ++ // classes for arrays cannot be redefined ++ if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) { ++ return false; ++ } ++ return true; + } + +-// This internal class transfers the native function registration from old methods +-// to new methods. It is designed to handle both the simple case of unchanged +-// native methods and the complex cases of native method prefixes being added and/or +-// removed. +-// It expects only to be used during the VM_RedefineClasses op (a safepoint). +-// +-// This class is used after the new methods have been installed in "the_class". +-// +-// So, for example, the following must be handled. Where 'm' is a method and +-// a number followed by an underscore is a prefix. +-// +-// Old Name New Name +-// Simple transfer to new method m -> m +-// Add prefix m -> 1_m +-// Remove prefix 1_m -> m +-// Simultaneous add of prefixes m -> 3_2_1_m +-// Simultaneous removal of prefixes 3_2_1_m -> m +-// Simultaneous add and remove 1_m -> 2_m +-// Same, caused by prefix removal only 3_2_1_m -> 3_2_m +-// +-class TransferNativeFunctionRegistration { +- private: +- instanceKlassHandle the_class; +- int prefix_count; +- char** prefixes; +- +- // Recursively search the binary tree of possibly prefixed method names. +- // Iteration could be used if all agents were well behaved. Full tree walk is +- // more resilent to agents not cleaning up intermediate methods. +- // Branch at each depth in the binary tree is: +- // (1) without the prefix. +- // (2) with the prefix. +- // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) +- methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, +- Symbol* signature) { +- TempNewSymbol name_symbol = SymbolTable::probe(name_str, (int)name_len); +- if (name_symbol != NULL) { +- methodOop method = Klass::cast(the_class())->lookup_method(name_symbol, signature); +- if (method != NULL) { +- // Even if prefixed, intermediate methods must exist. +- if (method->is_native()) { +- // Wahoo, we found a (possibly prefixed) version of the method, return it. +- return method; +- } +- if (depth < prefix_count) { +- // Try applying further prefixes (other than this one). +- method = search_prefix_name_space(depth+1, name_str, name_len, signature); +- if (method != NULL) { +- return method; // found +- } +- +- // Try adding this prefix to the method name and see if it matches +- // another method name. 
+- char* prefix = prefixes[depth]; +- size_t prefix_len = strlen(prefix); +- size_t trial_len = name_len + prefix_len; +- char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); +- strcpy(trial_name_str, prefix); +- strcat(trial_name_str, name_str); +- method = search_prefix_name_space(depth+1, trial_name_str, trial_len, +- signature); +- if (method != NULL) { +- // If found along this branch, it was prefixed, mark as such +- method->set_is_prefixed_native(); +- return method; // found +- } +- } +- } +- } +- return NULL; // This whole branch bore nothing ++#ifdef ASSERT ++ ++void VM_RedefineClasses::verify_classes(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ ++ instanceKlassHandle k_handle(THREAD, k_oop); ++ Verifier::verify(k_handle, Verifier::ThrowException, true, true, THREAD); ++ k_oop = k_oop->klass_part()->old_version(); + } ++} + +- // Return the method name with old prefixes stripped away. +- char* method_name_without_prefixes(methodOop method) { +- Symbol* name = method->name(); +- char* name_str = name->as_utf8(); ++#endif + +- // Old prefixing may be defunct, strip prefixes, if any. +- for (int i = prefix_count-1; i >= 0; i--) { +- char* prefix = prefixes[i]; +- size_t prefix_len = strlen(prefix); +- if (strncmp(prefix, name_str, prefix_len) == 0) { +- name_str += prefix_len; ++// Rewrite faster byte-codes back to their slower equivalent. Undoes rewriting happening in templateTable_xxx.cpp ++// The reason is that once we zero cpool caches, we need to re-resolve all entries again. Faster bytecodes do not ++// do that, they assume that cache entry is resolved already. ++void VM_RedefineClasses::unpatch_bytecode(methodOop method) { ++ RawBytecodeStream bcs(method); ++ Bytecodes::Code code; ++ Bytecodes::Code java_code; ++ while (!bcs.is_last_bytecode()) { ++ code = bcs.raw_next(); ++ address bcp = bcs.bcp(); ++ ++ if (code == Bytecodes::_breakpoint) { ++ int bci = method->bci_from(bcp); ++ code = method->orig_bytecode_at(bci); ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ // Let breakpoint table handling unpatch bytecode ++ method->set_orig_bytecode_at(bci, java_code); ++ } ++ } else { ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ *bcp = java_code; + } + } +- return name_str; +- } + +- // Strip any prefixes off the old native method, then try to find a +- // (possibly prefixed) new native that matches it. 
+- methodOop strip_and_search_for_new_native(methodOop method) { +- ResourceMark rm; +- char* name_str = method_name_without_prefixes(method); +- return search_prefix_name_space(0, name_str, strlen(name_str), +- method->signature()); ++ // Additionally, we need to unpatch bytecode at bcp+1 for fast_xaccess (which would be fast field access) ++ if (code == Bytecodes::_fast_iaccess_0 || code == Bytecodes::_fast_aaccess_0 || code == Bytecodes::_fast_faccess_0) { ++ Bytecodes::Code code2 = Bytecodes::code_or_bp_at(bcp + 1); ++ assert(code2 == Bytecodes::_fast_igetfield || ++ code2 == Bytecodes::_fast_agetfield || ++ code2 == Bytecodes::_fast_fgetfield, ""); ++ *(bcp + 1) = Bytecodes::java_code(code2); ++ } + } ++} + +- public: ++// Unevolving classes may point to old methods directly ++// from their constant pool caches, itables, and/or vtables. We ++// use the SystemDictionary::classes_do() facility and this helper ++// to fix up these pointers. Additional field offsets and vtable indices ++// in the constant pool cache entries are fixed. ++// ++// Note: We currently don't support updating the vtable in ++// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. ++void VM_RedefineClasses::adjust_cpool_cache(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ Klass *k = k_oop->klass_part(); ++ if (k->oop_is_instance()) { ++ HandleMark hm(THREAD); ++ instanceKlass *ik = (instanceKlass *) k; + +- // Construct a native method transfer processor for this class. +- TransferNativeFunctionRegistration(instanceKlassHandle _the_class) { +- assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); ++ constantPoolHandle other_cp; ++ constantPoolCacheOop cp_cache; + +- the_class = _the_class; +- prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count); +- } ++ other_cp = constantPoolHandle(ik->constants()); + +- // Attempt to transfer any of the old or deleted methods that are native +- void transfer_registrations(methodOop* old_methods, int methods_length) { +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; ++ for (int i=0; i<other_cp->length(); i++) { ++ if (other_cp->tag_at(i).is_klass()) { ++ klassOop klass = other_cp->klass_at(i, THREAD); ++ if (klass->klass_part()->new_version() != NULL) { + +- if (old_method->is_native() && old_method->has_native_function()) { +- methodOop new_method = strip_and_search_for_new_native(old_method); +- if (new_method != NULL) { +- // Actually set the native function in the new method. +- // Redefine does not send events (except CFLH), certainly not this +- // behind the scenes re-registration. +- new_method->set_native_function(old_method->native_function(), +- !methodOopDesc::native_bind_event_is_interesting); ++ // (tw) TODO: check why/if this is necessary ++ other_cp->klass_at_put(i, klass->klass_part()->new_version()); ++ } ++ klass = other_cp->klass_at(i, THREAD); ++ assert(klass->klass_part()->new_version() == NULL, "Must be new klass!"); + } + } ++ ++ cp_cache = other_cp->cache(); ++ ++ if (cp_cache != NULL) { ++ cp_cache->adjust_entries(); ++ } ++ ++ // If bytecode rewriting is enabled, we also need to unpatch bytecode to force resolution of zeroed entries ++ if (RewriteBytecodes) { ++ ik->methods_do(unpatch_bytecode); ++ } + } ++ k_oop = k_oop->klass_part()->old_version(); + } +-}; ++} + +-// Don't lose the association between a native method and its JNI function. 
+-void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle the_class) { +- TransferNativeFunctionRegistration transfer(the_class); +- transfer.transfer_registrations(_deleted_methods, _deleted_methods_length); +- transfer.transfer_registrations(_matching_old_methods, _matching_methods_length); ++void VM_RedefineClasses::update_jmethod_ids() { ++ for (int j = 0; j < _matching_methods_length; ++j) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ TRACE_RC3("matching method %s", old_method->name_and_sig_as_C_string()); ++ ++ jmethodID jmid = old_method->find_jmethod_id_or_null(); ++ if (old_method->new_version() != NULL && jmid == NULL) { ++ // (tw) Have to create jmethodID in this case ++ jmid = old_method->jmethod_id(); ++ } ++ ++ if (jmid != NULL) { ++ // There is a jmethodID, change it to point to the new method ++ methodHandle new_method_h((methodOop)_new_methods->obj_at(_matching_new_methods[j])); ++ if (old_method->new_version() == NULL) { ++ methodHandle old_method_h((methodOop)_old_methods->obj_at(_matching_old_methods[j])); ++ jmethodID new_jmethod_id = JNIHandles::make_jmethod_id(old_method_h); ++ bool result = instanceKlass::cast(old_method_h->method_holder())->update_jmethod_id(old_method_h(), new_jmethod_id); ++ //TRACE_RC3("Changed jmethodID for old method assigned to %d / result=%d", new_jmethod_id, result); ++ //TRACE_RC3("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); ++ } else { ++ jmethodID mid = new_method_h->jmethod_id(); ++ bool result = instanceKlass::cast(new_method_h->method_holder())->update_jmethod_id(new_method_h(), jmid); ++ //TRACE_RC3("Changed jmethodID for new method assigned to %d / result=%d", jmid, result); ++ } ++ JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); ++ //TRACE_RC3("changing method associated with jmethod id %d to %s", (int)jmid, new_method_h->name()->as_C_string()); ++ assert(JNIHandles::resolve_jmethod_id(jmid) == (methodOop)_new_methods->obj_at(_matching_new_methods[j]), "should be replaced"); ++ jmethodID mid = ((methodOop)_new_methods->obj_at(_matching_new_methods[j]))->jmethod_id(); ++ assert(JNIHandles::resolve_non_null((jobject)mid) == new_method_h(), "must match!"); ++ ++ //TRACE_RC3("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); ++ } ++ } + } + ++ + // Deoptimize all compiled code that depends on this class. 
+ // + // If the can_redefine_classes capability is obtained in the onload +@@ -2964,7 +1835,10 @@ void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) { + + // All dependencies have been recorded from startup or this is a second or + // subsequent use of RedefineClasses +- if (JvmtiExport::all_dependencies_are_recorded()) { ++ ++ // For now deopt all ++ // (tw) TODO: Improve the dependency system such that we can safely deopt only a subset of the methods ++ if (0 && JvmtiExport::all_dependencies_are_recorded()) { + Universe::flush_evol_dependents_on(k_h); + } else { + CodeCache::mark_all_nmethods_for_deoptimization(); +@@ -2987,10 +1861,10 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + methodOop old_method; + methodOop new_method; + +- _matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _added_methods = NEW_RESOURCE_ARRAY(methodOop, _new_methods->length()); +- _deleted_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); ++ _matching_old_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _matching_new_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _added_methods = NEW_RESOURCE_ARRAY(int, _new_methods->length()); ++ _deleted_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); + + _matching_methods_length = 0; + _deleted_methods_length = 0; +@@ -3005,36 +1879,36 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + } + // New method at the end + new_method = (methodOop) _new_methods->obj_at(nj); +- _added_methods[_added_methods_length++] = new_method; ++ _added_methods[_added_methods_length++] = nj; + ++nj; + } else if (nj >= _new_methods->length()) { + // Old method, at the end, is deleted + old_method = (methodOop) _old_methods->obj_at(oj); +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj; + ++oj; + } else { + old_method = (methodOop) _old_methods->obj_at(oj); + new_method = (methodOop) _new_methods->obj_at(nj); + if (old_method->name() == new_method->name()) { + if (old_method->signature() == new_method->signature()) { +- _matching_old_methods[_matching_methods_length ] = old_method; +- _matching_new_methods[_matching_methods_length++] = new_method; ++ _matching_old_methods[_matching_methods_length ] = oj;//old_method; ++ _matching_new_methods[_matching_methods_length++] = nj;//new_method; + ++nj; + ++oj; + } else { + // added overloaded have already been moved to the end, + // so this is a deleted overloaded method +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; + ++oj; + } + } else { // names don't match + if (old_method->name()->fast_compare(new_method->name()) > 0) { + // new method +- _added_methods[_added_methods_length++] = new_method; ++ _added_methods[_added_methods_length++] = nj;//new_method; + ++nj; + } else { + // deleted method +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; + ++oj; + } + } +@@ -3042,6 +1916,7 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + } + assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity"); + assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity"); ++ TRACE_RC3("Matching methods = %d / deleted methods = %d / 
added methods = %d", _matching_methods_length, _deleted_methods_length, _added_methods_length); + } + + +@@ -3049,287 +1924,176 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + // Install the redefinition of a class: + // - house keeping (flushing breakpoints and caches, deoptimizing + // dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding a weak reference to track the obsolete but interesting +-// parts of the_class + // - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-void VM_RedefineClasses::redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS) { ++void VM_RedefineClasses::redefine_single_class(instanceKlassHandle the_new_class, TRAPS) { ++ ++ ResourceMark rm(THREAD); + +- RC_TIMER_START(_timer_rsc_phase1); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); + +- oop the_class_mirror = JNIHandles::resolve_non_null(the_jclass); +- klassOop the_class_oop = java_lang_Class::as_klassOop(the_class_mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); ++ instanceKlassHandle the_old_class = instanceKlassHandle(THREAD, the_new_class->old_version()); + ++#ifndef JVMTI_KERNEL + // Remove all breakpoints in methods of this class + JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints(); +- jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop); ++ jvmti_breakpoints.clearall_in_class_at_safepoint(the_old_class()); ++#endif // !JVMTI_KERNEL + +- if (the_class_oop == Universe::reflect_invoke_cache()->klass()) { ++ if (the_old_class() == Universe::reflect_invoke_cache()->klass()) { + // We are redefining java.lang.reflect.Method. Method.invoke() is + // cached and users of the cache care about each active version of + // the method so we have to track this previous version. + // Do this before methods get switched + Universe::reflect_invoke_cache()->add_previous_version( +- the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); ++ the_old_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); + } + +- // Deoptimize all compiled code that depends on this class +- flush_dependent_code(the_class, THREAD); +- +- _old_methods = the_class->methods(); +- _new_methods = scratch_class->methods(); +- _the_class_oop = the_class_oop; ++ _old_methods = the_old_class->methods(); ++ _new_methods = the_new_class->methods(); + compute_added_deleted_matching_methods(); +- update_jmethod_ids(); +- +- // Attach new constant pool to the original klass. The original +- // klass still refers to the old constant pool (for now). +- scratch_class->constants()->set_pool_holder(the_class()); +- +-#if 0 +- // In theory, with constant pool merging in place we should be able +- // to save space by using the new, merged constant pool in place of +- // the old constant pool(s). By "pool(s)" I mean the constant pool in +- // the klass version we are replacing now and any constant pool(s) in +- // previous versions of klass. Nice theory, doesn't work in practice. 
+- // When this code is enabled, even simple programs throw NullPointer +- // exceptions. I'm guessing that this is caused by some constant pool +- // cache difference between the new, merged constant pool and the +- // constant pool that was just being used by the klass. I'm keeping +- // this code around to archive the idea, but the code has to remain +- // disabled for now. +- +- // Attach each old method to the new constant pool. This can be +- // done here since we are past the bytecode verification and +- // constant pool optimization phases. +- for (int i = _old_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)_old_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- +- { +- // walk all previous versions of the klass +- instanceKlass *ik = (instanceKlass *)the_class()->klass_part(); +- PreviousVersionWalker pvw(ik); +- instanceKlassHandle ikh; +- do { +- ikh = pvw.next_previous_version(); +- if (!ikh.is_null()) { +- ik = ikh(); +- +- // attach previous version of klass to the new constant pool +- ik->set_constants(scratch_class->constants()); +- +- // Attach each method in the previous version of klass to the +- // new constant pool +- objArrayOop prev_methods = ik->methods(); +- for (int i = prev_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)prev_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- } +- } while (!ikh.is_null()); +- } +-#endif +- +- // Replace methods and constantpool +- the_class->set_methods(_new_methods); +- scratch_class->set_methods(_old_methods); // To prevent potential GCing of the old methods, +- // and to be able to undo operation easily. +- +- constantPoolOop old_constants = the_class->constants(); +- the_class->set_constants(scratch_class->constants()); +- scratch_class->set_constants(old_constants); // See the previous comment. +-#if 0 +- // We are swapping the guts of "the new class" with the guts of "the +- // class". Since the old constant pool has just been attached to "the +- // new class", it seems logical to set the pool holder in the old +- // constant pool also. However, doing this will change the observable +- // class hierarchy for any old methods that are still executing. A +- // method can query the identity of its "holder" and this query uses +- // the method's constant pool link to find the holder. The change in +- // holding class from "the class" to "the new class" can confuse +- // things. +- // +- // Setting the old constant pool's holder will also cause +- // verification done during vtable initialization below to fail. +- // During vtable initialization, the vtable's class is verified to be +- // a subtype of the method's holder. The vtable's class is "the +- // class" and the method's holder is gotten from the constant pool +- // link in the method itself. For "the class"'s directly implemented +- // methods, the method holder is "the class" itself (as gotten from +- // the new constant pool). The check works fine in this case. The +- // check also works fine for methods inherited from super classes. +- // +- // Miranda methods are a little more complicated. A miranda method is +- // provided by an interface when the class implementing the interface +- // does not provide its own method. These interfaces are implemented +- // internally as an instanceKlass. These special instanceKlasses +- // share the constant pool of the class that "implements" the +- // interface. 
By sharing the constant pool, the method holder of a +- // miranda method is the class that "implements" the interface. In a +- // non-redefine situation, the subtype check works fine. However, if +- // the old constant pool's pool holder is modified, then the check +- // fails because there is no class hierarchy relationship between the +- // vtable's class and "the new class". +- +- old_constants->set_pool_holder(scratch_class()); +-#endif + + // track which methods are EMCP for add_previous_version() call below +- BitMap emcp_methods(_old_methods->length()); ++ ++ // (tw) TODO: Check if we need the concept of EMCP? ++ BitMap emcp_methods(_old_methods->length()); + int emcp_method_count = 0; + emcp_methods.clear(); // clears 0..(length() - 1) ++ ++ // We need to mark methods as old!! + check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); +- transfer_old_native_function_registrations(the_class); +- +- // The class file bytes from before any retransformable agents mucked +- // with them was cached on the scratch class, move to the_class. +- // Note: we still want to do this if nothing needed caching since it +- // should get cleared in the_class too. +- if (the_class->get_cached_class_file_bytes() == 0) { +- // the_class doesn't have a cache yet so copy it +- the_class->set_cached_class_file( +- scratch_class->get_cached_class_file_bytes(), +- scratch_class->get_cached_class_file_len()); +- } +-#ifndef PRODUCT +- else { +- assert(the_class->get_cached_class_file_bytes() == +- scratch_class->get_cached_class_file_bytes(), "cache ptrs must match"); +- assert(the_class->get_cached_class_file_len() == +- scratch_class->get_cached_class_file_len(), "cache lens must match"); +- } +-#endif ++ update_jmethod_ids(); + +- // Replace inner_classes +- typeArrayOop old_inner_classes = the_class->inner_classes(); +- the_class->set_inner_classes(scratch_class->inner_classes()); +- scratch_class->set_inner_classes(old_inner_classes); ++ // TODO: ++ transfer_old_native_function_registrations(the_old_class); + +- // Initialize the vtable and interface table after +- // methods have been rewritten +- { +- ResourceMark rm(THREAD); +- // no exception should happen here since we explicitly +- // do not check loader constraints. +- // compare_and_normalize_class_versions has already checked: +- // - classloaders unchanged, signatures unchanged +- // - all instanceKlasses for redefined classes reused & contents updated +- the_class->vtable()->initialize_vtable(false, THREAD); +- the_class->itable()->initialize_itable(false, THREAD); +- assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception"); +- } +- +- // Leave arrays of jmethodIDs and itable index cache unchanged +- +- // Copy the "source file name" attribute from new class version +- the_class->set_source_file_name(scratch_class->source_file_name()); +- +- // Copy the "source debug extension" attribute from new class version +- the_class->set_source_debug_extension( +- scratch_class->source_debug_extension(), +- scratch_class->source_debug_extension() == NULL ? 
0 : +- (int)strlen(scratch_class->source_debug_extension())); +- +- // Use of javac -g could be different in the old and the new +- if (scratch_class->access_flags().has_localvariable_table() != +- the_class->access_flags().has_localvariable_table()) { +- +- AccessFlags flags = the_class->access_flags(); +- if (scratch_class->access_flags().has_localvariable_table()) { +- flags.set_has_localvariable_table(); +- } else { +- flags.clear_has_localvariable_table(); +- } +- the_class->set_access_flags(flags); +- } +- +- // Replace class annotation fields values +- typeArrayOop old_class_annotations = the_class->class_annotations(); +- the_class->set_class_annotations(scratch_class->class_annotations()); +- scratch_class->set_class_annotations(old_class_annotations); +- +- // Replace fields annotation fields values +- objArrayOop old_fields_annotations = the_class->fields_annotations(); +- the_class->set_fields_annotations(scratch_class->fields_annotations()); +- scratch_class->set_fields_annotations(old_fields_annotations); +- +- // Replace methods annotation fields values +- objArrayOop old_methods_annotations = the_class->methods_annotations(); +- the_class->set_methods_annotations(scratch_class->methods_annotations()); +- scratch_class->set_methods_annotations(old_methods_annotations); +- +- // Replace methods parameter annotation fields values +- objArrayOop old_methods_parameter_annotations = +- the_class->methods_parameter_annotations(); +- the_class->set_methods_parameter_annotations( +- scratch_class->methods_parameter_annotations()); +- scratch_class->set_methods_parameter_annotations(old_methods_parameter_annotations); +- +- // Replace methods default annotation fields values +- objArrayOop old_methods_default_annotations = +- the_class->methods_default_annotations(); +- the_class->set_methods_default_annotations( +- scratch_class->methods_default_annotations()); +- scratch_class->set_methods_default_annotations(old_methods_default_annotations); +- +- // Replace minor version number of class file +- u2 old_minor_version = the_class->minor_version(); +- the_class->set_minor_version(scratch_class->minor_version()); +- scratch_class->set_minor_version(old_minor_version); +- +- // Replace major version number of class file +- u2 old_major_version = the_class->major_version(); +- the_class->set_major_version(scratch_class->major_version()); +- scratch_class->set_major_version(old_major_version); +- +- // Replace CP indexes for class and name+type of enclosing method +- u2 old_class_idx = the_class->enclosing_method_class_index(); +- u2 old_method_idx = the_class->enclosing_method_method_index(); +- the_class->set_enclosing_method_indices( +- scratch_class->enclosing_method_class_index(), +- scratch_class->enclosing_method_method_index()); +- scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx); +- +- // keep track of previous versions of this class +- the_class->add_previous_version(scratch_class, &emcp_methods, +- emcp_method_count); +- +- RC_TIMER_STOP(_timer_rsc_phase1); +- RC_TIMER_START(_timer_rsc_phase2); + +- // Adjust constantpool caches and vtables for all classes +- // that reference methods of the evolved class. +- SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); + +- if (the_class->oop_map_cache() != NULL) { +- // Flush references to any obsolete methods from the oop map cache +- // so that obsolete methods are not pinned. 
+- the_class->oop_map_cache()->flush_obsolete_entries(); ++#ifdef ASSERT ++ ++// klassOop systemLookup1 = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++// assert(systemLookup1 == the_new_class(), "New class must be in system dictionary!"); ++ ++ //JNIHandles::verify(); ++ ++// klassOop systemLookup = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++ ++// assert(systemLookup == the_new_class(), "New class must be in system dictionary!"); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); ++ ++ for (int i=0; i<the_new_class->methods()->length(); i++) { ++ assert(((methodOop)the_new_class->methods()->obj_at(i))->method_holder() == the_new_class(), "method holder must match!"); + } + ++ _old_methods->verify(); ++ _new_methods->verify(); ++ ++ the_new_class->vtable()->verify(tty); ++ the_old_class->vtable()->verify(tty); ++ ++#endif ++ + // increment the classRedefinedCount field in the_class and in any + // direct and indirect subclasses of the_class +- increment_class_counter((instanceKlass *)the_class()->klass_part(), THREAD); ++ increment_class_counter((instanceKlass *)the_old_class()->klass_part(), THREAD); ++ ++} ++ ++ ++void VM_RedefineClasses::check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p) { ++ TRACE_RC3("Checking matching methods for EMCP"); ++ *emcp_method_count_p = 0; ++ int obsolete_count = 0; ++ int old_index = 0; ++ for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ methodOop new_method = (methodOop)_new_methods->obj_at(_matching_new_methods[j]); ++ methodOop old_array_method; ++ ++ // Maintain an old_index into the _old_methods array by skipping ++ // deleted methods ++ while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) ++ != old_method) { ++ ++old_index; ++ } + +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("redefined name=%s, count=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), +- java_lang_Class::classRedefinedCount(the_class_mirror), +- os::available_memory() >> 10)); ++ if (MethodComparator::methods_EMCP(old_method, new_method)) { ++ // The EMCP definition from JSR-163 requires the bytecodes to be ++ // the same with the exception of constant pool indices which may ++ // differ. However, the constants referred to by those indices ++ // must be the same. ++ // ++ // We use methods_EMCP() for comparison since constant pool ++ // merging can remove duplicate constant pool entries that were ++ // present in the old method and removed from the rewritten new ++ // method. A faster binary comparison function would consider the ++ // old and new methods to be different when they are actually ++ // EMCP. ++ ++ // track which methods are EMCP for add_previous_version() call ++ emcp_methods->set_bit(old_index); ++ (*emcp_method_count_p)++; ++ ++ // An EMCP method is _not_ obsolete. An obsolete method has a ++ // different jmethodID than the current method. An EMCP method ++ // has the same jmethodID as the current method. 
Having the ++ // same jmethodID for all EMCP versions of a method allows for ++ // a consistent view of the EMCP methods regardless of which ++ // EMCP method you happen to have in hand. For example, a ++ // breakpoint set in one EMCP method will work for all EMCP ++ // versions of the method including the current one. ++ ++ old_method->set_new_version(new_method); ++ new_method->set_old_version(old_method); ++ ++ TRACE_RC3("Found EMCP method %s", old_method->name_and_sig_as_C_string()); ++ ++ // Transfer breakpoints ++ instanceKlass *ik = instanceKlass::cast(old_method->method_holder()); ++ for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = bp->next()) { ++ TRACE_RC2("Checking breakpoint"); ++ TRACE_RC2("%d / %d", bp->match(old_method), bp->match(new_method)); ++ if (bp->match(old_method)) { ++ assert(bp->match(new_method), "if old method is method, then new method must match too"); ++ TRACE_RC2("Found a breakpoint in an old EMCP method"); ++ new_method->set_breakpoint(bp->bci()); ++ } ++ } ++ } else { ++ // mark obsolete methods as such ++ old_method->set_is_obsolete(); ++ obsolete_count++; ++ ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ TRACE_RC3("mark %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string()); ++ } ++ old_method->set_is_old(); ++ } ++ for (int i = 0; i < _deleted_methods_length; ++i) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_deleted_methods[i]); + +- RC_TIMER_STOP(_timer_rsc_phase2); +-} // end redefine_single_class() ++ //assert(old_method->vtable_index() < 0, ++ // "cannot delete methods with vtable entries");; + ++ // Mark all deleted methods as old and obsolete ++ old_method->set_is_old(); ++ old_method->set_is_obsolete(); ++ ++obsolete_count; ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ TRACE_RC3("mark deleted %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string()); ++ } ++ //assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), "sanity check"); ++ TRACE_RC3("EMCP_cnt=%d, obsolete_cnt=%d !", *emcp_method_count_p, obsolete_count); ++} + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
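That counter bump walks the whole subclass tree. Below is a minimal model of the recursive walk removed in the hunk that follows, using HotSpot-style first-subclass/next-sibling links; the struct and names are invented for illustration, and the real walk additionally skips non-instance klasses.

#include <cstdio>

// Toy stand-in for instanceKlass: direct subclasses form a singly linked
// sibling chain hanging off the first subclass.
struct ToyKlass {
  const char* name;
  int redefined_count;
  ToyKlass* subklass;      // first direct subclass
  ToyKlass* next_sibling;  // next subclass of our superclass
};

// Bump the counter on k and on every direct and indirect subclass.
void increment_counter(ToyKlass* k) {
  k->redefined_count++;
  for (ToyKlass* sub = k->subklass; sub != NULL; sub = sub->next_sibling) {
    increment_counter(sub);
  }
}

int main() {
  ToyKlass c = { "C", 0, NULL, NULL };
  ToyKlass b = { "B", 0, NULL, &c };   // B and C are siblings
  ToyKlass a = { "A", 0, &b, NULL };   // both are subclasses of A
  increment_counter(&a);
  std::printf("A=%d B=%d C=%d\n", a.redefined_count, b.redefined_count,
              c.redefined_count);      // prints A=1 B=1 C=1
  return 0;
}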
+@@ -3338,134 +2102,267 @@ void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) { + klassOop class_oop = java_lang_Class::as_klassOop(class_mirror); + int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1; + java_lang_Class::set_classRedefinedCount(class_mirror, new_count); +- +- if (class_oop != _the_class_oop) { +- // _the_class_oop count is printed at end of redefine_single_class() +- RC_TRACE_WITH_THREAD(0x00000008, THREAD, +- ("updated count in subclass=%s to %d", ik->external_name(), new_count)); +- } +- +- for (Klass *subk = ik->subklass(); subk != NULL; +- subk = subk->next_sibling()) { +- if (subk->oop_is_instance()) { +- // Only update instanceKlasses +- instanceKlass *subik = (instanceKlass*)subk; +- // recursively do subclasses of the current subclass +- increment_class_counter(subik, THREAD); +- } +- } ++ TRACE_RC3("updated count for class=%s to %d", ik->external_name(), new_count); + } + +-void VM_RedefineClasses::check_class(klassOop k_oop, +- oop initiating_loader, TRAPS) { ++#ifndef PRODUCT ++void VM_RedefineClasses::check_class(klassOop k_oop, TRAPS) { + Klass *k = k_oop->klass_part(); + if (k->oop_is_instance()) { + HandleMark hm(THREAD); + instanceKlass *ik = (instanceKlass *) k; +- bool no_old_methods = true; // be optimistic +- ResourceMark rm(THREAD); ++ assert(ik->is_newest_version(), "must be latest version in system dictionary"); ++ ++ if (ik->vtable_length() > 0) { ++ ResourceMark rm(THREAD); ++ if (!ik->vtable()->check_no_old_or_obsolete_entries()) { ++ TRACE_RC1("size of class: %d\n", k_oop->size()); ++ TRACE_RC1("klassVtable::check_no_old_entries failure -- OLD method found -- class: %s", ik->signature_name()); ++ assert(false, "OLD method found"); ++ } ++ ++ ik->vtable()->verify(tty, true); ++ } ++ } ++} ++ ++#endif + +- // a vtable should never contain old or obsolete methods +- if (ik->vtable_length() > 0 && +- !ik->vtable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassVtable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->vtable()->dump_vtable(); ++static bool match_right(void* value, Pair<klassOop, klassOop> elem) { ++ return elem.right() == value; ++} ++ ++jvmtiError VM_RedefineClasses::do_topological_class_sorting( const jvmtiClassDefinition *class_defs, int class_count, TRAPS) ++{ ++ GrowableArray< Pair<klassOop, klassOop> > links; ++ ++ for (int i=0; i<class_count; i++) { ++ ++ oop mirror = JNIHandles::resolve_non_null(class_defs[i].klass); ++ instanceKlassHandle the_class(THREAD, java_lang_Class::as_klassOop(mirror)); ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ++ ClassFileStream st((u1*) class_defs[i].class_bytes, ++ class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); ++ ClassFileParser cfp(&st); ++ ++ GrowableArray<Symbol*> symbolArr; ++ TempNewSymbol parsed_name; ++ TRACE_RC2("Before find super symbols of class %s", the_class->name()->as_C_string()); ++ cfp.parseClassFile(the_class->name(), the_class_loader, protection_domain, the_class, KlassHandle(), NULL, &symbolArr, parsed_name, false, THREAD); ++ ++ for (int j=0; j<symbolArr.length(); j++) { ++ Symbol* sym = symbolArr.at(j); ++ TRACE_RC3("Before adding link to super class %s", sym->as_C_string()); ++ klassOop super_klass = SystemDictionary::resolve_or_null(sym, the_class_loader, 
protection_domain, THREAD); ++ if (super_klass != NULL) { ++ instanceKlassHandle the_super_class(THREAD, super_klass); ++ if (_affected_klasses->contains(the_super_class)) { ++ TRACE_RC2("Found class to link"); ++ links.append(Pair<klassOop, klassOop>(super_klass, the_class())); ++ } + } +- no_old_methods = false; +- } +- +- // an itable should never contain old or obsolete methods +- if (ik->itable_length() > 0 && +- !ik->itable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassItable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->itable()->dump_itable(); ++ } ++ ++ assert(the_class->check_redefinition_flag(Klass::MarkedAsAffected), ""); ++ the_class->clear_redefinition_flag(Klass::MarkedAsAffected); ++ } ++ ++ ++ TRACE_RC1("Identified links between classes! "); ++ ++ for (int i=0; i < _affected_klasses->length(); i++) { ++ instanceKlassHandle klass = _affected_klasses->at(i); ++ ++ if (klass->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ klass->clear_redefinition_flag(Klass::MarkedAsAffected); ++ klassOop superKlass = klass->super(); ++ if (_affected_klasses->contains(superKlass)) { ++ links.append(Pair<klassOop, klassOop>(superKlass, klass())); + } +- no_old_methods = false; +- } +- +- // the constant pool cache should never contain old or obsolete methods +- if (ik->constants() != NULL && +- ik->constants()->cache() != NULL && +- !ik->constants()->cache()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("cp-cache::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->constants()->cache()->dump_cache(); ++ ++ objArrayOop superInterfaces = klass->local_interfaces(); ++ for (int j=0; j<superInterfaces->length(); j++) { ++ klassOop interfaceKlass = (klassOop)superInterfaces->obj_at(j); ++ if (_affected_klasses->contains(interfaceKlass)) { ++ links.append(Pair<klassOop, klassOop>(interfaceKlass, klass())); ++ } + } +- no_old_methods = false; ++ } ++ } ++ ++ IF_TRACE_RC2 { ++ TRACE_RC2("Identified links: "); ++ for (int i=0; i<links.length(); i++) { ++ TRACE_RC2("%s to %s", links.at(i).left()->klass_part()->name()->as_C_string(), ++ links.at(i).right()->klass_part()->name()->as_C_string()); ++ } ++ } ++ ++ for (int i = 0; i < _affected_klasses->length(); i++) { ++ int j; ++ for (j = i; j < _affected_klasses->length(); j++) { ++ // Search for node with no incoming edges ++ klassOop oop = _affected_klasses->at(j)(); ++ int k = links.find(oop, match_right); ++ if (k == -1) break; ++ } ++ if (j == _affected_klasses->length()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; + } + +- if (!no_old_methods) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- dump_methods(); ++ // Remove all links from this node ++ klassOop oop = _affected_klasses->at(j)(); ++ int k = 0; ++ while (k < links.length()) { ++ if (links.adr_at(k)->left() == oop) { ++ links.delete_at(k); + } else { +- tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option " +- "to see more info about the following guarantee() failure."); ++ k++; + } +- guarantee(false, "OLD and/or OBSOLETE method(s) found"); + } ++ ++ // Swap node ++ instanceKlassHandle tmp = _affected_klasses->at(j); ++ _affected_klasses->at_put(j, _affected_klasses->at(i)); ++ _affected_klasses->at_put(i, tmp); + } ++ ++ return 
JVMTI_ERROR_NONE; + } + +-void VM_RedefineClasses::dump_methods() { +- int j; +- RC_TRACE(0x00004000, ("_old_methods --")); +- for (j = 0; j < _old_methods->length(); ++j) { +- methodOop m = (methodOop) _old_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_new_methods --")); +- for (j = 0; j < _new_methods->length(); ++j) { +- methodOop m = (methodOop) _new_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_matching_(old/new)_methods --")); +- for (j = 0; j < _matching_methods_length; ++j) { +- methodOop m = _matching_old_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- m = _matching_new_methods[j]; +- RC_TRACE_NO_CR(0x00004000, (" (%5d) ", m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_deleted_methods --")); +- for (j = 0; j < _deleted_methods_length; ++j) { +- methodOop m = _deleted_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_added_methods --")); +- for (j = 0; j < _added_methods_length; ++j) { +- methodOop m = _added_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); ++// This internal class transfers the native function registration from old methods ++// to new methods. It is designed to handle both the simple case of unchanged ++// native methods and the complex cases of native method prefixes being added and/or ++// removed. ++// It expects only to be used during the VM_RedefineClasses op (a safepoint). ++// ++// This class is used after the new methods have been installed in "the_class". ++// ++// So, for example, the following must be handled. Where 'm' is a method and ++// a number followed by an underscore is a prefix. ++// ++// Old Name New Name ++// Simple transfer to new method m -> m ++// Add prefix m -> 1_m ++// Remove prefix 1_m -> m ++// Simultaneous add of prefixes m -> 3_2_1_m ++// Simultaneous removal of prefixes 3_2_1_m -> m ++// Simultaneous add and remove 1_m -> 2_m ++// Same, caused by prefix removal only 3_2_1_m -> 3_2_m ++// ++class TransferNativeFunctionRegistration { ++private: ++ instanceKlassHandle the_class; ++ int prefix_count; ++ char** prefixes; ++ ++ // Recursively search the binary tree of possibly prefixed method names. ++ // Iteration could be used if all agents were well behaved. Full tree walk is ++ // more resilent to agents not cleaning up intermediate methods. ++ // Branch at each depth in the binary tree is: ++ // (1) without the prefix. ++ // (2) with the prefix. ++ // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) 
++ methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, ++ Symbol* signature) { ++ Symbol* name_symbol = SymbolTable::probe(name_str, (int)name_len); ++ if (name_symbol != NULL) { ++ methodOop method = Klass::cast(the_class()->klass_part()->new_version())->lookup_method(name_symbol, signature); ++ if (method != NULL) { ++ // Even if prefixed, intermediate methods must exist. ++ if (method->is_native()) { ++ // Wahoo, we found a (possibly prefixed) version of the method, return it. ++ return method; ++ } ++ if (depth < prefix_count) { ++ // Try applying further prefixes (other than this one). ++ method = search_prefix_name_space(depth+1, name_str, name_len, signature); ++ if (method != NULL) { ++ return method; // found ++ } ++ ++ // Try adding this prefix to the method name and see if it matches ++ // another method name. ++ char* prefix = prefixes[depth]; ++ size_t prefix_len = strlen(prefix); ++ size_t trial_len = name_len + prefix_len; ++ char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); ++ strcpy(trial_name_str, prefix); ++ strcat(trial_name_str, name_str); ++ method = search_prefix_name_space(depth+1, trial_name_str, trial_len, ++ signature); ++ if (method != NULL) { ++ // If found along this branch, it was prefixed, mark as such ++ method->set_is_prefixed_native(); ++ return method; // found ++ } ++ } ++ } ++ } ++ return NULL; // This whole branch bore nothing ++ } ++ ++ // Return the method name with old prefixes stripped away. ++ char* method_name_without_prefixes(methodOop method) { ++ Symbol* name = method->name(); ++ char* name_str = name->as_utf8(); ++ ++ // Old prefixing may be defunct, strip prefixes, if any. ++ for (int i = prefix_count-1; i >= 0; i--) { ++ char* prefix = prefixes[i]; ++ size_t prefix_len = strlen(prefix); ++ if (strncmp(prefix, name_str, prefix_len) == 0) { ++ name_str += prefix_len; ++ } ++ } ++ return name_str; ++ } ++ ++ // Strip any prefixes off the old native method, then try to find a ++ // (possibly prefixed) new native that matches it. ++ methodOop strip_and_search_for_new_native(methodOop method) { ++ ResourceMark rm; ++ char* name_str = method_name_without_prefixes(method); ++ return search_prefix_name_space(0, name_str, strlen(name_str), ++ method->signature()); ++ } ++ ++public: ++ ++ // Construct a native method transfer processor for this class. ++ TransferNativeFunctionRegistration(instanceKlassHandle _the_class) { ++ assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); ++ ++ the_class = _the_class; ++ prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count); ++ } ++ ++ // Attempt to transfer any of the old or deleted methods that are native ++ void transfer_registrations(instanceKlassHandle old_klass, int* old_methods, int methods_length) { ++ for (int j = 0; j < methods_length; j++) { ++ methodOop old_method = (methodOop)old_klass->methods()->obj_at(old_methods[j]); ++ ++ if (old_method->is_native() && old_method->has_native_function()) { ++ methodOop new_method = strip_and_search_for_new_native(old_method); ++ if (new_method != NULL) { ++ // Actually set the native function in the new method. ++ // Redefine does not send events (except CFLH), certainly not this ++ // behind the scenes re-registration. 
++ new_method->set_native_function(old_method->native_function(), ++ !methodOopDesc::native_bind_event_is_interesting); ++ ++ TRACE_RC3("Transfering native function for method %s", old_method->name()->as_C_string()); ++ } ++ } ++ } + } ++}; ++ ++// Don't lose the association between a native method and its JNI function. ++void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle old_klass) { ++ TransferNativeFunctionRegistration transfer(old_klass); ++ transfer.transfer_registrations(old_klass, _deleted_methods, _deleted_methods_length); ++ transfer.transfer_registrations(old_klass, _matching_old_methods, _matching_methods_length); + } +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.hpp b/src/share/vm/prims/jvmtiRedefineClasses.hpp +index 671f2ae..8333cee 100644 +--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -30,332 +30,29 @@ + #include "memory/resourceArea.hpp" + #include "oops/objArrayKlass.hpp" + #include "oops/objArrayOop.hpp" ++#include "oops/fieldStreams.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" +-#include "runtime/vm_operations.hpp" +- +-// Introduction: +-// +-// The RedefineClasses() API is used to change the definition of one or +-// more classes. While the API supports redefining more than one class +-// in a single call, in general, the API is discussed in the context of +-// changing the definition of a single current class to a single new +-// class. For clarity, the current class is will always be called +-// "the_class" and the new class will always be called "scratch_class". +-// +-// The name "the_class" is used because there is only one structure +-// that represents a specific class; redefinition does not replace the +-// structure, but instead replaces parts of the structure. The name +-// "scratch_class" is used because the structure that represents the +-// new definition of a specific class is simply used to carry around +-// the parts of the new definition until they are used to replace the +-// appropriate parts in the_class. Once redefinition of a class is +-// complete, scratch_class is thrown away. +-// +-// +-// Implementation Overview: +-// +-// The RedefineClasses() API is mostly a wrapper around the VM op that +-// does the real work. The work is split in varying degrees between +-// doit_prologue(), doit() and doit_epilogue(). +-// +-// 1) doit_prologue() is called by the JavaThread on the way to a +-// safepoint. 
It does parameter verification and loads scratch_class +-// which involves: +-// - parsing the incoming class definition using the_class' class +-// loader and security context +-// - linking scratch_class +-// - merging constant pools and rewriting bytecodes as needed +-// for the merged constant pool +-// - verifying the bytecodes in scratch_class +-// - setting up the constant pool cache and rewriting bytecodes +-// as needed to use the cache +-// - finally, scratch_class is compared to the_class to verify +-// that it is a valid replacement class +-// - if everything is good, then scratch_class is saved in an +-// instance field in the VM operation for the doit() call +-// +-// Note: A JavaThread must do the above work. +-// +-// 2) doit() is called by the VMThread during a safepoint. It installs +-// the new class definition(s) which involves: +-// - retrieving the scratch_class from the instance field in the +-// VM operation +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding weak reference(s) to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-// - telling the SystemDictionary to notice our changes +-// +-// Note: the above work must be done by the VMThread to be safe. +-// +-// 3) doit_epilogue() is called by the JavaThread after the VM op +-// is finished and the safepoint is done. It simply cleans up +-// memory allocated in doit_prologue() and used in doit(). +-// +-// +-// Constant Pool Details: +-// +-// When the_class is redefined, we cannot just replace the constant +-// pool in the_class with the constant pool from scratch_class because +-// that could confuse obsolete methods that may still be running. +-// Instead, the constant pool from the_class, old_cp, is merged with +-// the constant pool from scratch_class, scratch_cp. The resulting +-// constant pool, merge_cp, replaces old_cp in the_class. +-// +-// The key part of any merging algorithm is the entry comparison +-// function so we have to know the types of entries in a constant pool +-// in order to merge two of them together. Constant pools can contain +-// up to 12 different kinds of entries; the JVM_CONSTANT_Unicode entry +-// is not presently used so we only have to worry about the other 11 +-// entry types. For the purposes of constant pool merging, it is +-// helpful to know that the 11 entry types fall into 3 different +-// subtypes: "direct", "indirect" and "double-indirect". +-// +-// Direct CP entries contain data and do not contain references to +-// other CP entries. The following are direct CP entries: +-// JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} +-// +-// Indirect CP entries contain 1 or 2 references to a direct CP entry +-// and no other data. The following are indirect CP entries: +-// JVM_CONSTANT_{Class,NameAndType,String} +-// +-// Double-indirect CP entries contain two references to indirect CP +-// entries and no other data. 
The following are double-indirect CP +-// entries: +-// JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} +-// +-// When comparing entries between two constant pools, the entry types +-// are compared first and if they match, then further comparisons are +-// made depending on the entry subtype. Comparing direct CP entries is +-// simply a matter of comparing the data associated with each entry. +-// Comparing both indirect and double-indirect CP entries requires +-// recursion. +-// +-// Fortunately, the recursive combinations are limited because indirect +-// CP entries can only refer to direct CP entries and double-indirect +-// CP entries can only refer to indirect CP entries. The following is +-// an example illustration of the deepest set of indirections needed to +-// access the data associated with a JVM_CONSTANT_Fieldref entry: +-// +-// JVM_CONSTANT_Fieldref { +-// class_index => JVM_CONSTANT_Class { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-1> +-// } +-// } +-// name_and_type_index => JVM_CONSTANT_NameAndType { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-2> +-// } +-// descriptor_index => JVM_CONSTANT_Utf8 { +-// <data-3> +-// } +-// } +-// } +-// +-// The above illustration is not a data structure definition for any +-// computer language. The curly braces ('{' and '}') are meant to +-// delimit the context of the "fields" in the CP entry types shown. +-// Each indirection from the JVM_CONSTANT_Fieldref entry is shown via +-// "=>", e.g., the class_index is used to indirectly reference a +-// JVM_CONSTANT_Class entry where the name_index is used to indirectly +-// reference a JVM_CONSTANT_Utf8 entry which contains the interesting +-// <data-1>. In order to understand a JVM_CONSTANT_Fieldref entry, we +-// have to do a total of 5 indirections just to get to the CP entries +-// that contain the interesting pieces of data and then we have to +-// fetch the three pieces of data. This means we have to do a total of +-// (5 + 3) * 2 == 16 dereferences to compare two JVM_CONSTANT_Fieldref +-// entries. +-// +-// Here is the indirection, data and dereference count for each entry +-// type: +-// +-// JVM_CONSTANT_Class 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Double 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Fieldref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Float 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Integer 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_InterfaceMethodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Long 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Methodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_NameAndType 1 indir, 2 data, 4 derefs +-// JVM_CONSTANT_String 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Utf8 0 indir, 1 data, 1 deref +-// +-// So different subtypes of CP entries require different amounts of +-// work for a proper comparison. +-// +-// Now that we've talked about the different entry types and how to +-// compare them we need to get back to merging. This is not a merge in +-// the "sort -u" sense or even in the "sort" sense. When we merge two +-// constant pools, we copy all the entries from old_cp to merge_cp, +-// preserving entry order. Next we append all the unique entries from +-// scratch_cp to merge_cp and we track the index changes from the +-// location in scratch_cp to the possibly new location in merge_cp. +-// When we are done, any obsolete code that is still running that +-// uses old_cp should not be able to observe any difference if it +-// were to use merge_cp. 
As for the new code in scratch_class, it is +-// modified to use the appropriate index values in merge_cp before it +-// is used to replace the code in the_class. +-// +-// There is one small complication in copying the entries from old_cp +-// to merge_cp. Two of the CP entry types are special in that they are +-// lazily resolved. Before explaining the copying complication, we need +-// to digress into CP entry resolution. +-// +-// JVM_CONSTANT_Class and JVM_CONSTANT_String entries are present in +-// the class file, but are not stored in memory as such until they are +-// resolved. The entries are not resolved unless they are used because +-// resolution is expensive. During class file parsing the entries are +-// initially stored in memory as JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries. These special CP entry types +-// indicate that the JVM_CONSTANT_Class and JVM_CONSTANT_String entries +-// have been parsed, but the index values in the entries have not been +-// validated. After the entire constant pool has been parsed, the index +-// values can be validated and then the entries are converted into +-// JVM_CONSTANT_UnresolvedClass and JVM_CONSTANT_UnresolvedString +-// entries. During this conversion process, the UTF8 values that are +-// indirectly referenced by the JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries are changed into Symbol*s and the +-// entries are modified to refer to the Symbol*s. This optimization +-// eliminates one level of indirection for those two CP entry types and +-// gets the entries ready for verification. During class file parsing +-// it is also possible for JVM_CONSTANT_UnresolvedString entries to be +-// resolved into JVM_CONSTANT_String entries. Verification expects to +-// find JVM_CONSTANT_UnresolvedClass and either JVM_CONSTANT_String or +-// JVM_CONSTANT_UnresolvedString entries and not JVM_CONSTANT_Class +-// entries. +-// +-// Now we can get back to the copying complication. When we copy +-// entries from old_cp to merge_cp, we have to revert any +-// JVM_CONSTANT_Class entries to JVM_CONSTANT_UnresolvedClass entries +-// or verification will fail. +-// +-// It is important to explicitly state that the merging algorithm +-// effectively unresolves JVM_CONSTANT_Class entries that were in the +-// old_cp when they are changed into JVM_CONSTANT_UnresolvedClass +-// entries in the merge_cp. This is done both to make verification +-// happy and to avoid adding more brittleness between RedefineClasses +-// and the constant pool cache. By allowing the constant pool cache +-// implementation to (re)resolve JVM_CONSTANT_UnresolvedClass entries +-// into JVM_CONSTANT_Class entries, we avoid having to embed knowledge +-// about those algorithms in RedefineClasses. +-// +-// Appending unique entries from scratch_cp to merge_cp is straight +-// forward for direct CP entries and most indirect CP entries. For the +-// indirect CP entry type JVM_CONSTANT_NameAndType and for the double- +-// indirect CP entry types, the presence of more than one piece of +-// interesting data makes appending the entries more complicated. +-// +-// For the JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} entry types, +-// the entry is simply copied from scratch_cp to the end of merge_cp. +-// If the index in scratch_cp is different than the destination index +-// in merge_cp, then the change in index value is tracked. 
+-// +-// Note: the above discussion for the direct CP entries also applies +-// to the JVM_CONSTANT_Unresolved{Class,String} entry types. +-// +-// For the JVM_CONSTANT_{Class,String} entry types, since there is only +-// one data element at the end of the recursion, we know that we have +-// either one or two unique entries. If the JVM_CONSTANT_Utf8 entry is +-// unique then it is appended to merge_cp before the current entry. +-// If the JVM_CONSTANT_Utf8 entry is not unique, then the current entry +-// is updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// Note: the above discussion for JVM_CONSTANT_{Class,String} entry +-// types is theoretical. Since those entry types have already been +-// optimized into JVM_CONSTANT_Unresolved{Class,String} entry types, +-// they are handled as direct CP entries. +-// +-// For the JVM_CONSTANT_NameAndType entry type, since there are two +-// data elements at the end of the recursions, we know that we have +-// between one and three unique entries. Any unique JVM_CONSTANT_Utf8 +-// entries are appended to merge_cp before the current entry. For any +-// JVM_CONSTANT_Utf8 entries that are not unique, the current entry is +-// updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// For the JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} entry +-// types, since there are two indirect CP entries and three data +-// elements at the end of the recursions, we know that we have between +-// one and six unique entries. See the JVM_CONSTANT_Fieldref diagram +-// above for an example of all six entries. The uniqueness algorithm +-// for the JVM_CONSTANT_Class and JVM_CONSTANT_NameAndType entries is +-// covered above. Any unique entries are appended to merge_cp before +-// the current entry. For any entries that are not unique, the current +-// entry is updated to refer to the duplicate entry in merge_cp before +-// it is appended to merge_cp. Again, any changes in index values are +-// tracked as needed. +-// +-// +-// Other Details: +-// +-// Details for other parts of RedefineClasses need to be written. +-// This is a placeholder section. +-// +-// +-// Open Issues (in no particular order): +-// +-// - How do we serialize the RedefineClasses() API without deadlocking? +-// +-// - SystemDictionary::parse_stream() was called with a NULL protection +-// domain since the initial version. This has been changed to pass +-// the_class->protection_domain(). This change has been tested with +-// all NSK tests and nothing broke, but what will adding it now break +-// in ways that we don't test? +-// +-// - GenerateOopMap::rewrite_load_or_store() has a comment in its +-// (indirect) use of the Relocator class that the max instruction +-// size is 4 bytes. goto_w and jsr_w are 5 bytes and wide/iinc is +-// 6 bytes. Perhaps Relocator only needs a 4 byte buffer to do +-// what it does to the bytecodes. More investigation is needed. +-// +-// - java.lang.Object methods can be called on arrays. This is +-// implemented via the arrayKlassOop vtable which we don't +-// update. For example, if we redefine java.lang.Object.toString(), +-// then the new version of the method will not be called for array +-// objects. +-// +-// - How do we know if redefine_single_class() and the guts of +-// instanceKlass are out of sync? 
I don't think this can be +-// automated, but we should probably order the work in +-// redefine_single_class() to match the order of field +-// definitions in instanceKlass. We also need to add some +-// comments about keeping things in sync. +-// +-// - set_new_constant_pool() is huge and we should consider refactoring +-// it into smaller chunks of work. +-// +-// - The exception table update code in set_new_constant_pool() defines +-// const values that are also defined in a local context elsewhere. +-// The same literal values are also used in elsewhere. We need to +-// coordinate a cleanup of these constants with Runtime. +-// +- +-class VM_RedefineClasses: public VM_Operation { ++#include "gc_implementation/shared/vmGCOperations.hpp" ++ ++// New version that allows arbitrary changes to already loaded classes. ++class VM_RedefineClasses: public VM_GC_Operation { + private: ++ + // These static fields are needed by SystemDictionary::classes_do() + // facility and the adjust_cpool_cache_and_vtable() helper: + static objArrayOop _old_methods; + static objArrayOop _new_methods; +- static methodOop* _matching_old_methods; +- static methodOop* _matching_new_methods; +- static methodOop* _deleted_methods; +- static methodOop* _added_methods; ++ static int* _matching_old_methods; ++ static int* _matching_new_methods; ++ static int* _deleted_methods; ++ static int* _added_methods; + static int _matching_methods_length; + static int _deleted_methods_length; + static int _added_methods_length; +- static klassOop _the_class_oop; ++ ++ static int _revision_number; ++ ++ static GrowableArray<instanceKlassHandle>* _affected_klasses; + + // The instance fields are used to pass information from + // doit_prologue() to doit() and doit_epilogue(). +@@ -366,40 +63,28 @@ class VM_RedefineClasses: public VM_Operation { + // RetransformClasses. Indicate which. + JvmtiClassLoadKind _class_load_kind; + +- // _index_map_count is just an optimization for knowing if +- // _index_map_p contains any entries. +- int _index_map_count; +- intArray * _index_map_p; +- // ptr to _class_count scratch_classes +- instanceKlassHandle * _scratch_classes; +- jvmtiError _res; ++ GrowableArray<instanceKlassHandle>* _new_classes; ++ jvmtiError _result; ++ int _max_redefinition_flags; + + // Performance measurement support. These timers do not cover all + // the work done for JVM/TI RedefineClasses() but they do cover + // the heavy lifting. +- elapsedTimer _timer_rsc_phase1; +- elapsedTimer _timer_rsc_phase2; +- elapsedTimer _timer_vm_op_prologue; +- +- // These routines are roughly in call order unless otherwise noted. +- +- // Load the caller's new class definition(s) into _scratch_classes. +- // Constant pool merging work is done here as needed. Also calls +- // compare_and_normalize_class_versions() to verify the class +- // definition(s). 
++ elapsedTimer _timer_total; ++ elapsedTimer _timer_prologue; ++ elapsedTimer _timer_class_linking; ++ elapsedTimer _timer_class_loading; ++ elapsedTimer _timer_prepare_redefinition; ++ elapsedTimer _timer_wait_for_locks; ++ elapsedTimer _timer_heap_iteration; ++ elapsedTimer _timer_redefinition; ++ elapsedTimer _timer_vm_op_epilogue; ++ ++ jvmtiError check_redefinition_allowed(instanceKlassHandle new_class); ++ jvmtiError find_sorted_affected_classes( ); ++ jvmtiError find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed); + jvmtiError load_new_class_versions(TRAPS); + +- // Verify that the caller provided class definition(s) that meet +- // the restrictions of RedefineClasses. Normalize the order of +- // overloaded methods as needed. +- jvmtiError compare_and_normalize_class_versions( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class); +- +- // Swap annotations[i] with annotations[j] +- // Used by compare_and_normalize_class_versions() when normalizing +- // overloaded methods or changing idnum as when adding or deleting methods. +- void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); +- + // Figure out which new methods match old methods in name and signature, + // which methods have been added, and which are no longer present + void compute_added_deleted_matching_methods(); +@@ -407,95 +92,71 @@ class VM_RedefineClasses: public VM_Operation { + // Change jmethodIDs to point to the new methods + void update_jmethod_ids(); + +- // In addition to marking methods as obsolete, this routine +- // records which methods are EMCP (Equivalent Module Constant +- // Pool) in the emcp_methods BitMap and returns the number of +- // EMCP methods via emcp_method_count_p. This information is +- // used when information about the previous version of the_class +- // is squirreled away. +- void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, +- int * emcp_method_count_p); +- void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); ++ ++ static void add_affected_klasses( klassOop obj ); + +- // Unevolving classes may point to methods of the_class directly +- // from their constant pool caches, itables, and/or vtables. We +- // use the SystemDictionary::classes_do() facility and this helper +- // to fix up these pointers. +- static void adjust_cpool_cache_and_vtable(klassOop k_oop, oop loader, TRAPS); ++ static jvmtiError do_topological_class_sorting(const jvmtiClassDefinition *class_definitions, int class_count, TRAPS); + + // Install the redefinition of a class +- void redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS); ++ void redefine_single_class(instanceKlassHandle the_new_class, TRAPS); + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
+ void increment_class_counter(instanceKlass *ik, TRAPS); + +- // Support for constant pool merging (these routines are in alpha +- // order): +- void append_entry(constantPoolHandle scratch_cp, int scratch_i, +- constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); +- int find_new_index(int old_index); +- bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- bool is_unresolved_string_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); +- bool merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS); +- jvmtiError merge_cp_and_rewrite(instanceKlassHandle the_class, +- instanceKlassHandle scratch_class, TRAPS); +- u2 rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS); +- bool rewrite_cp_refs(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_element_value( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_method(methodHandle method, +- methodHandle * new_method_p, TRAPS); +- bool rewrite_cp_refs_in_methods(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS); +- void rewrite_cp_refs_in_verification_type_info( +- address& stackmap_addr_ref, address stackmap_end, u2 frame_i, +- u1 frame_size, TRAPS); +- void set_new_constant_pool(instanceKlassHandle scratch_class, +- constantPoolHandle scratch_cp, int scratch_cp_length, bool shrink, TRAPS); + + void flush_dependent_code(instanceKlassHandle k_h, TRAPS); + +- static void check_class(klassOop k_oop, oop initiating_loader, TRAPS); +- static void dump_methods(); ++ static void check_class(klassOop k_oop,/* oop initiating_loader,*/ TRAPS) PRODUCT_RETURN; ++ ++ static void adjust_cpool_cache(klassOop k_oop, oop initiating_loader, TRAPS); ++ ++ static void unpatch_bytecode(methodOop method); ++ ++#ifdef ASSERT ++ static void verify_classes(klassOop k_oop, oop initiating_loader, TRAPS); ++#endif ++ ++ int calculate_redefinition_flags(instanceKlassHandle new_version); ++ void calculate_instance_update_information(klassOop new_version); ++ void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p); ++ static void mark_as_scavengable(nmethod* nm); ++ ++ bool check_arguments(); ++ jvmtiError check_arguments_error(); + + public: +- VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind); +- VMOp_Type type() const { return VMOp_RedefineClasses; } ++ VM_RedefineClasses(jint class_count, const 
jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind); ++ virtual ~VM_RedefineClasses(); ++ + bool doit_prologue(); + void doit(); + void doit_epilogue(); ++ void rollback(); + +- bool allow_nested_vm_operations() const { return true; } +- jvmtiError check_error() { return _res; } ++ jvmtiError check_exception() const; ++ VMOp_Type type() const { return VMOp_RedefineClasses; } ++ bool skip_operation() const { return false; } ++ bool allow_nested_vm_operations() const { return true; } ++ jvmtiError check_error() { return _result; } + + // Modifiable test must be shared between IsModifiableClass query + // and redefine implementation + static bool is_modifiable_class(oop klass_mirror); ++ ++ // Utility methods for transfering field access flags ++ ++ static void transfer_special_access_flags(JavaFieldStream *from, JavaFieldStream *to); ++ static void transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to); ++ ++ void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ ++ void lock_threads(); ++ void unlock_threads(); ++ ++ static void swap_marks(oop first, oop second); ++ + }; + + #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP +diff --git a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp +index 878d300..9dbe748 100644 +--- a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp ++++ b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -22,114 +22,26 @@ + * + */ + +-#ifndef SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP +-#define SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP ++#define IF_TRACE_RC1 if (TraceRedefineClasses >= 1) ++#define IF_TRACE_RC2 if (TraceRedefineClasses >= 2) ++#define IF_TRACE_RC3 if (TraceRedefineClasses >= 3) ++#define IF_TRACE_RC4 if (TraceRedefineClasses >= 4) ++#define IF_TRACE_RC5 if (TraceRedefineClasses >= 5) + +-// RedefineClasses tracing support via the TraceRedefineClasses +-// option. A bit is assigned to each group of trace messages. +-// Groups of messages are individually selectable. We have to use +-// decimal values on the command line since the command option +-// parsing logic doesn't like non-decimal numerics. The HEX values +-// are used in the actual RC_TRACE() calls for sanity. To achieve +-// the old cumulative behavior, pick the level after the one in +-// which you are interested and subtract one, e.g., 33554431 will +-// print every tracing message. 
+-// +-// 0x00000000 | 0 - default; no tracing messages +-// 0x00000001 | 1 - name each target class before loading, after +-// loading and after redefinition is completed +-// 0x00000002 | 2 - print info if parsing, linking or +-// verification throws an exception +-// 0x00000004 | 4 - print timer info for the VM operation +-// 0x00000008 | 8 - print subclass counter updates +-// 0x00000010 | 16 - unused +-// 0x00000020 | 32 - unused +-// 0x00000040 | 64 - unused +-// 0x00000080 | 128 - unused +-// 0x00000100 | 256 - previous class weak reference addition +-// 0x00000200 | 512 - previous class weak reference mgmt during +-// class unloading checks (GC) +-// 0x00000400 | 1024 - previous class weak reference mgmt during +-// add previous ops (GC) +-// 0x00000800 | 2048 - previous class breakpoint mgmt +-// 0x00001000 | 4096 - detect calls to obsolete methods +-// 0x00002000 | 8192 - fail a guarantee() in addition to detection +-// 0x00004000 | 16384 - detect old/obsolete methods in metadata +-// 0x00008000 | 32768 - old/new method matching/add/delete +-// 0x00010000 | 65536 - impl details: CP size info +-// 0x00020000 | 131072 - impl details: CP merge pass info +-// 0x00040000 | 262144 - impl details: CP index maps +-// 0x00080000 | 524288 - impl details: modified CP index values +-// 0x00100000 | 1048576 - impl details: vtable updates +-// 0x00200000 | 2097152 - impl details: itable updates +-// 0x00400000 | 4194304 - impl details: constant pool cache updates +-// 0x00800000 | 8388608 - impl details: methodComparator info +-// 0x01000000 | 16777216 - impl details: nmethod evolution info +-// 0x02000000 | 33554432 - impl details: annotation updates +-// 0x04000000 | 67108864 - impl details: StackMapTable updates +-// 0x08000000 | 134217728 - impl details: OopMapCache updates +-// 0x10000000 | 268435456 - unused +-// 0x20000000 | 536870912 - unused +-// 0x40000000 | 1073741824 - unused +-// 0x80000000 | 2147483648 - unused +-// +-// Note: The ResourceMark is to cleanup resource allocated args. +-// The "while (0)" is so we can use semi-colon at end of RC_TRACE(). +-#define RC_TRACE(level, args) \ +- if ((TraceRedefineClasses & level) != 0) { \ +- ResourceMark rm; \ +- tty->print("RedefineClasses-0x%x: ", level); \ +- tty->print_cr args; \ +- } while (0) +- +-#define RC_TRACE_NO_CR(level, args) \ +- if ((TraceRedefineClasses & level) != 0) { \ +- ResourceMark rm; \ +- tty->print("RedefineClasses-0x%x: ", level); \ +- tty->print args; \ +- } while (0) +- +-#define RC_TRACE_WITH_THREAD(level, thread, args) \ +- if ((TraceRedefineClasses & level) != 0) { \ +- ResourceMark rm(thread); \ +- tty->print("RedefineClasses-0x%x: ", level); \ +- tty->print_cr args; \ +- } while (0) +- +-#define RC_TRACE_MESG(args) \ +- { \ +- ResourceMark rm; \ +- tty->print("RedefineClasses: "); \ +- tty->print_cr args; \ +- } while (0) +- +-// Macro for checking if TraceRedefineClasses has a specific bit +-// enabled. Returns true if the bit specified by level is set. +-#define RC_TRACE_ENABLED(level) ((TraceRedefineClasses & level) != 0) +- +-// Macro for checking if TraceRedefineClasses has one or more bits +-// set in a range of bit values. Returns true if one or more bits +-// is set in the range from low..high inclusive. Assumes that low +-// and high are single bit values. +-// +-// ((high << 1) - 1) +-// Yields a mask that removes bits greater than the high bit value. +-// This algorithm doesn't work with highest bit. +-// ~(low - 1) +-// Yields a mask that removes bits lower than the low bit value. 
+-#define RC_TRACE_IN_RANGE(low, high) \ +-(((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0) ++#define TRACE_RC1 if (TraceRedefineClasses >= 1) tty->print("TraceRedefineClasses-1: "); if (TraceRedefineClasses >= 1) tty->print_cr ++#define TRACE_RC2 if (TraceRedefineClasses >= 2) tty->print(" TraceRedefineClasses-2: "); if (TraceRedefineClasses >= 2) tty->print_cr ++#define TRACE_RC3 if (TraceRedefineClasses >= 3) tty->print(" TraceRedefineClasses-3: "); if (TraceRedefineClasses >= 3) tty->print_cr ++#define TRACE_RC4 if (TraceRedefineClasses >= 4) tty->print(" TraceRedefineClasses-4: "); if (TraceRedefineClasses >= 4) tty->print_cr ++#define TRACE_RC5 if (TraceRedefineClasses >= 5) tty->print(" TraceRedefineClasses-5: "); if (TraceRedefineClasses >= 5) tty->print_cr + + // Timer support macros. Only do timer operations if timer tracing + // is enabled. The "while (0)" is so we can use semi-colon at end of + // the macro. + #define RC_TIMER_START(t) \ +- if (RC_TRACE_ENABLED(0x00000004)) { \ ++ if (TimeRedefineClasses) { \ + t.start(); \ + } while (0) + #define RC_TIMER_STOP(t) \ +- if (RC_TRACE_ENABLED(0x00000004)) { \ ++ if (TimeRedefineClasses) { \ + t.stop(); \ + } while (0) +- +-#endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP +diff --git a/src/share/vm/prims/methodComparator.cpp b/src/share/vm/prims/methodComparator.cpp +index 60eaf97..785dc24 100644 +--- a/src/share/vm/prims/methodComparator.cpp ++++ b/src/share/vm/prims/methodComparator.cpp +@@ -42,10 +42,9 @@ bool MethodComparator::methods_EMCP(methodOop old_method, methodOop new_method) + if (old_method->code_size() != new_method->code_size()) + return false; + if (check_stack_and_locals_size(old_method, new_method) != 0) { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00800000, ("Methods %s non-comparable with diagnosis %d", ++ TRACE_RC4("Methods %s non-comparable with diagnosis %d", + old_method->name()->as_C_string(), +- check_stack_and_locals_size(old_method, new_method))); ++ check_stack_and_locals_size(old_method, new_method)); + return false; + } + +@@ -114,10 +113,9 @@ bool MethodComparator::methods_switchable(methodOop old_method, methodOop new_me + // Now we can test all forward jumps + for (int i = 0; i < fwd_jmps.length() / 2; i++) { + if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) { +- RC_TRACE(0x00800000, +- ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d", ++ TRACE_RC4("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d", + fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)), +- fwd_jmps.at(i*2+1))); ++ fwd_jmps.at(i*2+1)); + return false; + } + } +diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp +index 22d450b..bac1ae6 100644 +--- a/src/share/vm/runtime/arguments.cpp ++++ b/src/share/vm/runtime/arguments.cpp +@@ -54,8 +54,8 @@ + #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" + #endif + +-// Note: This is a special bug reporting site for the JVM +-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp" ++// (tw) The DCE VM has its own JIRA bug tracking system. ++#define DEFAULT_VENDOR_URL_BUG "http://ssw.jku.at/dcevm/bugreport/" + #define DEFAULT_JAVA_LAUNCHER "generic" + + char** Arguments::_jvm_flags_array = NULL; +@@ -1792,6 +1792,16 @@ bool Arguments::check_gc_consistency() { + status = false; + } + ++ // (tw) Must use serial GC. 
This limitation applies because the instance size changing GC modifications ++ // are only built into the mark and compact algorithm. ++ if (!UseSerialGC && i >= 1) { ++ //jio_fprintf(defaultStream::error_stream(), ++ // "Must use the serial GC in the Dynamic Code Evolution VM\n"); ++ //status = false; ++ } else { ++ UseSerialGC = true; ++ } ++ + return status; + } + +@@ -3208,7 +3218,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) { + + // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled. + set_aggressive_opts_flags(); +- ++#ifndef COMPILER2 + // Turn off biased locking for locking debug mode flags, + // which are subtlely different from each other but neither works with + // biased locking. +@@ -3225,6 +3235,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) { + } + UseBiasedLocking = false; + } ++#endif + + #ifdef CC_INTERP + // Clear flags not supported by the C++ interpreter +diff --git a/src/share/vm/runtime/fieldDescriptor.cpp b/src/share/vm/runtime/fieldDescriptor.cpp +index 3d5213f..9cc701b 100644 +--- a/src/share/vm/runtime/fieldDescriptor.cpp ++++ b/src/share/vm/runtime/fieldDescriptor.cpp +@@ -92,7 +92,8 @@ void fieldDescriptor::initialize(klassOop k, int index) { + instanceKlass* ik = instanceKlass::cast(k); + _cp = ik->constants(); + FieldInfo* f = ik->field(index); +- assert(!f->is_internal(), "regular Java fields only"); ++ // (tw) do we need this? ++// assert(!f->is_internal(), "regular Java fields only"); + + _access_flags = accessFlags_from(f->access_flags()); + guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor"); +diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp +index 8df7220..341b399 100644 +--- a/src/share/vm/runtime/globals.hpp ++++ b/src/share/vm/runtime/globals.hpp +@@ -1230,6 +1230,11 @@ class CommandLineFlags { + product(intx, TraceRedefineClasses, 0, \ + "Trace level for JVMTI RedefineClasses") \ + \ ++ product(bool, TimeRedefineClasses, false, \ ++ "Measure timing for JVMTI RedefineClasses") \ ++ \ ++ product(bool, AllowAdvancedClassRedefinition, true, \ ++ "Allow advanced class redefinition beyond swapping method bodies")\ + develop(bool, StressMethodComparator, false, \ + "run the MethodComparator on all loaded methods") \ + \ +diff --git a/src/share/vm/runtime/interfaceSupport.hpp b/src/share/vm/runtime/interfaceSupport.hpp +index 2875ee0..61fd8fe 100644 +--- a/src/share/vm/runtime/interfaceSupport.hpp ++++ b/src/share/vm/runtime/interfaceSupport.hpp +@@ -296,7 +296,7 @@ class ThreadToNativeFromVM : public ThreadStateTransition { + ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) { + // We are leaving the VM at this point and going directly to native code. + // Block, if we are in the middle of a safepoint synchronization. +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + thread->frame_anchor()->make_walkable(thread); + trans_and_fence(_thread_in_vm, _thread_in_native); + // Check for pending. async. exceptions or suspends. 
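The jvmtiRedefineClassesTrace.hpp hunk above replaces the bitmask-driven RC_TRACE macros with simple level-threshold macros TRACE_RC1 through TRACE_RC5, each written as two chained if statements so the macro can be applied directly to printf-style arguments. A hedged sketch of the same behavior collapsed into one variadic macro, using the do/while(0) guard that the neighboring timer macros already rely on — TraceRedefineClasses is modeled as a plain global here, not the real -XX flag:

// Toy reconstruction (outside the VM) of the level-based tracing pattern:
// a single integer level, where level N prints everything tagged N or below.
#include <cstdio>

static int TraceRedefineClasses = 2;  // stand-in for the -XX:TraceRedefineClasses flag

#define TRACE_RC(level, ...)                               \
  do {                                                     \
    if (TraceRedefineClasses >= (level)) {                 \
      std::printf("TraceRedefineClasses-%d: ", (level));   \
      std::printf(__VA_ARGS__);                            \
      std::printf("\n");                                   \
    }                                                      \
  } while (0)

int main() {
  TRACE_RC(1, "redefining %d classes", 3);  // printed: 1 <= 2
  TRACE_RC(2, "found class to link");       // printed: 2 <= 2
  TRACE_RC(3, "EMCP check details");        // suppressed: 3 > 2
}

Wrapping both statements in a single do/while block also avoids the dangling-else hazard that two back-to-back if statements invite when such a macro is used inside an unbraced if/else.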
+diff --git a/src/share/vm/runtime/javaCalls.cpp b/src/share/vm/runtime/javaCalls.cpp +index edbba98..4a27925 100644 +--- a/src/share/vm/runtime/javaCalls.cpp ++++ b/src/share/vm/runtime/javaCalls.cpp +@@ -60,7 +60,7 @@ JavaCallWrapper::JavaCallWrapper(methodHandle callee_method, Handle receiver, Ja + bool clear_pending_exception = true; + + guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code"); +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); + _result = result; + +diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp +index 3cbcaca..30839d7 100644 +--- a/src/share/vm/runtime/jniHandles.cpp ++++ b/src/share/vm/runtime/jniHandles.cpp +@@ -112,6 +112,10 @@ jobject JNIHandles::make_weak_global(Handle obj) { + } + + jmethodID JNIHandles::make_jmethod_id(methodHandle mh) { ++ if (mh->newest_version() != mh()) { ++ methodHandle mh_new(Thread::current(), mh()->newest_version()); ++ return (jmethodID) make_weak_global(mh_new); ++ } + return (jmethodID) make_weak_global(mh); + } + +diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp +index 2095237..c541434 100644 +--- a/src/share/vm/runtime/mutex.cpp ++++ b/src/share/vm/runtime/mutex.cpp +@@ -1227,7 +1227,7 @@ Monitor * Monitor::get_least_ranked_lock(Monitor * locks) { + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1247,7 +1247,7 @@ Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) { + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1310,6 +1310,7 @@ void Monitor::set_owner_implementation(Thread *new_owner) { + // already hold Terminator_lock - may happen because of periodic safepoints + if (this->rank() != Mutex::native && + this->rank() != Mutex::suspend_resume && ++ this->rank() != Mutex::redefine_classes && + locks != NULL && locks->rank() <= this->rank() && + !SafepointSynchronize::is_at_safepoint() && + this != Interrupt_lock && +diff --git a/src/share/vm/runtime/mutex.hpp b/src/share/vm/runtime/mutex.hpp +index 7d2cd82..11eb32e 100644 +--- a/src/share/vm/runtime/mutex.hpp ++++ b/src/share/vm/runtime/mutex.hpp +@@ -109,7 +109,8 @@ class Monitor : public CHeapObj<mtInternal> { + barrier = safepoint + 1, + nonleaf = barrier + 1, + max_nonleaf = nonleaf + 900, +- native = max_nonleaf + 1 ++ native = max_nonleaf + 1, ++ redefine_classes = native + 1 + }; + + // The WaitSet and EntryList linked lists are composed of ParkEvents. +diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp +index a6b2106..3a3110b 100644 +--- a/src/share/vm/runtime/mutexLocker.cpp ++++ b/src/share/vm/runtime/mutexLocker.cpp +@@ -49,6 +49,7 @@ + // Consider using GCC's __read_mostly. 
+ + Mutex* Patching_lock = NULL; ++Mutex* RedefineClasses_lock = NULL; + Monitor* SystemDictionary_lock = NULL; + Mutex* PackageTable_lock = NULL; + Mutex* CompiledIC_lock = NULL; +@@ -278,6 +279,7 @@ void mutex_init() { + def(MethodCompileQueue_lock , Monitor, nonleaf+4, true ); + def(Debug2_lock , Mutex , nonleaf+4, true ); + def(Debug3_lock , Mutex , nonleaf+4, true ); ++ def(RedefineClasses_lock , Mutex , nonleaf+7, false ); // for ensuring that class redefinition is not done in parallel + def(CompileThread_lock , Monitor, nonleaf+5, false ); + + def(JfrMsg_lock , Monitor, leaf, true); +diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp +index 40008bb..60c7cce 100644 +--- a/src/share/vm/runtime/mutexLocker.hpp ++++ b/src/share/vm/runtime/mutexLocker.hpp +@@ -43,6 +43,7 @@ + // Mutexes used in the VM. + + extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code ++extern Mutex* RedefineClasses_lock; // a lock on class redefinition + extern Monitor* SystemDictionary_lock; // a lock on the system dictonary + extern Mutex* PackageTable_lock; // a lock on the class loader package table + extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access +diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp +index cd009ed..a53ad09 100644 +--- a/src/share/vm/runtime/reflection.cpp ++++ b/src/share/vm/runtime/reflection.cpp +@@ -468,7 +468,8 @@ bool Reflection::verify_class_access(klassOop current_class, klassOop new_class, + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. + if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +@@ -519,6 +520,12 @@ bool Reflection::verify_field_access(klassOop current_class, + AccessFlags access, + bool classloader_only, + bool protected_restriction) { ++ ++ // (tw) Decide accessibility based on active version ++ if (current_class != NULL) { ++ current_class = current_class->klass_part()->active_version(); ++ } ++ + // Verify that current_class can access a field of field_class, where that + // field's access bits are "access". We assume that we've already verified + // that current_class can access field_class. +@@ -560,7 +567,8 @@ bool Reflection::verify_field_access(klassOop current_class, + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. 
+ if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp +index 709d783..689b9a2 100644 +--- a/src/share/vm/runtime/sharedRuntime.cpp ++++ b/src/share/vm/runtime/sharedRuntime.cpp +@@ -603,21 +603,13 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Symbol* n + // + JRT_LEAF(int, SharedRuntime::rc_trace_method_entry( + JavaThread* thread, methodOopDesc* method)) +- assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call"); ++ assert(TraceRedefineClasses >= 4, "wrong call"); + + if (method->is_obsolete()) { + // We are calling an obsolete method, but this is not necessarily + // an error. Our method could have been redefined just after we + // fetched the methodOop from the constant pool. +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00001000, thread, +- ("calling obsolete method '%s'", +- method->name_and_sig_as_C_string())); +- if (RC_TRACE_ENABLED(0x00002000)) { +- // this option is provided to debug calls to obsolete methods +- guarantee(false, "faulting at call to an obsolete method."); +- } ++ TRACE_RC4("calling obsolete method '%s'", method->name_and_sig_as_C_string()); + } + return 0; + JRT_END +@@ -1137,7 +1129,20 @@ methodHandle SharedRuntime::resolve_helper(JavaThread *thread, + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && +- callee_method->method_holder() != SystemDictionary::Object_klass()) { ++ callee_method->method_holder()->klass_part()->super() != NULL) { ++ ++ // (tw) If we are executing an old method, this is OK! ++ { ++ ResourceMark rm(thread); ++ RegisterMap cbl_map(thread, false); ++ frame caller_frame = thread->last_frame().sender(&cbl_map); ++ ++ CodeBlob* caller_cb = caller_frame.cb(); ++ guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod"); ++ nmethod* caller_nm = caller_cb->as_nmethod_or_null(); ++ if (caller_nm->method()->is_old()) break; ++ } ++ + // If has a pending exception then there is no need to re-try to + // resolve this method. + // If the method has been redefined, we need to try again. 
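The redefine_classes exemptions above all target HotSpot's rank-ordered deadlock detection: in debug builds a thread must acquire monitors in non-decreasing rank order, and the new rank is deliberately placed above native so the per-thread redefinition mutex can be taken no matter what else is held. A minimal standalone sketch of that invariant (hypothetical names and ranks, not HotSpot source):

    #include <cassert>
    #include <vector>

    // Stand-ins for the Mutex ranks the patch touches; only the relative
    // order matters here: redefine_classes sits above native, which sits
    // above every ordinary rank.
    enum Rank { leaf, nonleaf, native, redefine_classes };

    struct Lock { Rank rank; };

    // Sketch of the check the patched assertions perform: a newly acquired
    // lock must rank at least as high as every lock already held, unless it
    // carries one of the exempted ranks.
    void check_rank_order(const std::vector<Lock*>& held, const Lock* acquiring) {
      for (const Lock* l : held) {
        assert(acquiring->rank == native ||
               acquiring->rank == redefine_classes ||
               acquiring->rank >= l->rank);
      }
    }

Exempting the new rank is also what lets owns_locks_but_redefine_classes_lock() stand in for owns_locks() at the VM-to-Java transition asserted in javaCalls.cpp above.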
+diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp +index ae28b65..bb0681e 100644 +--- a/src/share/vm/runtime/thread.cpp ++++ b/src/share/vm/runtime/thread.cpp +@@ -216,6 +216,8 @@ Thread::Thread() { + set_self_raw_id(0); + set_lgrp_id(-1); + ++ _redefine_classes_mutex = new Mutex(Mutex::redefine_classes, "redefine classes lock", false); ++ + // allocated data structures + set_osthread(NULL); + set_resource_area(new (mtThread)ResourceArea()); +@@ -249,6 +251,7 @@ Thread::Thread() { + omFreeProvision = 32 ; + omInUseList = NULL ; + omInUseCount = 0 ; ++ _pretend_new_universe = false; + + #ifdef ASSERT + _visited_for_critical_count = false; +@@ -884,6 +887,15 @@ bool Thread::owns_locks_but_compiled_lock() const { + return false; + } + ++bool Thread::owns_locks_but_redefine_classes_lock() const { ++ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) { ++ if (cur != RedefineClasses_lock && cur->rank() != Mutex::redefine_classes) { ++ return true; ++ } ++ } ++ return false; ++} ++ + + #endif + +@@ -1637,7 +1649,7 @@ void JavaThread::run() { + ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm); + + assert(JavaThread::current() == this, "sanity check"); +- assert(!Thread::current()->owns_locks(), "sanity check"); ++ assert(!Thread::current()->owns_locks_but_redefine_classes_lock(), "sanity check"); + + DTRACE_THREAD_PROBE(start, this); + +@@ -3193,7 +3205,7 @@ static void compiler_thread_entry(JavaThread* thread, TRAPS) { + + // Create a CompilerThread + CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) +-: JavaThread(&compiler_thread_entry) { ++: JavaThread(&compiler_thread_entry), _should_bailout(false) { + _env = NULL; + _log = NULL; + _task = NULL; +@@ -3201,6 +3213,7 @@ CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) + _counters = counters; + _buffer_blob = NULL; + _scanned_nmethod = NULL; ++ _compilation_mutex = new Mutex(Mutex::redefine_classes, "compilationMutex", false); + + #ifndef PRODUCT + _ideal_graph_printer = NULL; +diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp +index 774bd27..4620820 100644 +--- a/src/share/vm/runtime/thread.hpp ++++ b/src/share/vm/runtime/thread.hpp +@@ -202,12 +202,15 @@ class Thread: public ThreadShadow { + public: + void enter_signal_handler() { _num_nested_signal++; } + void leave_signal_handler() { _num_nested_signal--; } +- bool is_inside_signal_handler() const { return _num_nested_signal > 0; } ++ bool is_inside_signal_handler() const { return _num_nested_signal > 0; } ++ Mutex* redefine_classes_mutex() { return _redefine_classes_mutex; } + + private: + // Debug tracing + static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN; + ++ Mutex* _redefine_classes_mutex; ++ + // Active_handles points to a block of handles + JNIHandleBlock* _active_handles; + +@@ -530,10 +533,15 @@ public: + uintptr_t _self_raw_id; // used by get_thread (mutable) + int _lgrp_id; + ++ ++ bool _pretend_new_universe; ++ + public: + // Stack overflow support + address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } + ++ void set_pretend_new_universe(bool b) { if (_pretend_new_universe != b) { if (TraceRedefineClasses >= 5) tty->print_cr("Changing pretend universe to %d", (int)b); _pretend_new_universe = b; } } ++ bool pretend_new_universe() { return _pretend_new_universe; } + void set_stack_base(address base) { _stack_base = base; } + size_t stack_size() const { return 
_stack_size; }
+ void set_stack_size(size_t size) { _stack_size = size; }
+@@ -570,6 +578,7 @@ public:
+ void print_owned_locks() const { print_owned_locks_on(tty); }
+ Monitor* owned_locks() const { return _owned_locks; }
+ bool owns_locks() const { return owned_locks() != NULL; }
++ bool owns_locks_but_redefine_classes_lock() const;
+ bool owns_locks_but_compiled_lock() const;
+
+ // Deadlock detection
+@@ -1793,6 +1802,8 @@ class CompilerThread : public JavaThread {
+ CompileTask* _task;
+ CompileQueue* _queue;
+ BufferBlob* _buffer_blob;
++ bool _should_bailout;
++ Mutex* _compilation_mutex;
+
+ nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+
+@@ -1802,12 +1813,16 @@ class CompilerThread : public JavaThread {
+
+ CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+
++ bool should_bailout() const { return _should_bailout; }
++ void set_should_bailout(bool b) { _should_bailout = b; }
++
+ bool is_Compiler_thread() const { return true; }
+ // Hide this compiler thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ CompileQueue* queue() { return _queue; }
+ CompilerCounters* counters() { return _counters; }
++ Mutex *compilation_mutex() { return _compilation_mutex; }
+
+ // Get/set the thread's compilation environment.
+ ciEnv* env() { return _env; }
+diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp
+index 7643670..0d3cd70 100644
+--- a/src/share/vm/runtime/vmThread.cpp
++++ b/src/share/vm/runtime/vmThread.cpp
+@@ -691,6 +691,9 @@ void VMThread::execute(VM_Operation* op) {
+ void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+ Thread::oops_do(f, cf);
+ _vm_queue->oops_do(f);
++ if (_cur_vm_operation != NULL) {
++ _cur_vm_operation->oops_do(f);
++ }
+ }
+
+ //------------------------------------------------------------------------------------------------------------------
+diff --git a/src/share/vm/utilities/exceptions.cpp b/src/share/vm/utilities/exceptions.cpp
+index 03f254d..18e324b 100644
+--- a/src/share/vm/utilities/exceptions.cpp
++++ b/src/share/vm/utilities/exceptions.cpp
+@@ -254,6 +254,8 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name,
+ assert(thread->is_Java_thread(), "can only be called by a Java thread");
+ assert(!thread->has_pending_exception(), "already has exception");
+
++ bool old_pretend_value = Thread::current()->pretend_new_universe();
++ Thread::current()->set_pretend_new_universe(false);
+ Handle h_exception;
+
+ // Resolve exception klass
+@@ -285,6 +287,7 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name,
+ h_exception = Handle(thread, thread->pending_exception());
+ thread->clear_pending_exception();
+ }
++ Thread::current()->set_pretend_new_universe(old_pretend_value);
+ return h_exception;
+ }
+
+diff --git a/src/share/vm/utilities/growableArray.hpp b/src/share/vm/utilities/growableArray.hpp
+index 2a6d6b8..4b6927f 100644
+--- a/src/share/vm/utilities/growableArray.hpp
++++ b/src/share/vm/utilities/growableArray.hpp
+@@ -145,6 +145,33 @@ class GenericGrowableArray : public ResourceObj {
+ assert(on_stack(), "fast ResourceObj path only");
+ return (void*)resource_allocate_bytes(thread, elementSize * _max);
+ }
++
++};
++
++template<class E, class F> class Pair : public StackObj
++{
++private:
++ E _left;
++ F _right;
++
++public:
++
++ Pair() {
++
++ }
++
++ Pair(E left, F right) {
++ this->_left = left;
++ this->_right = right;
++ }
++
++ E left() {
++ return _left;
++ }
++
++ F right() {
++ return _right;
++
} + }; + + template<class E> class GrowableArray : public GenericGrowableArray { diff --git a/hotspot/.hg/patches/light-jdk7u51-b13.patch b/hotspot/.hg/patches/light-jdk7u51-b13.patch new file mode 100644 index 00000000..96049ec2 --- /dev/null +++ b/hotspot/.hg/patches/light-jdk7u51-b13.patch @@ -0,0 +1,10126 @@ +diff --git a/src/cpu/x86/vm/interp_masm_x86_32.cpp b/src/cpu/x86/vm/interp_masm_x86_32.cpp +index b0ebcfd..6366d68 100644 +--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp ++++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp +@@ -1364,7 +1364,7 @@ void InterpreterMacroAssembler::notify_method_entry() { + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + get_thread(rcx); + get_method(rbx); + call_VM_leaf( +diff --git a/src/cpu/x86/vm/interp_masm_x86_64.cpp b/src/cpu/x86/vm/interp_masm_x86_64.cpp +index 2790c2a..c315b18 100644 +--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp ++++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp +@@ -1427,7 +1427,7 @@ void InterpreterMacroAssembler::notify_method_entry() { + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + get_method(c_rarg1); + call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), +diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +index 16958cd..09d6300 100644 +--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp ++++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +@@ -1976,7 +1976,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + __ movoop(rax, JNIHandles::make_local(method())); + __ call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), +diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +index 7dc4e62..86c8c95 100644 +--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp ++++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +@@ -2235,7 +2235,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, + } + + // RedefineClasses() tracing support for obsolete method entry +- if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { ++ IF_TRACE_RC4 { + // protect the args we've loaded + save_args(masm, total_c_args, c_arg, out_regs); + __ movoop(c_rarg1, JNIHandles::make_local(method())); +diff --git a/src/share/vm/c1/c1_Compilation.hpp b/src/share/vm/c1/c1_Compilation.hpp +index 9a8ca61..83e6f54 100644 +--- a/src/share/vm/c1/c1_Compilation.hpp ++++ b/src/share/vm/c1/c1_Compilation.hpp +@@ -242,8 +242,9 @@ class Compilation: public StackObj { + #define BAILOUT(msg) { bailout(msg); return; } + #define BAILOUT_(msg, res) { bailout(msg); return res; } + +-#define CHECK_BAILOUT() { if (bailed_out()) return; } +-#define CHECK_BAILOUT_(res) { if (bailed_out()) return res; } ++// (tw) Also checks a thread local flag that can be set to trigger compiler bailout from another thread. 
++#define CHECK_BAILOUT() { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return; }
++#define CHECK_BAILOUT_(res) { if (((CompilerThread *)Thread::current())->should_bailout()) bailout("Aborted externally"); if (bailed_out()) return res; }
+
+
+ class InstructionMark: public StackObj {
+diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp
+index e0ab96b..db8e551 100644
+--- a/src/share/vm/ci/ciObjectFactory.cpp
++++ b/src/share/vm/ci/ciObjectFactory.cpp
+@@ -764,3 +764,26 @@ void ciObjectFactory::print() {
+ _unloaded_instances->length(),
+ _unloaded_klasses->length());
+ }
++
++int ciObjectFactory::compare_ciobjects(ciObject** a, ciObject** b) {
++ oop oop1 = (*a)->get_oop();
++ oop oop2 = (*b)->get_oop();
++ return ((oop1 > oop2) ? 1 : ((oop1 == oop2) ? 0 : -1));
++}
++
++// (DCEVM) Restoring the ciObject arrays after class redefinition
++void ciObjectFactory::resort_shared_ci_objects() {
++ _shared_ci_objects->sort(ciObjectFactory::compare_ciobjects);
++
++#ifdef ASSERT
++ if (CIObjectFactoryVerify) {
++ oop last = NULL;
++ for (int j = 0; j < _shared_ci_objects->length(); j++) {
++ oop o = _shared_ci_objects->at(j)->get_oop();
++ assert(last < o, "out of order");
++ last = o;
++ }
++ }
++#endif // ASSERT
++}
++
+diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp
+index 26cc2c3..d99d3d6 100644
+--- a/src/share/vm/ci/ciObjectFactory.hpp
++++ b/src/share/vm/ci/ciObjectFactory.hpp
+@@ -88,6 +88,7 @@ private:
+
+ ciInstance* get_unloaded_instance(ciInstanceKlass* klass);
+
++ static int compare_ciobjects(ciObject** a, ciObject** b);
+ public:
+ static bool is_initialized() { return _initialized; }
+
+@@ -137,6 +138,8 @@ public:
+
+ void print_contents();
+ void print();
++
++ static void resort_shared_ci_objects();
+ };
+
+ #endif // SHARE_VM_CI_CIOBJECTFACTORY_HPP
+diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
+index 5ea1d43..8590ad1 100644
+--- a/src/share/vm/classfile/classFileParser.cpp
++++ b/src/share/vm/classfile/classFileParser.cpp
+@@ -795,6 +795,7 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ Handle class_loader,
+ Handle protection_domain,
+ Symbol* class_name,
++ KlassHandle old_klass,
+ TRAPS) {
+ ClassFileStream* cfs = stream();
+ assert(length > 0, "only called for length>0");
+@@ -813,6 +814,9 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ interface_index, CHECK_(nullHandle));
+ if (cp->tag_at(interface_index).is_klass()) {
+ interf = KlassHandle(THREAD, cp->resolved_klass_at(interface_index));
++ if (!old_klass.is_null() && !interf->is_newest_version()) {
++ interf = KlassHandle(THREAD, interf->newest_version());
++ }
+ } else {
+ Symbol* unresolved_klass = cp->klass_name_at(interface_index);
+
+@@ -825,6 +829,9 @@ objArrayHandle ClassFileParser::parse_interfaces(constantPoolHandle cp,
+ klassOop k = SystemDictionary::resolve_super_or_fail(class_name,
+ unresolved_klass, class_loader, protection_domain,
+ false, CHECK_(nullHandle));
++ if (!old_klass.is_null()) {
++ k = k->klass_part()->newest_version();
++ }
+ interf = KlassHandle(THREAD, k);
+ }
+
+@@ -2921,8 +2928,10 @@ typeArrayHandle ClassFileParser::assemble_annotations(u1* runtime_visible_annota
+ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
+ Handle class_loader,
+ Handle protection_domain,
++ KlassHandle old_klass,
+ KlassHandle host_klass,
+
GrowableArray<Handle>* cp_patches, ++ GrowableArray<Symbol*>* parsed_super_symbols, + TempNewSymbol& parsed_name, + bool verify, + TRAPS) { +@@ -2948,7 +2957,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + + init_parsed_class_attributes(); + +- if (JvmtiExport::should_post_class_file_load_hook()) { ++ if (parsed_super_symbols == NULL && JvmtiExport::should_post_class_file_load_hook()) { + // Get the cached class file bytes (if any) from the class that + // is being redefined or retransformed. We use jvmti_thread_state() + // instead of JvmtiThreadState::state_for(jt) so we don't allocate +@@ -2971,10 +2980,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + unsigned char* ptr = cfs->buffer(); + unsigned char* end_ptr = cfs->buffer() + cfs->length(); + ++ bool pretend_new_universe = Thread::current()->pretend_new_universe(); ++ Thread::current()->set_pretend_new_universe(false); + JvmtiExport::post_class_file_load_hook(name, class_loader, protection_domain, + &ptr, &end_ptr, + &cached_class_file_bytes, + &cached_class_file_length); ++ Thread::current()->set_pretend_new_universe(pretend_new_universe); + + if (ptr != cfs->buffer()) { + // JVMTI agent has modified class file data. +@@ -3090,6 +3102,30 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + CHECK_(nullHandle)); + } + ++ // (tw) Do not parse full class file, only get super symbols and return. ++ if (parsed_super_symbols != NULL) { ++ u2 super_class_index = cfs->get_u2_fast(); ++ ++ if (super_class_index != 0) { ++ parsed_super_symbols->append(cp->klass_name_at(super_class_index)); ++ } ++ ++ // Interfaces ++ u2 itfs_len = cfs->get_u2_fast(); ++ objArrayHandle local_interfaces; ++ if (itfs_len == 0) { ++ local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); ++ } else { ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_NULL); ++ } ++ ++ for (int i=0; i<local_interfaces->length(); i++) { ++ oop o = local_interfaces->obj_at(i); ++ parsed_super_symbols->append(((klassOop)o)->klass_part()->name()); ++ } ++ return NULL; ++ } ++ + klassOop preserve_this_klass; // for storing result across HandleMark + + // release all handles when parsing is done +@@ -3130,7 +3166,11 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + // However, make sure it is not an array type. 
+ bool is_array = false; + if (cp->tag_at(super_class_index).is_klass()) { +- super_klass = instanceKlassHandle(THREAD, cp->resolved_klass_at(super_class_index)); ++ klassOop resolved_klass = cp->resolved_klass_at(super_class_index); ++ if (!old_klass.is_null()) { ++ resolved_klass = resolved_klass->klass_part()->newest_version(); ++ } ++ super_klass = instanceKlassHandle(THREAD, resolved_klass); + if (_need_verify) + is_array = super_klass->oop_is_array(); + } else if (_need_verify) { +@@ -3148,7 +3188,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + if (itfs_len == 0) { + local_interfaces = objArrayHandle(THREAD, Universe::the_empty_system_obj_array()); + } else { +- local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, CHECK_(nullHandle)); ++ local_interfaces = parse_interfaces(cp, itfs_len, class_loader, protection_domain, _class_name, old_klass, CHECK_(nullHandle)); + } + + u2 java_fields_count = 0; +@@ -3202,7 +3242,9 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + protection_domain, + true, + CHECK_(nullHandle)); +- ++ if (!old_klass.is_null()) { ++ k = k->klass_part()->newest_version(); ++ } + KlassHandle kh (THREAD, k); + super_klass = instanceKlassHandle(THREAD, kh()); + } +@@ -3591,6 +3633,19 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + rt = REF_NONE; + } else { + rt = super_klass->reference_type(); ++ ++ // (tw) With class redefinition, it can also happen that special classes are loaded. ++ if (name == vmSymbols::java_lang_ref_Reference()) { ++ rt = REF_OTHER; ++ } else if (name == vmSymbols::java_lang_ref_SoftReference()) { ++ rt = REF_SOFT; ++ } else if (name == vmSymbols::java_lang_ref_WeakReference()) { ++ rt = REF_WEAK; ++ } else if (name == vmSymbols::java_lang_ref_FinalReference()) { ++ rt = REF_FINAL; ++ } else if (name == vmSymbols::java_lang_ref_PhantomReference()) { ++ rt = REF_PHANTOM; ++ } + } + + // We can now create the basic klassOop for this klass +@@ -3599,6 +3654,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + total_oop_map_count, + access_flags, + rt, host_klass, ++ old_klass, + CHECK_(nullHandle)); + instanceKlassHandle this_klass (THREAD, ik); + +@@ -3691,7 +3747,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts); + + // Fill in has_finalizer, has_vanilla_constructor, and layout_helper +- set_precomputed_flags(this_klass); ++ set_precomputed_flags(this_klass, old_klass); + + // reinitialize modifiers, using the InnerClasses attribute + int computed_modifiers = this_klass->compute_modifier_flags(CHECK_(nullHandle)); +@@ -3711,6 +3767,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name, + check_illegal_static_method(this_klass, CHECK_(nullHandle)); + } + ++ if (rt == REF_OTHER) { ++ instanceRefKlass::update_nonstatic_oop_maps(ik); ++ } ++ + // Allocate mirror and initialize static fields + java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); + +@@ -3856,7 +3916,7 @@ void ClassFileParser::fill_oop_maps(instanceKlassHandle k, + } + + +-void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { ++void ClassFileParser::set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass) { + klassOop super = k->super(); + + // Check if this klass has an empty finalize method (i.e. 
one with return bytecode only), +@@ -3864,7 +3924,9 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { + if (!_has_empty_finalizer) { + if (_has_finalizer || + (super != NULL && super->klass_part()->has_finalizer())) { +- k->set_has_finalizer(); ++ if (old_klass.is_null() || old_klass->has_finalizer()) { ++ k->set_has_finalizer(); ++ } + } + } + +@@ -3880,7 +3942,7 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) { + + // Check if this klass supports the java.lang.Cloneable interface + if (SystemDictionary::Cloneable_klass_loaded()) { +- if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) { ++ if (k->is_subtype_of(SystemDictionary::Cloneable_klass()) || k->is_subtype_of(SystemDictionary::Cloneable_klass()->klass_part()->newest_version())) { + k->set_is_cloneable(); + } + } +diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp +index 314ec5e..5fca1da 100644 +--- a/src/share/vm/classfile/classFileParser.hpp ++++ b/src/share/vm/classfile/classFileParser.hpp +@@ -151,6 +151,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + Handle class_loader, + Handle protection_domain, + Symbol* class_name, ++ KlassHandle old_klass, + TRAPS); + + // Field parsing +@@ -237,7 +238,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + unsigned int nonstatic_oop_map_count, + int* nonstatic_oop_offsets, + unsigned int* nonstatic_oop_counts); +- void set_precomputed_flags(instanceKlassHandle k); ++ void set_precomputed_flags(instanceKlassHandle k, KlassHandle old_klass); + objArrayHandle compute_transitive_interfaces(instanceKlassHandle super, + objArrayHandle local_ifs, TRAPS); + +@@ -349,17 +350,20 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + TempNewSymbol& parsed_name, + bool verify, + TRAPS) { + KlassHandle no_host_klass; +- return parseClassFile(name, class_loader, protection_domain, no_host_klass, NULL, parsed_name, verify, THREAD); ++ return parseClassFile(name, class_loader, protection_domain, old_klass, no_host_klass, NULL, NULL, parsed_name, verify, THREAD); + } + instanceKlassHandle parseClassFile(Symbol* name, + Handle class_loader, + Handle protection_domain, ++ KlassHandle old_klass, + KlassHandle host_klass, + GrowableArray<Handle>* cp_patches, ++ GrowableArray<Symbol*>* parsed_super_symbols, + TempNewSymbol& parsed_name, + bool verify, + TRAPS); +diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp +index a2e61a4..450e19f 100644 +--- a/src/share/vm/classfile/classLoader.cpp ++++ b/src/share/vm/classfile/classLoader.cpp +@@ -915,6 +915,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) { + instanceKlassHandle result = parser.parseClassFile(h_name, + class_loader, + protection_domain, ++ KlassHandle(), + parsed_name, + false, + CHECK_(h)); +diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp +index 78e76cc..d167c98 100644 +--- a/src/share/vm/classfile/dictionary.cpp ++++ b/src/share/vm/classfile/dictionary.cpp +@@ -144,87 +144,10 @@ bool Dictionary::do_unloading(BoolObjectClosure* is_alive) { + probe = *p; + klassOop e = probe->klass(); + oop class_loader = probe->loader(); +- + instanceKlass* ik = instanceKlass::cast(e); +- if (ik->previous_versions() != NULL) { +- // This klass has previous versions so see what we can cleanup +- // while it is safe to do so. 
+- +- int gc_count = 0; // leave debugging breadcrumbs +- int live_count = 0; +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000200, ("unload: %s: previous version length=%d", +- ik->external_name(), ik->previous_versions()->length())); +- +- for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) { +- // check the previous versions array for GC'ed weak refs +- PreviousVersionNode * pv_node = ik->previous_versions()->at(i); +- jobject cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- delete pv_node; +- ik->previous_versions()->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- constantPoolOop pvcp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (pvcp == NULL) { +- // this entry has been GC'ed so remove it +- delete pv_node; +- ik->previous_versions()->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- gc_count++; +- continue; +- } else { +- RC_TRACE(0x00000200, ("unload: previous version @%d is alive", i)); +- if (is_alive->do_object_b(pvcp)) { +- live_count++; +- } else { +- guarantee(false, "sanity check"); +- } +- } +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs != NULL) { +- RC_TRACE(0x00000200, ("unload: previous methods length=%d", +- method_refs->length())); +- for (int j = method_refs->length() - 1; j >= 0; j--) { +- jweak method_ref = method_refs->at(j); +- assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- method_refs->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL) { +- // this method entry has been GC'ed so remove it +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(j); +- } else { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000200, +- ("unload: %s(%s): prev method @%d in version @%d is alive", +- method->name()->as_C_string(), +- method->signature()->as_C_string(), j, i)); +- } +- } +- } +- } +- assert(ik->previous_versions()->length() == live_count, "sanity check"); +- RC_TRACE(0x00000200, +- ("unload: previous version stats: live=%d, GC'ed=%d", live_count, +- gc_count)); +- } +- ++ + // Non-unloadable classes were handled in always_strong_oops_do +- if (!is_strongly_reachable(class_loader, e)) { ++ if (!ik->is_redefining() && !is_strongly_reachable(class_loader, e)) { + // Entry was not visited in phase1 (negated test from phase1) + assert(class_loader != NULL, "unloading entry with null class loader"); + oop k_def_class_loader = ik->class_loader(); +@@ -326,6 +249,7 @@ void Dictionary::classes_do(void f(klassOop)) { + } + } + ++ + // Added for initialize_itable_for_klass to handle exceptions + // Just the classes from defining class loaders + void Dictionary::classes_do(void f(klassOop, TRAPS), TRAPS) { +@@ -433,6 +357,33 @@ void Dictionary::add_klass(Symbol* class_name, Handle class_loader, + add_entry(index, entry); + } + ++// (tw) Updates the klass entry to point to the new klassOop. Necessary only for class redefinition. 
++bool Dictionary::update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class) { ++ ++ // There are several entries for the same class in the dictionary: One extra entry for each parent classloader of the classloader of the class. ++ bool found = false; ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass() == old_class()) { ++ entry->set_literal(k()); ++ found = true; ++ } ++ } ++ } ++ ++ return found; ++} ++ ++// (tw) Undo previous updates to the system dictionary ++void Dictionary::rollback_redefinition() { ++ for (int index = 0; index < table_size(); index++) { ++ for (DictionaryEntry* entry = bucket(index); entry != NULL; entry = entry->next()) { ++ if (entry->klass()->klass_part()->is_redefining()) { ++ entry->set_literal(entry->klass()->klass_part()->old_version()); ++ } ++ } ++ } ++} + + // This routine does not lock the system dictionary. + // +@@ -459,12 +410,21 @@ DictionaryEntry* Dictionary::get_entry(int index, unsigned int hash, + return NULL; + } + ++klassOop Dictionary::intercept_for_version(klassOop k) { ++ if (k == NULL) return k; ++ ++ if (k->klass_part()->is_redefining() && !Thread::current()->pretend_new_universe()) { ++ return k->klass_part()->old_version(); ++ } ++ ++ return k; ++} + + klassOop Dictionary::find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS) { + DictionaryEntry* entry = get_entry(index, hash, name, loader); + if (entry != NULL && entry->is_valid_protection_domain(protection_domain)) { +- return entry->klass(); ++ return intercept_for_version(entry->klass()); + } else { + return NULL; + } +@@ -477,7 +437,7 @@ klassOop Dictionary::find_class(int index, unsigned int hash, + assert (index == index_for(name, loader), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, loader); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? entry->klass() : (klassOop)NULL); + } + + +@@ -489,7 +449,7 @@ klassOop Dictionary::find_shared_class(int index, unsigned int hash, + assert (index == index_for(name, Handle()), "incorrect index?"); + + DictionaryEntry* entry = get_entry(index, hash, name, Handle()); +- return (entry != NULL) ? entry->klass() : (klassOop)NULL; ++ return intercept_for_version((entry != NULL) ? 
entry->klass() : (klassOop)NULL); + } + + +diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dictionary.hpp +index bd33760..ea1fe3c 100644 +--- a/src/share/vm/classfile/dictionary.hpp ++++ b/src/share/vm/classfile/dictionary.hpp +@@ -73,6 +73,10 @@ public: + + void add_klass(Symbol* class_name, Handle class_loader,KlassHandle obj); + ++ bool update_klass(int index, unsigned int hash, Symbol* name, Handle loader, KlassHandle k, KlassHandle old_class); ++ ++ void rollback_redefinition(); ++ + klassOop find_class(int index, unsigned int hash, + Symbol* name, Handle loader); + +@@ -105,6 +109,7 @@ public: + bool do_unloading(BoolObjectClosure* is_alive); + + // Protection domains ++ static klassOop intercept_for_version(klassOop k); + klassOop find(int index, unsigned int hash, Symbol* name, + Handle loader, Handle protection_domain, TRAPS); + bool is_valid_protection_domain(int index, unsigned int hash, +diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp +index f8b10b3..c417a29 100644 +--- a/src/share/vm/classfile/javaClasses.cpp ++++ b/src/share/vm/classfile/javaClasses.cpp +@@ -621,6 +621,10 @@ klassOop java_lang_Class::as_klassOop(oop java_class) { + assert(java_lang_Class::is_instance(java_class), "must be a Class object"); + klassOop k = klassOop(java_class->obj_field(_klass_offset)); + assert(k == NULL || k->is_klass(), "type check"); ++ // Necessary to make old verifier work. ++ if (Thread::current()->pretend_new_universe()) { ++ k = k->klass_part()->newest_version(); ++ } + return k; + } + +@@ -1541,6 +1545,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, methodHandle met + skip_throwableInit_check = true; + } + } ++ method = method->newest_version(); + if (method->is_hidden()) { + if (skip_hidden) continue; + } +diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp +index b741cfa..5412831 100644 +--- a/src/share/vm/classfile/javaClasses.hpp ++++ b/src/share/vm/classfile/javaClasses.hpp +@@ -213,6 +213,7 @@ class java_lang_String : AllStatic { + + class java_lang_Class : AllStatic { + friend class VMStructs; ++ friend class VM_RedefineClasses; + + private: + // The fake offsets are added by the class loader when java.lang.Class is loaded +@@ -248,7 +249,7 @@ class java_lang_Class : AllStatic { + static void print_signature(oop java_class, outputStream *st); + // Testing + static bool is_instance(oop obj) { +- return obj != NULL && obj->klass() == SystemDictionary::Class_klass(); ++ return obj != NULL && (obj->klass()->klass_part()->newest_version() == SystemDictionary::Class_klass()->klass_part()->newest_version()); + } + static bool is_primitive(oop java_class); + static BasicType primitive_type(oop java_class); +diff --git a/src/share/vm/classfile/loaderConstraints.cpp b/src/share/vm/classfile/loaderConstraints.cpp +index 8650cd9..965cce2 100644 +--- a/src/share/vm/classfile/loaderConstraints.cpp ++++ b/src/share/vm/classfile/loaderConstraints.cpp +@@ -449,7 +449,7 @@ void LoaderConstraintTable::verify(Dictionary* dictionary, + if (k != NULL) { + // We found the class in the system dictionary, so we should + // make sure that the klassOop matches what we already have. +- guarantee(k == probe->klass(), "klass should be in dictionary"); ++ guarantee(k == probe->klass()->klass_part()->newest_version(), "klass should be in dictionary"); + } else { + // If we don't find the class in the system dictionary, it + // has to be in the placeholders table. 
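The intercept_for_version/pretend_new_universe pair introduced above encodes a simple visibility rule for in-flight redefinitions: ordinary threads keep resolving the old, stable class version, and only a thread that has set the per-thread flag observes the new one. A compilable sketch of the rule, using simplified stand-in types rather than the real klassOop machinery:

    // Assumed, simplified stand-in for a class with a version back-link.
    struct Klass {
      bool redefining = false;     // true while the new version is being wired in
      Klass* old_version = nullptr;
    };

    // Mirror of Dictionary::intercept_for_version: hand out the old version
    // unless the calling thread has opted into the new universe.
    Klass* intercept_for_version(Klass* k, bool pretend_new_universe) {
      if (k == nullptr) return k;
      if (k->redefining && !pretend_new_universe) return k->old_version;
      return k;
    }

Because every find/find_class lookup funnels through this filter, a failed redefinition never leaks a half-wired class to other threads.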
+diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp +index 899153a..3f64268 100644 +--- a/src/share/vm/classfile/systemDictionary.cpp ++++ b/src/share/vm/classfile/systemDictionary.cpp +@@ -157,6 +157,7 @@ klassOop SystemDictionary::resolve_or_fail(Symbol* class_name, Handle class_load + // can return a null klass + klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD); + } ++ assert(klass == NULL || klass->klass_part()->is_newest_version() || klass->klass_part()->newest_version()->klass_part()->is_redefining(), "must be"); + return klass; + } + +@@ -199,7 +200,7 @@ klassOop SystemDictionary::resolve_or_fail(Symbol* class_name, + // Forwards to resolve_instance_class_or_null + + klassOop SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) { +- assert(!THREAD->is_Compiler_thread(), ++ assert(!THREAD->is_Compiler_thread() || JvmtiThreadState::state_for(JavaThread::current())->get_class_being_redefined() != NULL, + err_msg("can not load classes with compiler thread: class=%s, classloader=%s", + class_name->as_C_string(), + class_loader.is_null() ? "null" : class_loader->klass()->klass_part()->name()->as_C_string())); +@@ -961,8 +962,10 @@ klassOop SystemDictionary::parse_stream(Symbol* class_name, + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ KlassHandle(), + host_klass, + cp_patches, ++ NULL, + parsed_name, + true, + THREAD); +@@ -1022,7 +1025,15 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + Handle protection_domain, + ClassFileStream* st, + bool verify, ++ KlassHandle old_class, + TRAPS) { ++ ++ bool redefine_classes_locked = false; ++ if (!Thread::current()->redefine_classes_mutex()->owned_by_self()) { ++ Thread::current()->redefine_classes_mutex()->lock(); ++ redefine_classes_locked = true; ++ } ++ + // Classloaders that support parallelism, e.g. bootstrap classloader, + // or all classloaders with UnsyncloadClass do not acquire lock here + bool DoObjectLock = true; +@@ -1050,9 +1061,14 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name, + class_loader, + protection_domain, ++ old_class, + parsed_name, + verify, + THREAD); ++ if (!old_class.is_null() && !k.is_null()) { ++ k->set_redefining(true); ++ k->set_old_version(old_class()); ++ } + + const char* pkg = "java/"; + if (!HAS_PENDING_EXCEPTION && +@@ -1087,13 +1103,18 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + // Add class just loaded + // If a class loader supports parallel classloading handle parallel define requests + // find_or_define_instance_class may return a different instanceKlass +- if (is_parallelCapable(class_loader)) { ++ // (tw) TODO: for class redefinition the parallel version does not work, check if this is a problem? ++ if (is_parallelCapable(class_loader) && old_class.is_null()) { + k = find_or_define_instance_class(class_name, class_loader, k, THREAD); + } else { +- define_instance_class(k, THREAD); ++ define_instance_class(k, old_class, THREAD); + } + } + ++ if (redefine_classes_locked) { ++ Thread::current()->redefine_classes_mutex()->unlock(); ++ } ++ + // If parsing the class file or define_instance_class failed, we + // need to remove the placeholder added on our behalf. 
But we + // must make sure parsed_name is valid first (it won't be if we had +@@ -1122,7 +1143,7 @@ klassOop SystemDictionary::resolve_from_stream(Symbol* class_name, + MutexLocker mu(SystemDictionary_lock, THREAD); + + klassOop check = find_class(parsed_name, class_loader); +- assert(check == k(), "should be present in the dictionary"); ++ assert((check == k() && !k->is_redefining()) || (k->is_redefining() && check == k->old_version()), "should be present in the dictionary"); + + klassOop check2 = find_class(h_name, h_loader); + assert(check == check2, "name inconsistancy in SystemDictionary"); +@@ -1349,7 +1370,11 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha + } + } + +-void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { ++void SystemDictionary::rollback_redefinition() { ++ dictionary()->rollback_redefinition(); ++} ++ ++void SystemDictionary::define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS) { + + Handle class_loader_h(THREAD, k->class_loader()); + +@@ -1376,13 +1401,23 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { + Symbol* name_h = k->name(); + unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader_h); + int d_index = dictionary()->hash_to_index(d_hash); +- check_constraints(d_index, d_hash, k, class_loader_h, true, CHECK); ++ ++ // (tw) Update version of the klassOop in the system dictionary ++ // TODO: Check for thread safety! ++ if (!old_class.is_null()) { ++ bool ok = dictionary()->update_klass(d_index, d_hash, name_h, class_loader_h, k, old_class); ++ assert (ok, "must have found old class and updated!"); ++ } ++ check_constraints(d_index, d_hash, k, class_loader_h, old_class.is_null(), CHECK); ++ ++ if(!old_class.is_null() && TraceRedefineClasses >= 3){ tty->print_cr("Class has been updated!"); } + + // Register class just loaded with class loader (placed in Vector) + // Note we do this before updating the dictionary, as this can + // fail with an OutOfMemoryError (if it does, we will *not* put this + // class in the dictionary and will not update the class hierarchy). +- if (k->class_loader() != NULL) { ++ // (tw) Only register if not redefining a class. ++ if (k->class_loader() != NULL && old_class.is_null()) { + methodHandle m(THREAD, Universe::loader_addClass_method()); + JavaValue result(T_VOID); + JavaCallArguments args(class_loader_h); +@@ -1408,8 +1443,9 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) { + } + k->eager_initialize(THREAD); + ++ // (tw) Only notify jvmti if not redefining a class. + // notify jvmti +- if (JvmtiExport::should_post_class_load()) { ++ if (JvmtiExport::should_post_class_load() && old_class.is_null()) { + assert(THREAD->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_load((JavaThread *) THREAD, k()); + +@@ -1482,7 +1518,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(Symbol* clas + } + } + +- define_instance_class(k, THREAD); ++ define_instance_class(k, KlassHandle(), THREAD); + + Handle linkage_exception = Handle(); // null handle + +@@ -1612,6 +1648,14 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) { + Universe::flush_dependents_on(k); + } + ++// (tw) Remove from hierarchy - Undo add_to_hierarchy. ++void SystemDictionary::remove_from_hierarchy(instanceKlassHandle k) { ++ assert(k.not_null(), "just checking"); ++ ++ k->remove_from_sibling_list(); ++ ++ // TODO (tw): Remove from interfaces. 
++}
+
+ // ----------------------------------------------------------------------------
+ // GC support
+
+@@ -1869,9 +1913,12 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
+
+ // Preload ref klasses and set reference types
+ instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
+- instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
++
++ // (tw) This is now done in parseClassFile in order to support class redefinition
++ // instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
+
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
++ // TODO(tw): Check that the following is also not necessary?
+ instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
+ instanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
+ instanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
+@@ -1955,7 +2002,7 @@ void SystemDictionary::check_constraints(int d_index, unsigned int d_hash,
+ // also holds array classes
+
+ assert(check->klass_part()->oop_is_instance(), "noninstance in systemdictionary");
+- if ((defining == true) || (k() != check)) {
++ if ((defining == true) && ((k() != check) && k->old_version() != check)) {
+ linkage_error = "loader (instance of %s): attempted duplicate class "
+ "definition for name: \"%s\"";
+ } else {
+@@ -2602,8 +2649,10 @@ void SystemDictionary::verify_obj_klass_present(Handle obj,
+ name = find_placeholder(class_name, class_loader);
+ }
+ }
+- guarantee(probe != NULL || name != NULL,
+- "Loaded klasses should be in SystemDictionary");
++ // (tw) Relaxed assertion to allow different class versions. Also allow redefining classes to lie around (because of rollback).
++ guarantee(probe != NULL &&
++ (!probe->is_klass() || (!((klassOop)(obj()))->klass_part()->is_redefining()) || ((klassOop)probe)->klass_part()->is_same_or_older_version((klassOop)(obj()))) || ((klassOop)(obj()))->klass_part()->is_redefining(),
++ "Loaded klasses should be in SystemDictionary");
+ }
+
+ // utility function for posting class load event
+diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp
+index adf82e5..00cf392 100644
+--- a/src/share/vm/classfile/systemDictionary.hpp
++++ b/src/share/vm/classfile/systemDictionary.hpp
+@@ -268,7 +268,7 @@ public:
+ // Resolve from stream (called by jni_DefineClass and JVM_DefineClass)
+ static klassOop resolve_from_stream(Symbol* class_name, Handle class_loader,
+ Handle protection_domain,
+- ClassFileStream* st, bool verify, TRAPS);
++ ClassFileStream* st, bool verify, KlassHandle old_class, TRAPS);
+
+ // Lookup an already loaded class. If not found NULL is returned.
+ static klassOop find(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
+@@ -343,6 +343,8 @@ public:
+ // System loader lock
+ static oop system_loader_lock() { return _system_loader_lock_obj; }
+
++ // Remove link to hierarchy
++ static void remove_from_hierarchy(instanceKlassHandle k);
+ private:
+ // Traverses preloaded oops: various system classes. These are
+ // guaranteed to be in the perm gen.
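update_klass, rollback_redefinition, and the relaxed check_constraints above amount to a two-phase update of the dictionary: entries are speculatively repointed at the new class objects, and on failure everything still marked as redefining is reverted through the old_version() back-link. A sketch of that protocol over an assumed flat entry table (the real dictionary is a hash table with one entry per initiating loader):

    #include <vector>

    struct Klass { bool redefining = false; Klass* old_version = nullptr; };
    struct Entry { Klass* klass; };

    // Phase 1: repoint every entry for the old class at the new version.
    bool update_klass(std::vector<Entry>& table, Klass* new_k, Klass* old_k) {
      bool found = false;
      for (Entry& e : table)
        if (e.klass == old_k) { e.klass = new_k; found = true; }
      return found;
    }

    // Failure path: revert anything still marked as redefining.
    void rollback_redefinition(std::vector<Entry>& table) {
      for (Entry& e : table)
        if (e.klass->redefining) e.klass = e.klass->old_version;
    }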
+@@ -415,6 +417,8 @@ public: + initialize_wk_klasses_until((WKID) limit, start_id, THREAD); + } + ++ static void rollback_redefinition(); ++ + public: + #define WK_KLASS_DECLARE(name, symbol, option) \ + static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); } +@@ -596,7 +600,7 @@ private: + // after waiting, but before reentering SystemDictionary_lock + // to preserve lock order semantics. + static void double_lock_wait(Handle lockObject, TRAPS); +- static void define_instance_class(instanceKlassHandle k, TRAPS); ++ static void define_instance_class(instanceKlassHandle k, KlassHandle old_class, TRAPS); + static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, + Handle class_loader, + instanceKlassHandle k, TRAPS); +diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp +index da188bb..097c50c 100644 +--- a/src/share/vm/classfile/verifier.cpp ++++ b/src/share/vm/classfile/verifier.cpp +@@ -106,7 +106,7 @@ bool Verifier::relax_verify_for(oop loader) { + return !need_verify; + } + +-bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, TRAPS) { ++bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS) { + HandleMark hm; + ResourceMark rm(THREAD); + +@@ -117,6 +117,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + + const char* klassName = klass->external_name(); + bool can_failover = FailOverToOldVerifier && ++ may_use_old_verifier && + klass->major_version() < NOFAILOVER_MAJOR_VERSION; + + // If the class should be verified, first see if we can use the split +@@ -138,6 +139,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + tty->print_cr( + "Fail over class verification to old verifier for: %s", klassName); + } ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -145,6 +147,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul + exception_message = split_verifier.exception_message(); + } + } else { ++ assert(may_use_old_verifier, ""); + exception_name = inference_verify( + klass, message_buffer, message_buffer_len, THREAD); + } +@@ -210,7 +213,7 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou + // NOTE: this is called too early in the bootstrapping process to be + // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection. + (refl_magic_klass == NULL || +- !klass->is_subtype_of(refl_magic_klass) || ++ !(klass->is_subtype_of(refl_magic_klass) || klass->is_subtype_of(refl_magic_klass->klass_part()->newest_version())) || + VerifyReflectionBytecodes) + ); + } +@@ -517,7 +520,7 @@ void ErrorContext::stackmap_details(outputStream* ss, methodOop method) const { + + ClassVerifier::ClassVerifier( + instanceKlassHandle klass, TRAPS) +- : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass) { ++ : _thread(THREAD), _exception_type(NULL), _message(NULL), _klass(klass->newest_version()), _klass_to_verify(klass) { + _this_type = VerificationType::reference_type(klass->name()); + // Create list to hold symbols in reference area. 
+ _symbols = new GrowableArray<Symbol*>(100, 0, NULL); +@@ -547,7 +550,7 @@ void ClassVerifier::verify_class(TRAPS) { + _klass->external_name()); + } + +- objArrayHandle methods(THREAD, _klass->methods()); ++ objArrayHandle methods(THREAD, _klass_to_verify->methods()); + int num_methods = methods->length(); + + for (int index = 0; index < num_methods; index++) { +@@ -2444,7 +2447,8 @@ void ClassVerifier::verify_invoke_instructions( + VerificationType stack_object_type = + current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this)); + if (current_type() != stack_object_type) { +- assert(cp->cache() == NULL, "not rewritten yet"); ++ // (tw) TODO: Check if relaxing the following assertion is correct. For class redefinition we might call the verifier twice. ++ //assert(cp->cache() == NULL, "not rewritten yet"); + Symbol* ref_class_name = + cp->klass_name_at(cp->klass_ref_index_at(index)); + // See the comments in verify_field_instructions() for +diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp +index 4457f4a..b1b96f2 100644 +--- a/src/share/vm/classfile/verifier.hpp ++++ b/src/share/vm/classfile/verifier.hpp +@@ -47,7 +47,7 @@ class Verifier : AllStatic { + * Otherwise, no exception is thrown and the return indicates the + * error. + */ +- static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, TRAPS); ++ static bool verify(instanceKlassHandle klass, Mode mode, bool should_verify_class, bool may_use_old_verifier, TRAPS); + + // Return false if the class is loaded by the bootstrap loader, + // or if defineClass was called requesting skipping verification +@@ -256,7 +256,10 @@ class ClassVerifier : public StackObj { + + ErrorContext _error_context; // contains information about an error + ++public: + void verify_method(methodHandle method, TRAPS); ++ ++private: + char* generate_code_data(methodHandle m, u4 code_length, TRAPS); + void verify_exception_handler_table(u4 code_length, char* code_data, + int& min, int& max, TRAPS); +@@ -329,6 +332,7 @@ class ClassVerifier : public StackObj { + + VerificationType object_type() const; + ++ instanceKlassHandle _klass_to_verify; + instanceKlassHandle _klass; // the class being verified + methodHandle _method; // current method being verified + VerificationType _this_type; // the verification type of the current class +diff --git a/src/share/vm/code/nmethod.cpp b/src/share/vm/code/nmethod.cpp +index 21c9413..59f5f7e 100644 +--- a/src/share/vm/code/nmethod.cpp ++++ b/src/share/vm/code/nmethod.cpp +@@ -2074,15 +2074,14 @@ bool nmethod::is_evol_dependent_on(klassOop dependee) { + methodOop method = deps.method_argument(0); + for (int j = 0; j < dependee_methods->length(); j++) { + if ((methodOop) dependee_methods->obj_at(j) == method) { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x01000000, +- ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", ++ ResourceMark rm(Thread::current()); ++ TRACE_RC3("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", + _method->method_holder()->klass_part()->external_name(), + _method->name()->as_C_string(), + _method->signature()->as_C_string(), compile_id(), + method->method_holder()->klass_part()->external_name(), + method->name()->as_C_string(), +- method->signature()->as_C_string())); ++ method->signature()->as_C_string()); + if (TraceDependencies || LogCompilation) + deps.log_dependency(dependee); + return true; +diff --git a/src/share/vm/compiler/compileBroker.cpp 
b/src/share/vm/compiler/compileBroker.cpp +index 0feca89..1c4b014 100644 +--- a/src/share/vm/compiler/compileBroker.cpp ++++ b/src/share/vm/compiler/compileBroker.cpp +@@ -1592,6 +1592,9 @@ void CompileBroker::compiler_thread_loop() { + + // Never compile a method if breakpoints are present in it + if (method()->number_of_breakpoints() == 0) { ++ // (tw) Obtain a compilation lock. Class redefinition requires that there is no compilation in parallel. ++ thread->compilation_mutex()->lock(); ++ thread->set_should_bailout(false); + // Compile the method. + if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { + #ifdef COMPILER1 +@@ -1615,6 +1618,7 @@ void CompileBroker::compiler_thread_loop() { + // After compilation is disabled, remove remaining methods from queue + method->clear_queued_for_compilation(); + } ++ thread->compilation_mutex()->unlock(); + } + } + } +@@ -1780,7 +1784,11 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { + //assert(false, "compiler should always document failure"); + // The compiler elected, without comment, not to register a result. + // Do not attempt further compilations of this method. +- ci_env.record_method_not_compilable("compile failed", !TieredCompilation); ++ if (((CompilerThread *)Thread::current())->should_bailout()) { ++ ci_env.record_failure("compile externally aborted"); ++ } else { ++ ci_env.record_method_not_compilable("compile failed"); ++ } + } + + // Copy this bit to the enclosing block: +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +index b0c9ec8..7feadf9 100644 +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +@@ -162,6 +162,13 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, + } + } + ++ ++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ ShouldNotReachHere(); ++ return NULL; ++} ++ + // Like CompactibleSpace forward() but always calls cross_threshold() to + // update the block offset table. Removed initialize_threshold call because + // CFLS does not use a block offset array for contiguous spaces. +@@ -2118,7 +2125,7 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const { + // Support for compaction + + void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); ++ SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false); + // prepare_for_compaction() uses the space between live objects + // so that later phase can skip dead space quickly. So verification + // of the free lists doesn't work after. 
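The compileBroker.cpp hunk above serializes compilation against redefinition through the compiler thread's compilation_mutex() and lets the redefining thread abort an in-flight compile by raising _should_bailout, which the reworked C1 CHECK_BAILOUT macros poll. Reduced to a standalone sketch (assumed names; in the patch the flag is a plain bool guarded by the mutex):

    #include <atomic>

    // One flag per compiler thread: the redefinition code raises it, and the
    // compilation polls it at every existing bailout check, so the external
    // request degrades into an ordinary local bailout.
    struct CompilerTaskState {
      std::atomic<bool> should_bailout{false};
      bool bailed_out = false;
      const char* bailout_msg = nullptr;

      void check_bailout() {              // analogue of CHECK_BAILOUT()
        if (should_bailout.load(std::memory_order_relaxed) && !bailed_out) {
          bailed_out = true;
          bailout_msg = "Aborted externally";
        }
      }
    };

On this path the broker records a plain failure ("compile externally aborted") instead of marking the method not compilable, so the method can be compiled again later against the new class version.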
+@@ -2139,7 +2146,7 @@ void CompactibleFreeListSpace::adjust_pointers() { + } + + void CompactibleFreeListSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ SCAN_AND_COMPACT(obj_size, false); + } + + // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] +diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +index 3b7bb9a..de7e54b 100644 +--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp ++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +@@ -149,6 +149,7 @@ class CompactibleFreeListSpace: public CompactibleSpace { + + // Support for compacting cms + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); ++ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); + HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); + + // Initialization helpers. +diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp +index 29841d8..d1386c7 100644 +--- a/src/share/vm/gc_implementation/shared/markSweep.cpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp +@@ -32,6 +32,8 @@ + #include "oops/objArrayKlass.inline.hpp" + #include "oops/oop.inline.hpp" + ++GrowableArray<oop>* MarkSweep::_rescued_oops = NULL; ++ + Stack<oop, mtGC> MarkSweep::_marking_stack; + Stack<DataLayout*, mtGC> MarkSweep::_revisit_mdo_stack; + Stack<Klass*, mtGC> MarkSweep::_revisit_klass_stack; +@@ -357,3 +359,97 @@ void MarkSweep::trace(const char* msg) { + } + + #endif ++ ++// (tw) Copy the rescued objects to their destination address after compaction. ++void MarkSweep::copy_rescued_objects_back() { ++ ++ if (_rescued_oops != NULL) { ++ ++ for (int i=0; i<_rescued_oops->length(); i++) { ++ oop rescued_obj = _rescued_oops->at(i); ++ ++ int size = rescued_obj->size(); ++ oop new_obj = rescued_obj->forwardee(); ++ ++ assert(rescued_obj->blueprint()->new_version() != NULL, "just checking"); ++ ++ if (rescued_obj->blueprint()->new_version()->klass_part()->update_information() != NULL) { ++ MarkSweep::update_fields(rescued_obj, new_obj); ++ } else { ++ rescued_obj->set_klass_no_check(rescued_obj->blueprint()->new_version()); ++ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); ++ } ++ ++ FREE_RESOURCE_ARRAY(HeapWord, rescued_obj, size); ++ ++ new_obj->init_mark(); ++ assert(new_obj->is_oop(), "must be a valid oop"); ++ } ++ _rescued_oops->clear(); ++ _rescued_oops = NULL; ++ } ++} ++ ++// (tw) Update instances of a class whose fields changed. 
++void MarkSweep::update_fields(oop q, oop new_location) { ++ ++ assert(q->blueprint()->new_version() != NULL, "class of old object must have new version"); ++ ++ klassOop old_klass_oop = q->klass(); ++ klassOop new_klass_oop = q->blueprint()->new_version(); ++ ++ instanceKlass *old_klass = instanceKlass::cast(old_klass_oop); ++ instanceKlass *new_klass = instanceKlass::cast(new_klass_oop); ++ ++ int size = q->size_given_klass(old_klass); ++ int new_size = q->size_given_klass(new_klass); ++ ++ oop tmp_obj = q; ++ ++ // Save object somewhere, there is an overlap in fields ++ if (new_klass_oop->klass_part()->is_copying_backwards()) { ++ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) || ++ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) { ++ tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size); ++ } ++ } ++ ++ tmp_obj->set_klass_no_check(new_klass_oop); ++ int *cur = new_klass_oop->klass_part()->update_information(); ++ assert(cur != NULL, "just checking"); ++ MarkSweep::update_fields(new_location, tmp_obj, cur); ++ ++ if (tmp_obj != q) { ++ FREE_RESOURCE_ARRAY(HeapWord, tmp_obj, size); ++ } ++} ++ ++void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) { ++ assert(cur != NULL, "just checking"); ++ char* to = (char*)new_location; ++ while (*cur != 0) { ++ int size = *cur; ++ if (size > 0) { ++ cur++; ++ int offset = *cur; ++ HeapWord* from = (HeapWord*)(((char *)tmp_obj) + offset); ++ if (size == HeapWordSize) { ++ *((HeapWord*)to) = *from; ++ } else if (size == HeapWordSize * 2) { ++ *((HeapWord*)to) = *from; ++ *(((HeapWord*)to) + 1) = *(from + 1); ++ } else { ++ Copy::conjoint_jbytes(from, to, size); ++ } ++ to += size; ++ cur++; ++ } else { ++ assert(size < 0, ""); ++ int skip = -*cur; ++ Copy::fill_to_bytes(to, skip, 0); ++ to += skip; ++ cur++; ++ } ++ } ++} +diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp +index eb8252c..40118db 100644 +--- a/src/share/vm/gc_implementation/shared/markSweep.hpp ++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp +@@ -117,8 +117,12 @@ class MarkSweep : AllStatic { + friend class AdjustPointerClosure; + friend class KeepAliveClosure; + friend class VM_MarkSweep; ++ friend class GenMarkSweep; + friend void marksweep_init(); + ++public: ++ static GrowableArray<oop>* _rescued_oops; ++ + // + // Vars + // +@@ -208,6 +212,9 @@ class MarkSweep : AllStatic { + template <class T> static inline void mark_and_push(T* p); + static inline void push_objarray(oop obj, size_t index); + ++ static void copy_rescued_objects_back(); ++ static void update_fields(oop q, oop new_location); ++ static void update_fields(oop new_location, oop tmp_obj, int *cur); + static void follow_stack(); // Empty marking stack. 
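The int array walked by update_fields(oop, oop, int*) above is the per-class update information computed during redefinition, and its encoding can be read straight off the loop: the array is 0-terminated, a positive entry is a byte count to copy followed by the source offset, and a negative entry zero-fills that many bytes for a field the new class version added. A self-contained sketch of that decoder (the object layout is invented for the example, and HotSpot's word-sized fast paths are reduced to plain memcpy):

    #include <cstring>

    // Walks the same 0-terminated encoding as MarkSweep::update_fields().
    static void apply_update_info(const char* old_obj, char* new_obj,
                                  const int* cur) {
      char* to = new_obj;
      while (*cur != 0) {
        int size = *cur++;
        if (size > 0) {
          int offset = *cur++;
          std::memcpy(to, old_obj + offset, size);  // unchanged field group
          to += size;
        } else {
          std::memset(to, 0, -size);                // newly added field
          to += -size;
        }
      }
    }

    int main() {
      // Invented layout: old = 8-byte header + int a + int b,
      //                  new = 8-byte header + int a + int c (new) + int b.
      char old_obj[16];
      std::memset(old_obj, 'H', 8);       // fake header
      std::memset(old_obj + 8, 'A', 4);   // field a
      std::memset(old_obj + 12, 'B', 4);  // field b

      const int info[] = { 12, 0,   // copy header + a from old offset 0
                           -4,      // zero-fill the new field c
                           4, 12,   // copy b from old offset 12
                           0 };     // terminator

      char new_obj[20];
      apply_update_info(old_obj, new_obj, info);
      // new_obj now holds 8 x 'H', 4 x 'A', 4 x '\0', 4 x 'B'.
      return 0;
    }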
+ + static void preserve_mark(oop p, markOop mark); +diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp +index 32c0bdb..448d673 100644 +--- a/src/share/vm/interpreter/interpreterRuntime.cpp ++++ b/src/share/vm/interpreter/interpreterRuntime.cpp +@@ -402,7 +402,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea + assert(h_exception.not_null(), "NULL exceptions should be handled by athrow"); + assert(h_exception->is_oop(), "just checking"); + // Check that exception is a subclass of Throwable, otherwise we have a VerifyError +- if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) { ++ if (!(h_exception->is_a(SystemDictionary::Throwable_klass()->klass_part()->newest_version())) && !(h_exception->is_a(SystemDictionary::Throwable_klass()))) { + if (ExitVMOnVerifyError) vm_exit(-1); + ShouldNotReachHere(); + } +diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp +index b17f405..1c96783 100644 +--- a/src/share/vm/interpreter/linkResolver.cpp ++++ b/src/share/vm/interpreter/linkResolver.cpp +@@ -153,8 +153,8 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass + // Klass resolution + + void LinkResolver::check_klass_accessability(KlassHandle ref_klass, KlassHandle sel_klass, TRAPS) { +- if (!Reflection::verify_class_access(ref_klass->as_klassOop(), +- sel_klass->as_klassOop(), ++ if (!Reflection::verify_class_access(ref_klass->as_klassOop()->klass_part()->newest_version(), ++ sel_klass->as_klassOop()->klass_part()->newest_version(), + true)) { + ResourceMark rm(THREAD); + Exceptions::fthrow( +@@ -338,7 +338,7 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass, + // We'll check for the method name first, as that's most likely + // to be false (so we'll short-circuit out of these tests). + if (sel_method->name() == vmSymbols::clone_name() && +- sel_klass() == SystemDictionary::Object_klass() && ++ sel_klass()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version() && + resolved_klass->oop_is_array()) { + // We need to change "protected" to "public". + assert(flags.is_protected(), "clone not protected?"); +@@ -634,7 +634,7 @@ void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle poo + } + + // Final fields can only be accessed from its own class. 
+- if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) { ++ if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()->klass_part()->active_version() && sel_klass() != pool->pool_holder()) { + THROW(vmSymbols::java_lang_IllegalAccessError()); + } + +@@ -839,7 +839,7 @@ void LinkResolver::resolve_virtual_call(CallInfo& result, Handle recv, KlassHand + bool check_access, bool check_null_and_abstract, TRAPS) { + methodHandle resolved_method; + linktime_resolve_virtual_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK); +- runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, check_null_and_abstract, CHECK); ++ runtime_resolve_virtual_method(result, resolved_method, resolved_klass, recv, receiver_klass, current_klass, check_null_and_abstract, CHECK); + } + + // throws linktime exceptions +@@ -869,6 +869,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result, + KlassHandle resolved_klass, + Handle recv, + KlassHandle recv_klass, ++ KlassHandle current_klass, + bool check_null_and_abstract, + TRAPS) { + +@@ -917,6 +918,9 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result, + // recv_klass might be an arrayKlassOop but all vtables start at + // the same place. The cast is to avoid virtual call and assertion. + instanceKlass* inst = (instanceKlass*)recv_klass()->klass_part(); ++ ++ // (tw) Check that the receiver is a subtype of the holder of the resolved method. ++ assert(inst->is_subtype_of(resolved_method->method_holder()), "receiver and resolved method holder are inconsistent"); + selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index)); + } + } +diff --git a/src/share/vm/interpreter/linkResolver.hpp b/src/share/vm/interpreter/linkResolver.hpp +index dfd74f9..6ca1b54 100644 +--- a/src/share/vm/interpreter/linkResolver.hpp ++++ b/src/share/vm/interpreter/linkResolver.hpp +@@ -110,7 +110,8 @@ class CallInfo: public LinkInfo { + // It does all necessary link-time checks & throws exceptions if necessary. 
+ + class LinkResolver: AllStatic { +- private: ++private: ++ static void lookup_method (methodHandle& result, KlassHandle resolved_klass, Symbol* name, Symbol* signature, bool is_interface, KlassHandle current_klass, TRAPS); + static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); + static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS); +@@ -133,7 +134,7 @@ class LinkResolver: AllStatic { + static void linktime_resolve_interface_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS); + + static void runtime_resolve_special_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, KlassHandle current_klass, bool check_access, TRAPS); +- static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); ++ static void runtime_resolve_virtual_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, KlassHandle current_klass, bool check_null_and_abstract, TRAPS); + static void runtime_resolve_interface_method (CallInfo& result, methodHandle resolved_method, KlassHandle resolved_klass, Handle recv, KlassHandle recv_klass, bool check_null_and_abstract, TRAPS); + + static void check_field_accessability (KlassHandle ref_klass, KlassHandle resolved_klass, KlassHandle sel_klass, fieldDescriptor& fd, TRAPS); +diff --git a/src/share/vm/interpreter/oopMapCache.cpp b/src/share/vm/interpreter/oopMapCache.cpp +index 01d5753..6816b3a 100644 +--- a/src/share/vm/interpreter/oopMapCache.cpp ++++ b/src/share/vm/interpreter/oopMapCache.cpp +@@ -536,9 +536,9 @@ void OopMapCache::flush_obsolete_entries() { + if (!_array[i].is_empty() && _array[i].method()->is_old()) { + // Cache entry is occupied by an old redefined method and we don't want + // to pin it down so flush the entry. +- RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d", ++ TRACE_RC3("flush: %s(%s): cached entry @%d", + _array[i].method()->name()->as_C_string(), +- _array[i].method()->signature()->as_C_string(), i)); ++ _array[i].method()->signature()->as_C_string(), i); + + _array[i].flush(); + } +diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp +index 76e18d8..6af7c14 100644 +--- a/src/share/vm/memory/genMarkSweep.cpp ++++ b/src/share/vm/memory/genMarkSweep.cpp +@@ -421,6 +421,7 @@ void GenMarkSweep::mark_sweep_phase4() { + // in the same order in phase2, phase3 and phase4. We don't quite do that + // here (perm_gen first rather than last), so we tell the validate code + // to use a higher index (saved from phase2) when verifying perm_gen. 
++ assert(_rescued_oops == NULL, "must be empty before processing");
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ Generation* pg = gch->perm_gen();
+
+@@ -433,10 +434,14 @@
+
+ VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
+
++ MarkSweep::copy_rescued_objects_back();
++
+ GenCompactClosure blk;
+ gch->generation_iterate(&blk, true);
+
+ VALIDATE_MARK_SWEEP_ONLY(compaction_complete());
+
++ MarkSweep::copy_rescued_objects_back();
++
+ pg->post_compact(); // Shared spaces verification.
+ }
+diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
+index b5f8e0e..856cfce 100644
+--- a/src/share/vm/memory/iterator.hpp
++++ b/src/share/vm/memory/iterator.hpp
+@@ -101,6 +101,12 @@ class OopClosure : public Closure {
+ #endif
+ };
+
++class OopClosureNoHeader : public OopClosure {
++public:
++ // If "true", invoke on header klass field.
++ bool do_header() { return false; } // Note that this is non-virtual.
++};
++
+ // ObjectClosure is used for iterating through an object space
+
+ class ObjectClosure : public Closure {
+diff --git a/src/share/vm/memory/oopFactory.cpp b/src/share/vm/memory/oopFactory.cpp
+index def88cc..016d7eb 100644
+--- a/src/share/vm/memory/oopFactory.cpp
++++ b/src/share/vm/memory/oopFactory.cpp
+@@ -129,11 +129,11 @@ klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_
+ unsigned int nonstatic_oop_map_count,
+ AccessFlags access_flags,
+ ReferenceType rt,
+- KlassHandle host_klass, TRAPS) {
++ KlassHandle host_klass, KlassHandle old_klass, TRAPS) {
+ instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
+ return ikk->allocate_instance_klass(name, vtable_len, itable_len,
+ static_field_size, nonstatic_oop_map_count,
+- access_flags, rt, host_klass, CHECK_NULL);
++ access_flags, rt, host_klass, old_klass, CHECK_NULL);
+ }
+
+
+diff --git a/src/share/vm/memory/oopFactory.hpp b/src/share/vm/memory/oopFactory.hpp
+index e7e22d4..ce39ada 100644
+--- a/src/share/vm/memory/oopFactory.hpp
++++ b/src/share/vm/memory/oopFactory.hpp
+@@ -80,7 +80,7 @@ class oopFactory: AllStatic {
+ unsigned int nonstatic_oop_map_count,
+ AccessFlags access_flags,
+ ReferenceType rt,
+- KlassHandle host_klass, TRAPS);
++ KlassHandle host_klass, KlassHandle old_klass, TRAPS);
+
+ // Methods
+ private:
+diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
+index f97bc34..c8563b2 100644
+--- a/src/share/vm/memory/space.cpp
++++ b/src/share/vm/memory/space.cpp
+@@ -378,9 +378,8 @@ void CompactibleSpace::clear(bool mangle_space) {
+ _compaction_top = bottom();
+ }
+
+-HeapWord* CompactibleSpace::forward(oop q, size_t size,
+- CompactPoint* cp, HeapWord* compact_top) {
+- // q is alive
++// (tw) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
++HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) { + // First check if we should switch compaction space + assert(this == cp->space, "'this' should be current compaction space."); + size_t compaction_max_size = pointer_delta(end(), compact_top); +@@ -400,8 +399,15 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size, + compaction_max_size = pointer_delta(cp->space->end(), compact_top); + } + ++ return compact_top; ++} ++ ++HeapWord* CompactibleSpace::forward(oop q, size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ compact_top = forward_compact_top(size, cp, compact_top); ++ + // store the forwarding pointer into the mark word +- if ((HeapWord*)q != compact_top) { ++ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) { + q->forward_to(oop(compact_top)); + assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); + } else { +@@ -423,6 +429,58 @@ HeapWord* CompactibleSpace::forward(oop q, size_t size, + return compact_top; + } + ++// Compute the forward sizes and leave out objects whose position could ++// possibly overlap other objects. ++HeapWord* CompactibleSpace::forward_with_rescue(oop q, size_t size, ++ CompactPoint* cp, HeapWord* compact_top) { ++ size_t forward_size = size; ++ ++ // (DCEVM) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL && oop(q)->blueprint()->new_version()->klass_part()->update_information() != NULL) { ++ ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ forward_size = new_size; ++ } ++ ++ compact_top = forward_compact_top(forward_size, cp, compact_top); ++ ++ if (must_rescue(oop(q), oop(compact_top))) { ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ MarkSweep::_rescued_oops->append(oop(q)); ++ return compact_top; ++ } ++ ++ return forward(q, forward_size, cp, compact_top); ++} ++ ++// Compute the forwarding addresses for the objects that need to be rescued. ++HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) { ++ // TODO: empty the _rescued_oops after ALL spaces are compacted! 
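++ // The oops deferred by forward_with_rescue() above get their forwarding
++ // addresses here, once the regular scan of the space has assigned all
++ // other destinations (see the redefinition_run branch added to
++ // SCAN_AND_FORWARD below).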
++ if (MarkSweep::_rescued_oops != NULL) { ++ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { ++ oop q = MarkSweep::_rescued_oops->at(i); ++ ++ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ ++ size_t size = block_size((HeapWord*)q); ++ ++ // (tw) There is a new version of the class of q => different size ++ if (oop(q)->blueprint()->new_version() != NULL) { ++ size_t new_size = oop(q)->size_given_klass(oop(q)->blueprint()->new_version()->klass_part()); ++ assert(size != new_size || oop(q)->is_perm(), "instances without changed size have to be updated prior to GC run"); ++ size = new_size; ++ } ++ ++ compact_top = cp->space->forward(oop(q), size, cp, compact_top); ++ assert(compact_top <= end(), "must not write over end of space!"); ++ } ++ MarkSweep::_rescued_oops->clear(); ++ MarkSweep::_rescued_oops = NULL; ++ } ++ return compact_top; ++} + + bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, + HeapWord* q, size_t deadlength) { +@@ -444,12 +502,17 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, + #define adjust_obj_size(s) s + + void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); ++ SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false); + } + + // Faster object search. + void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { +- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); ++ if (!Universe::is_redefining_gc_run()) { ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false); ++ } else { ++ // Redefinition run ++ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true); ++ } + } + + void Space::adjust_pointers() { +@@ -490,6 +553,111 @@ void Space::adjust_pointers() { + assert(q == t, "just checking"); + } + ++ ++#ifdef ASSERT ++ ++int CompactibleSpace::space_index(oop obj) { ++ GenCollectedHeap* heap = GenCollectedHeap::heap(); ++ ++ if (heap->is_in_permanent(obj)) { ++ return -1; ++ } ++ ++ int index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ if (space->is_in_reserved(obj)) { ++ return index; ++ } ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ tty->print_cr("could not compute space_index for %08xh", obj); ++ index = 0; ++ for (int i = heap->n_gens() - 1; i >= 0; i--) { ++ Generation* gen = heap->get_gen(i); ++ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); ++ ++ CompactibleSpace* space = gen->first_compaction_space(); ++ while (space != NULL) { ++ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); ++ space = space->next_compaction_space(); ++ index++; ++ } ++ } ++ ++ ShouldNotReachHere(); ++ return 0; ++} ++#endif ++ ++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { ++ // Only redefined objects can have the need to be rescued. ++ if (oop(old_obj)->blueprint()->new_version() == NULL) return false; ++ ++ if (old_obj->is_perm()) { ++ // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj. 
++ return true; ++ } ++ ++ int new_size = old_obj->size_given_klass(oop(old_obj)->blueprint()->new_version()->klass_part()); ++ int original_size = old_obj->size(); ++ ++ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); ++ bool old_in_tenured = tenured_gen->is_in_reserved(old_obj); ++ bool new_in_tenured = tenured_gen->is_in_reserved(new_obj); ++ if (old_in_tenured == new_in_tenured) { ++ // Rescue if object may overlap with a higher memory address. ++ bool overlap = (old_obj + original_size < new_obj + new_size); ++ if (old_in_tenured) { ++ // Old and new address are in same space, so just compare the address. ++ // Must rescue if object moves towards the top of the space. ++ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); ++ } else { ++ // In the new generation, eden is located before the from space, so a ++ // simple pointer comparison is sufficient. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); ++ } ++ return overlap; ++ ++ } else { ++ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); ++ if (tenured_gen->is_in_reserved(new_obj)) { ++ // Must never rescue when moving from the new into the old generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) > space_index(new_obj), "must be"); ++ return false; ++ ++ } else /* if (tenured_gen->is_in_reserved(old_obj)) */ { ++ // Must always rescue when moving from the old into the new generation. ++ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); ++ assert(space_index(old_obj) < space_index(new_obj), "must be"); ++ return true; ++ } ++ } ++} ++ ++oop CompactibleSpace::rescue(oop old_obj) { ++ assert(must_rescue(old_obj, old_obj->forwardee()), "do not call otherwise"); ++ ++ int size = old_obj->size(); ++ oop rescued_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ Copy::aligned_disjoint_words((HeapWord*)old_obj, (HeapWord*)rescued_obj, size); ++ ++ if (MarkSweep::_rescued_oops == NULL) { ++ MarkSweep::_rescued_oops = new GrowableArray<oop>(128); ++ } ++ ++ MarkSweep::_rescued_oops->append(rescued_obj); ++ return rescued_obj; ++} ++ + void CompactibleSpace::adjust_pointers() { + // Check first is there is any work to do. + if (used() == 0) { +@@ -500,7 +668,13 @@ void CompactibleSpace::adjust_pointers() { + } + + void CompactibleSpace::compact() { +- SCAN_AND_COMPACT(obj_size); ++ ++ if(!Universe::is_redefining_gc_run()) { ++ SCAN_AND_COMPACT(obj_size, false); ++ } else { ++ // Redefinition run ++ SCAN_AND_COMPACT(obj_size, true) ++ } + } + + void Space::print_short() const { print_short_on(tty); } +diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp +index ef2f2c6..ff95a8b 100644 +--- a/src/share/vm/memory/space.hpp ++++ b/src/share/vm/memory/space.hpp +@@ -445,6 +445,9 @@ public: + // indicates when the next such action should be taken. 
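For the case where the old and new location fall in the same space, the must_rescue() test above reduces to a single comparison: rescue when the object's new image would end above its old end, i.e. the resized object moves toward the top of the space and its tail could land on bytes it has not yet vacated. A reduced sketch of that predicate, with plain char pointers standing in for HeapWord*/oop:

    #include <cstddef>

    // Same-space case of must_rescue(): the destination range ends past
    // the source range's end, so a sliding copy could overwrite live data.
    static bool overlaps_toward_top(const char* old_addr, size_t old_size,
                                    const char* new_addr, size_t new_size) {
      return old_addr + old_size < new_addr + new_size;
    }

    // Example: an object growing in place from 16 to 24 bytes must be
    // rescued: overlaps_toward_top(p, 16, p, 24) == true.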
+ virtual void prepare_for_compaction(CompactPoint* cp);
+ // MarkSweep support phase3
++ DEBUG_ONLY(int space_index(oop obj));
++ bool must_rescue(oop old_obj, oop new_obj);
++ oop rescue(oop old_obj);
+ virtual void adjust_pointers();
+ // MarkSweep support phase4
+ virtual void compact();
+@@ -474,6 +477,15 @@ public:
+ // accordingly".
+ virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
+ HeapWord* compact_top);
++ // (DCEVM) Same as forward(), but can rescue objects. Invoked only during
++ // redefinition runs.
++ HeapWord* forward_with_rescue(oop q, size_t size, CompactPoint* cp,
++ HeapWord* compact_top);
++
++ HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
++
++ // (tw) Compute new compact top without actually forwarding the object.
++ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
+
+ // Return a size with adjustments as required of the space.
+ virtual size_t adjust_object_size_v(size_t size) const { return size; }
+@@ -504,7 +516,7 @@ protected:
+ size_t word_len);
+ };
+
+-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
++#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
+ /* Compute the new addresses for the live objects and store it in the mark \
+ * Used by universe::mark_sweep_phase2() \
+ */ \
+@@ -564,7 +576,17 @@ protected:
+ Prefetch::write(q, interval); \
+ /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
+ size_t size = block_size(q); \
+- compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
++ if (redefinition_run) { \
++ compact_top = cp->space->forward_with_rescue(oop(q), size, \
++ cp, compact_top); \
++ if (q < first_dead && oop(q)->is_gc_marked()) { \
++ /* Was moved (otherwise, forward would reset mark), \
++ set first_dead to here */ \
++ first_dead = q; \
++ } \
++ } else { \
++ compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
++ } \
+ q += size; \
+ end_of_live = q; \
+ } else { \
+@@ -613,6 +635,8 @@ protected:
+ } \
+ } \
+ \
++ if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
++ \
+ assert(q == t, "just checking"); \
+ if (liveRange != NULL) { \
+ liveRange->set_end(q); \
+@@ -665,13 +689,8 @@ protected:
+ q += size; \
+ } \
+ \
+- if (_first_dead == t) { \
+- q = t; \
+- } else { \
+- /* $$$ This is funky. Using this to read the previously written \
+- * LiveRange. See also use below.
*/ \ +- q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ +- } \ ++ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ ++ q = _first_dead; \ + } \ + \ + const intx interval = PrefetchScanIntervalInBytes; \ +@@ -702,7 +721,7 @@ protected: + assert(q == t, "just checking"); \ + } + +-#define SCAN_AND_COMPACT(obj_size) { \ ++#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \ + /* Copy all live objects to their new location \ + * Used by MarkSweep::mark_sweep_phase4() */ \ + \ +@@ -728,12 +747,8 @@ protected: + } \ + ) /* debug_only */ \ + \ +- if (_first_dead == t) { \ +- q = t; \ +- } else { \ +- /* $$$ Funky */ \ +- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ +- } \ ++ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ ++ q = _first_dead; \ + } \ + \ + const intx scan_interval = PrefetchScanIntervalInBytes; \ +@@ -752,13 +767,36 @@ protected: + size_t size = obj_size(q); \ + HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ + \ ++ if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \ ++ oop dest_obj = rescue(oop(q)); \ ++ debug_only(Copy::fill_to_words(q, size, 0)); \ ++ q += size; \ ++ continue; \ ++ } \ ++ \ + /* prefetch beyond compaction_top */ \ + Prefetch::write(compaction_top, copy_interval); \ + \ + /* copy object and reinit its mark */ \ + VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \ + compaction_top)); \ +- assert(q != compaction_top, "everything in this pass should be moving"); \ ++ assert(q != compaction_top || oop(q)->blueprint()->new_version() != NULL, \ ++ "everything in this pass should be moving"); \ ++ if (redefinition_run && oop(q)->blueprint()->new_version() != NULL) { \ ++ klassOop new_version = oop(q)->blueprint()->new_version(); \ ++ if (new_version->klass_part()->update_information() == NULL) { \ ++ Copy::aligned_conjoint_words(q, compaction_top, size); \ ++ oop(compaction_top)->set_klass_no_check(new_version); \ ++ } else { \ ++ MarkSweep::update_fields(oop(q), oop(compaction_top)); \ ++ } \ ++ oop(compaction_top)->init_mark(); \ ++ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ ++ \ ++ debug_only(prev_q = q); \ ++ q += size; \ ++ continue; \ ++ } \ + Copy::aligned_conjoint_words(q, compaction_top, size); \ + oop(compaction_top)->init_mark(); \ + assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ +diff --git a/src/share/vm/memory/specialized_oop_closures.hpp b/src/share/vm/memory/specialized_oop_closures.hpp +index 4d7c50b..671787e 100644 +--- a/src/share/vm/memory/specialized_oop_closures.hpp ++++ b/src/share/vm/memory/specialized_oop_closures.hpp +@@ -37,6 +37,7 @@ + + // Forward declarations. 
+ class OopClosure; ++class OopClosureNoHeader; + class OopsInGenClosure; + // DefNew + class ScanClosure; +@@ -74,6 +75,7 @@ class CMSInnerParMarkAndPushClosure; + #endif + + #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f) \ ++ f(OopClosureNoHeader,_v) \ + f(ScanClosure,_nv) \ + f(FastScanClosure,_nv) \ + f(FilteringClosure,_nv) +@@ -132,6 +134,7 @@ class CMSInnerParMarkAndPushClosure; + + #define ALL_PAR_OOP_ITERATE_CLOSURES(f) \ + f(OopClosure,_v) \ ++ f(OopClosureNoHeader,_v) \ + SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f) + #endif // SERIALGC + +diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp +index 8ce17d9..fe37993 100644 +--- a/src/share/vm/memory/universe.cpp ++++ b/src/share/vm/memory/universe.cpp +@@ -100,6 +100,8 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" + #endif + ++bool Universe::_is_redefining_gc_run = false; ++ + // Known objects + klassOop Universe::_boolArrayKlassObj = NULL; + klassOop Universe::_byteArrayKlassObj = NULL; +@@ -204,6 +206,42 @@ void Universe::system_classes_do(void f(klassOop)) { + f(systemObjArrayKlassObj()); + } + ++// (tw) This method should iterate all pointers that are not within heap objects. ++void Universe::root_oops_do(OopClosure *oopClosure) { ++ ++ class AlwaysTrueClosure: public BoolObjectClosure { ++ public: ++ void do_object(oop p) { ShouldNotReachHere(); } ++ bool do_object_b(oop p) { return true; } ++ }; ++ AlwaysTrueClosure always_true; ++ ++ Universe::oops_do(oopClosure); ++// ReferenceProcessor::oops_do(oopClosure); (tw) check why no longer there ++ JNIHandles::oops_do(oopClosure); // Global (strong) JNI handles ++ Threads::oops_do(oopClosure, NULL); ++ ObjectSynchronizer::oops_do(oopClosure); ++ FlatProfiler::oops_do(oopClosure); ++ JvmtiExport::oops_do(oopClosure); ++ ++ // Now adjust pointers in remaining weak roots. (All of which should ++ // have been cleared if they pointed to non-surviving objects.) ++ // Global (weak) JNI handles ++ JNIHandles::weak_oops_do(&always_true, oopClosure); ++ ++ CodeCache::oops_do(oopClosure); ++ StringTable::oops_do(oopClosure); ++ ++ // (tw) TODO: Check if this is correct? 
++ //CodeCache::scavenge_root_nmethods_oops_do(oopClosure); ++ //Management::oops_do(oopClosure); ++ //ref_processor()->weak_oops_do(&oopClosure); ++ //PSScavenge::reference_processor()->weak_oops_do(&oopClosure); ++ ++ // SO_AllClasses ++ SystemDictionary::oops_do(oopClosure); ++} ++ + void Universe::oops_do(OopClosure* f, bool do_all) { + + f->do_oop((oop*) &_int_mirror); +@@ -1590,10 +1628,9 @@ void ActiveMethodOopsCache::add_previous_version(const methodOop method) { + } + + // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000100, +- ("add: %s(%s): adding prev version ref for cached method @%d", ++ TRACE_RC2("add: %s(%s): adding prev version ref for cached method @%d", + method->name()->as_C_string(), method->signature()->as_C_string(), +- _prev_methods->length())); ++ _prev_methods->length()); + + methodHandle method_h(method); + jweak method_ref = JNIHandles::make_weak_global(method_h); +@@ -1620,9 +1657,8 @@ void ActiveMethodOopsCache::add_previous_version(const methodOop method) { + JNIHandles::destroy_weak_global(method_ref); + _prev_methods->remove_at(i); + } else { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive", +- m->name()->as_C_string(), m->signature()->as_C_string(), i)); ++ TRACE_RC2("add: %s(%s): previous cached method @%d is alive", ++ m->name()->as_C_string(), m->signature()->as_C_string(), i); + } + } + } // end add_previous_version() +diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp +index da21a8b..676675e 100644 +--- a/src/share/vm/memory/universe.hpp ++++ b/src/share/vm/memory/universe.hpp +@@ -127,6 +127,8 @@ class Universe: AllStatic { + friend class SystemDictionary; + friend class VMStructs; + friend class CompactingPermGenGen; ++ friend class Space; ++ friend class ContiguousSpace; + friend class VM_PopulateDumpSharedSpace; + + friend jint universe_init(); +@@ -258,7 +260,18 @@ class Universe: AllStatic { + + static void compute_verify_oop_data(); + ++ static bool _is_redefining_gc_run; ++ + public: ++ ++ static bool is_redefining_gc_run() { ++ return _is_redefining_gc_run; ++ } ++ ++ static void set_redefining_gc_run(bool b) { ++ _is_redefining_gc_run = b; ++ } ++ + // Known classes in the VM + static klassOop boolArrayKlassObj() { return _boolArrayKlassObj; } + static klassOop byteArrayKlassObj() { return _byteArrayKlassObj; } +@@ -403,6 +416,8 @@ class Universe: AllStatic { + + // Iteration + ++ static void root_oops_do(OopClosure *f); ++ + // Apply "f" to the addresses of all the direct heap pointers maintained + // as static fields of "Universe". + static void oops_do(OopClosure* f, bool do_all = false); +@@ -419,6 +434,7 @@ class Universe: AllStatic { + + // Debugging + static bool verify_in_progress() { return _verify_in_progress; } ++ static void set_verify_in_progress(bool b) { _verify_in_progress = b; } + static void verify(bool silent, VerifyOption option); + static void verify(bool silent) { + verify(silent, VerifyOption_Default /* option */); +diff --git a/src/share/vm/oops/cpCacheOop.cpp b/src/share/vm/oops/cpCacheOop.cpp +index ad62921..1cd422c 100644 +--- a/src/share/vm/oops/cpCacheOop.cpp ++++ b/src/share/vm/oops/cpCacheOop.cpp +@@ -40,6 +40,11 @@ + void ConstantPoolCacheEntry::initialize_entry(int index) { + assert(0 < index && index < 0x10000, "sanity check"); + _indices = index; ++// (DCEVM) Should put something else to force JVM to fail if these invalid entries are accessed! 
++//#ifdef ASSERT ++ _f1 = NULL; ++ _f2 = 0; ++//#endif + assert(constant_pool_index() == index, ""); + } + +@@ -162,7 +167,8 @@ void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code, + int vtable_index) { + assert(!is_secondary_entry(), ""); + assert(method->interpreter_entry() != NULL, "should have been set at this point"); +- assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); ++ // (tw) No longer valid assert ++ //assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); + + int byte_no = -1; + bool change_to_virtual = false; +@@ -516,116 +522,6 @@ void ConstantPoolCacheEntry::update_pointers() { + } + #endif // SERIALGC + +-// RedefineClasses() API support: +-// If this constantPoolCacheEntry refers to old_method then update it +-// to refer to new_method. +-bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, +- methodOop new_method, bool * trace_name_printed) { +- +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- if (f2_as_vfinal_method() == old_method) { +- // match old_method so need an update +- // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values +- _f2 = (intptr_t)new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- +- return true; +- } +- +- // f1() is not used with virtual entries so bail out +- return false; +- } +- +- if ((oop)_f1 == NULL) { +- // NULL f1() means this is a virtual entry so bail out +- // We are assuming that the vtable index does not need change. +- return false; +- } +- +- if ((oop)_f1 == old_method) { +- _f1 = new_method; +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { +- if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); +- *trace_name_printed = true; +- } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); +- } +- +- return true; +- } +- +- return false; +-} +- +-// a constant pool cache entry should never contain old or obsolete methods +-bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- methodOop m = (methodOop)_f2; +- // Return false if _f2 refers to an old or an obsolete method. +- // _f2 == NULL || !m->is_method() are just as unexpected here. 
+- return (m != NULL && m->is_method() && !m->is_old() && !m->is_obsolete()); +- } else if ((oop)_f1 == NULL || !((oop)_f1)->is_method()) { +- // _f1 == NULL || !_f1->is_method() are OK here +- return true; +- } +- +- methodOop m = (methodOop)_f1; +- // return false if _f1 refers to an old or an obsolete method +- return (!m->is_old() && !m->is_obsolete()); +-} +- +-bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { +- if (!is_method_entry()) { +- // not a method entry so not interesting by default +- return false; +- } +- +- methodOop m = NULL; +- if (is_vfinal()) { +- // virtual and final so _f2 contains method ptr instead of vtable index +- m = f2_as_vfinal_method(); +- } else if (is_f1_null()) { +- // NULL _f1 means this is a virtual entry so also not interesting +- return false; +- } else { +- oop f1 = _f1; // _f1 is volatile +- if (!f1->is_method()) { +- // _f1 can also contain a klassOop for an interface +- return false; +- } +- m = f1_as_method(); +- } +- +- assert(m != NULL && m->is_method(), "sanity check"); +- if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) { +- // robustness for above sanity checks or method is not in +- // the interesting class +- return false; +- } +- +- // the method is in the interesting class so the entry is interesting +- return true; +-} +- + void ConstantPoolCacheEntry::print(outputStream* st, int index) const { + // print separator + if (index == 0) st->print_cr(" -------------"); +@@ -663,60 +559,10 @@ void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) { + } + } + +-// RedefineClasses() API support: +-// If any entry of this constantPoolCache points to any of +-// old_methods, replace it with the corresponding new_method. +-void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed) { +- +- if (methods_length == 0) { +- // nothing to do if there are no methods +- return; +- } +- +- // get shorthand for the interesting class +- klassOop old_holder = old_methods[0]->method_holder(); ++void constantPoolCacheOopDesc::adjust_entries() { + + for (int i = 0; i < length(); i++) { +- if (!entry_at(i)->is_interesting_method_entry(old_holder)) { +- // skip uninteresting methods +- continue; +- } +- +- // The constantPoolCache contains entries for several different +- // things, but we only care about methods. In fact, we only care +- // about methods in the same class as the one that contains the +- // old_methods. At this point, we have an interesting entry. 
+- +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; +- methodOop new_method = new_methods[j]; +- +- if (entry_at(i)->adjust_method_entry(old_method, new_method, +- trace_name_printed)) { +- // current old_method matched this entry and we updated it so +- // break out and get to the next interesting entry if there one +- break; +- } +- } ++ entry_at(i)->initialize_entry(entry_at(i)->constant_pool_index()); + } + } + +-// the constant pool cache should never contain old or obsolete methods +-bool constantPoolCacheOopDesc::check_no_old_or_obsolete_entries() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL) && +- !entry_at(i)->check_no_old_or_obsolete_entries()) { +- return false; +- } +- } +- return true; +-} +- +-void constantPoolCacheOopDesc::dump_cache() { +- for (int i = 1; i < length(); i++) { +- if (entry_at(i)->is_interesting_method_entry(NULL)) { +- entry_at(i)->print(tty, i); +- } +- } +-} +diff --git a/src/share/vm/oops/cpCacheOop.hpp b/src/share/vm/oops/cpCacheOop.hpp +index ef26775..a270d0d 100644 +--- a/src/share/vm/oops/cpCacheOop.hpp ++++ b/src/share/vm/oops/cpCacheOop.hpp +@@ -355,17 +355,6 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { + + void update_pointers(); + +- // RedefineClasses() API support: +- // If this constantPoolCacheEntry refers to old_method then update it +- // to refer to new_method. +- // trace_name_printed is set to true if the current call has +- // printed the klass name so that other routines in the adjust_* +- // group don't print the klass name. +- bool adjust_method_entry(methodOop old_method, methodOop new_method, +- bool * trace_name_printed); +- bool check_no_old_or_obsolete_entries(); +- bool is_interesting_method_entry(klassOop k); +- + // Debugging & Printing + void print (outputStream* st, int index) const; + void verify(outputStream* st) const; +@@ -485,16 +474,8 @@ class constantPoolCacheOopDesc: public oopDesc { + return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index); + } + +- // RedefineClasses() API support: +- // If any entry of this constantPoolCache points to any of +- // old_methods, replace it with the corresponding new_method. +- // trace_name_printed is set to true if the current call has +- // printed the klass name so that other routines in the adjust_* +- // group don't print the klass name. +- void adjust_method_entries(methodOop* old_methods, methodOop* new_methods, +- int methods_length, bool * trace_name_printed); +- bool check_no_old_or_obsolete_entries(); +- void dump_cache(); ++ // (tw) Clear references to methods and fields from this cache. ++ void adjust_entries(); + }; + + #endif // SHARE_VM_OOPS_CPCACHEOOP_HPP +diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp +index cd3dce0..666ffdf 100644 +--- a/src/share/vm/oops/instanceKlass.cpp ++++ b/src/share/vm/oops/instanceKlass.cpp +@@ -255,7 +255,7 @@ bool instanceKlass::verify_code( + // 1) Verify the bytecodes + Verifier::Mode mode = + throw_verifyerror ? 
Verifier::ThrowException : Verifier::NoException; +- return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false); ++ return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), true, CHECK_false); + } + + +@@ -362,7 +362,13 @@ bool instanceKlass::link_class_impl( + jt->get_thread_stat()->perf_recursion_counts_addr(), + jt->get_thread_stat()->perf_timers_addr(), + PerfClassTraceTime::CLASS_VERIFY); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(true); ++ } + bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); ++ if (this_oop->is_redefining()) { ++ Thread::current()->set_pretend_new_universe(false); ++ } + if (!verify_ok) { + return false; + } +@@ -400,7 +406,8 @@ bool instanceKlass::link_class_impl( + } + #endif + this_oop->set_init_state(linked); +- if (JvmtiExport::should_post_class_prepare()) { ++ // (tw) Must check for old version in order to prevent infinite loops. ++ if (JvmtiExport::should_post_class_prepare() && this_oop->old_version() == NULL /* JVMTI deadlock otherwise */) { + Thread *thread = THREAD; + assert(thread->is_Java_thread(), "thread->is_Java_thread()"); + JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop()); +@@ -454,7 +461,9 @@ void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { + // If we were to use wait() instead of waitInterruptibly() then + // we might end up throwing IE from link/symbol resolution sites + // that aren't expected to throw. This would wreak havoc. See 6320309. +- while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) { ++ // (tw) Wait also for the old class version to be fully initialized. ++ while((this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) ++ || (this_oop->old_version() != NULL && ((instanceKlass*)this_oop->old_version()->klass_part())->is_being_initialized())) { + wait = true; + ol.waitUninterruptibly(CHECK); + } +@@ -673,6 +682,18 @@ bool instanceKlass::implements_interface(klassOop k) const { + return false; + } + ++bool instanceKlass::implements_interface_any_version(klassOop k) const { ++ k = k->klass_part()->newest_version(); ++ if (this->newest_version() == k) return true; ++ assert(Klass::cast(k)->is_interface(), "should be an interface class"); ++ for (int i = 0; i < transitive_interfaces()->length(); i++) { ++ if (((klassOop)transitive_interfaces()->obj_at(i))->klass_part()->newest_version() == k) { ++ return true; ++ } ++ } ++ return false; ++} ++ + objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) { + if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException()); + if (length > arrayOopDesc::max_array_length(T_OBJECT)) { +@@ -949,6 +970,18 @@ void instanceKlass::methods_do(void f(methodOop method)) { + } + } + ++void instanceKlass::store_update_information(GrowableArray<int> &values) { ++ int *arr = NEW_C_HEAP_ARRAY(int, values.length(), mtClass); ++ for (int i=0; i<values.length(); i++) { ++ arr[i] = values.at(i); ++ } ++ set_update_information(arr); ++} ++ ++void instanceKlass::clear_update_information() { ++ FREE_C_HEAP_ARRAY(int, update_information(), mtClass); ++ set_update_information(NULL); ++} + + void instanceKlass::do_local_static_fields(FieldClosure* cl) { + for (JavaFieldStream fs(this); !fs.done(); fs.next()) { +@@ -1368,6 +1401,20 @@ jmethodID instanceKlass::jmethod_id_or_null(methodOop method) { + return id; + } + ++bool instanceKlass::update_jmethod_id(methodOop method, jmethodID 
newMethodID) { ++ size_t idnum = (size_t)method->method_idnum(); ++ jmethodID* jmeths = methods_jmethod_ids_acquire(); ++ size_t length; // length assigned as debugging crumb ++ jmethodID id = NULL; ++ if (jmeths != NULL && // If there is a cache ++ (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, ++ jmeths[idnum+1] = newMethodID; // Set the id (may be NULL) ++ return true; ++ } ++ ++ return false; ++} ++ + + // Cache an itable index + void instanceKlass::set_cached_itable_index(size_t idnum, int index) { +@@ -1527,6 +1574,13 @@ void instanceKlass::remove_dependent_nmethod(nmethod* nm) { + last = b; + b = b->next(); + } ++ ++ // (tw) Hack as dependencies get wrong version of klassOop ++ if(this->old_version() != NULL) { ++ ((instanceKlass *)this->old_version()->klass_part())->remove_dependent_nmethod(nm); ++ return; ++ } ++ + #ifdef ASSERT + tty->print_cr("### %s can't find dependent nmethod:", this->external_name()); + nm->print(); +@@ -1922,16 +1976,6 @@ void instanceKlass::release_C_heap_structures() { + assert(breakpoints() == 0x0, "should have cleared breakpoints"); + } + +- // deallocate information about previous versions +- if (_previous_versions != NULL) { +- for (int i = _previous_versions->length() - 1; i >= 0; i--) { +- PreviousVersionNode * pv_node = _previous_versions->at(i); +- delete pv_node; +- } +- delete _previous_versions; +- _previous_versions = NULL; +- } +- + // deallocate the cached class file + if (_cached_class_file_bytes != NULL) { + os::free(_cached_class_file_bytes, mtClass); +@@ -2545,275 +2589,10 @@ void instanceKlass::set_init_state(ClassState state) { + } + #endif + +- +-// RedefineClasses() support for previous versions: +- +-// Add an information node that contains weak references to the +-// interesting parts of the previous version of the_class. +-// This is also where we clean out any unused weak references. +-// Note that while we delete nodes from the _previous_versions +-// array, we never delete the array itself until the klass is +-// unloaded. The has_been_redefined() query depends on that fact. +-// +-void instanceKlass::add_previous_version(instanceKlassHandle ikh, +- BitMap* emcp_methods, int emcp_method_count) { +- assert(Thread::current()->is_VM_thread(), +- "only VMThread can add previous versions"); +- +- if (_previous_versions == NULL) { +- // This is the first previous version so make some space. +- // Start with 2 elements under the assumption that the class +- // won't be redefined much. 
+- _previous_versions = new (ResourceObj::C_HEAP, mtClass) +- GrowableArray<PreviousVersionNode *>(2, true); +- } +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d", +- ikh->external_name(), _previous_versions->length(), emcp_method_count)); +- constantPoolHandle cp_h(ikh->constants()); +- jobject cp_ref; +- if (cp_h->is_shared()) { +- // a shared ConstantPool requires a regular reference; a weak +- // reference would be collectible +- cp_ref = JNIHandles::make_global(cp_h); +- } else { +- cp_ref = JNIHandles::make_weak_global(cp_h); +- } +- PreviousVersionNode * pv_node = NULL; +- objArrayOop old_methods = ikh->methods(); +- +- if (emcp_method_count == 0) { +- // non-shared ConstantPool gets a weak reference +- pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL); +- RC_TRACE(0x00000400, +- ("add: all methods are obsolete; flushing any EMCP weak refs")); +- } else { +- int local_count = 0; +- GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP, mtClass) +- GrowableArray<jweak>(emcp_method_count, true); +- for (int i = 0; i < old_methods->length(); i++) { +- if (emcp_methods->at(i)) { +- // this old method is EMCP so save a weak ref +- methodOop old_method = (methodOop) old_methods->obj_at(i); +- methodHandle old_method_h(old_method); +- jweak method_ref = JNIHandles::make_weak_global(old_method_h); +- method_refs->append(method_ref); +- if (++local_count >= emcp_method_count) { +- // no more EMCP methods so bail out now +- break; +- } +- } +- } +- // non-shared ConstantPool gets a weak reference +- pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs); +- } +- +- _previous_versions->append(pv_node); +- +- // Using weak references allows the interesting parts of previous +- // classes to be GC'ed when they are no longer needed. Since the +- // caller is the VMThread and we are at a safepoint, this is a good +- // time to clear out unused weak references. +- +- RC_TRACE(0x00000400, ("add: previous version length=%d", +- _previous_versions->length())); +- +- // skip the last entry since we just added it +- for (int i = _previous_versions->length() - 2; i >= 0; i--) { +- // check the previous versions array for a GC'ed weak refs +- pv_node = _previous_versions->at(i); +- cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- delete pv_node; +- _previous_versions->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp == NULL) { +- // this entry has been GC'ed so remove it +- delete pv_node; +- _previous_versions->remove_at(i); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. 
+- continue; +- } else { +- RC_TRACE(0x00000400, ("add: previous version @%d is alive", i)); +- } +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs != NULL) { +- RC_TRACE(0x00000400, ("add: previous methods length=%d", +- method_refs->length())); +- for (int j = method_refs->length() - 1; j >= 0; j--) { +- jweak method_ref = method_refs->at(j); +- assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- method_refs->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL || emcp_method_count == 0) { +- // This method entry has been GC'ed or the current +- // RedefineClasses() call has made all methods obsolete +- // so remove it. +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(j); +- } else { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00000400, +- ("add: %s(%s): previous method @%d in version @%d is alive", +- method->name()->as_C_string(), method->signature()->as_C_string(), +- j, i)); +- } +- } +- } +- } +- +- int obsolete_method_count = old_methods->length() - emcp_method_count; +- +- if (emcp_method_count != 0 && obsolete_method_count != 0 && +- _previous_versions->length() > 1) { +- // We have a mix of obsolete and EMCP methods. If there is more +- // than the previous version that we just added, then we have to +- // clear out any matching EMCP method entries the hard way. +- int local_count = 0; +- for (int i = 0; i < old_methods->length(); i++) { +- if (!emcp_methods->at(i)) { +- // only obsolete methods are interesting +- methodOop old_method = (methodOop) old_methods->obj_at(i); +- Symbol* m_name = old_method->name(); +- Symbol* m_signature = old_method->signature(); +- +- // skip the last entry since we just added it +- for (int j = _previous_versions->length() - 2; j >= 0; j--) { +- // check the previous versions array for a GC'ed weak refs +- pv_node = _previous_versions->at(j); +- cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- delete pv_node; +- _previous_versions->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp == NULL) { +- // this entry has been GC'ed so remove it +- delete pv_node; +- _previous_versions->remove_at(j); +- // Since we are traversing the array backwards, we don't have to +- // do anything special with the index. +- continue; +- } +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs == NULL) { +- // We have run into a PreviousVersion generation where +- // all methods were made obsolete during that generation's +- // RedefineClasses() operation. At the time of that +- // operation, all EMCP methods were flushed so we don't +- // have to go back any further. +- // +- // A NULL method_refs is different than an empty method_refs. +- // We cannot infer any optimizations about older generations +- // from an empty method_refs for the current generation. 
+- break; +- } +- +- for (int k = method_refs->length() - 1; k >= 0; k--) { +- jweak method_ref = method_refs->at(k); +- assert(method_ref != NULL, +- "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- method_refs->remove_at(k); +- // Since we are traversing the array backwards, we don't +- // have to do anything special with the index. +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL) { +- // this method entry has been GC'ed so skip it +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(k); +- continue; +- } +- +- if (method->name() == m_name && +- method->signature() == m_signature) { +- // The current RedefineClasses() call has made all EMCP +- // versions of this method obsolete so mark it as obsolete +- // and remove the weak ref. +- RC_TRACE(0x00000400, +- ("add: %s(%s): flush obsolete method @%d in version @%d", +- m_name->as_C_string(), m_signature->as_C_string(), k, j)); +- +- method->set_is_obsolete(); +- JNIHandles::destroy_weak_global(method_ref); +- method_refs->remove_at(k); +- break; +- } +- } +- +- // The previous loop may not find a matching EMCP method, but +- // that doesn't mean that we can optimize and not go any +- // further back in the PreviousVersion generations. The EMCP +- // method for this generation could have already been GC'ed, +- // but there still may be an older EMCP method that has not +- // been GC'ed. +- } +- +- if (++local_count >= obsolete_method_count) { +- // no more obsolete methods so bail out now +- break; +- } +- } +- } +- } +-} // end add_previous_version() +- +- + // Determine if instanceKlass has a previous version. + bool instanceKlass::has_previous_version() const { +- if (_previous_versions == NULL) { +- // no previous versions array so answer is easy +- return false; +- } +- +- for (int i = _previous_versions->length() - 1; i >= 0; i--) { +- // Check the previous versions array for an info node that hasn't +- // been GC'ed +- PreviousVersionNode * pv_node = _previous_versions->at(i); +- +- jobject cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "cp reference was unexpectedly cleared"); +- if (cp_ref == NULL) { +- continue; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp != NULL) { +- // we have at least one previous version +- return true; +- } +- +- // We don't have to check the method refs. If the constant pool has +- // been GC'ed then so have the methods. +- } +- +- // all of the underlying nodes' info has been GC'ed +- return false; +-} // end has_previous_version() ++ return _old_version != NULL; ++} + + methodOop instanceKlass::method_with_idnum(int idnum) { + methodOop m = NULL; +@@ -2854,153 +2633,3 @@ void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, obj + } // if no array and idnum isn't included there is nothing to do + } + +-// Construct a PreviousVersionNode entry for the array hung off +-// the instanceKlass. 
+-PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool, +- bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) { +- +- _prev_constant_pool = prev_constant_pool; +- _prev_cp_is_weak = prev_cp_is_weak; +- _prev_EMCP_methods = prev_EMCP_methods; +-} +- +- +-// Destroy a PreviousVersionNode +-PreviousVersionNode::~PreviousVersionNode() { +- if (_prev_constant_pool != NULL) { +- if (_prev_cp_is_weak) { +- JNIHandles::destroy_weak_global(_prev_constant_pool); +- } else { +- JNIHandles::destroy_global(_prev_constant_pool); +- } +- } +- +- if (_prev_EMCP_methods != NULL) { +- for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) { +- jweak method_ref = _prev_EMCP_methods->at(i); +- if (method_ref != NULL) { +- JNIHandles::destroy_weak_global(method_ref); +- } +- } +- delete _prev_EMCP_methods; +- } +-} +- +- +-// Construct a PreviousVersionInfo entry +-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) { +- _prev_constant_pool_handle = constantPoolHandle(); // NULL handle +- _prev_EMCP_method_handles = NULL; +- +- jobject cp_ref = pv_node->prev_constant_pool(); +- assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared"); +- if (cp_ref == NULL) { +- return; // robustness +- } +- +- constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); +- if (cp == NULL) { +- // Weak reference has been GC'ed. Since the constant pool has been +- // GC'ed, the methods have also been GC'ed. +- return; +- } +- +- // make the constantPoolOop safe to return +- _prev_constant_pool_handle = constantPoolHandle(cp); +- +- GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); +- if (method_refs == NULL) { +- // the instanceKlass did not have any EMCP methods +- return; +- } +- +- _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10); +- +- int n_methods = method_refs->length(); +- for (int i = 0; i < n_methods; i++) { +- jweak method_ref = method_refs->at(i); +- assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); +- if (method_ref == NULL) { +- continue; // robustness +- } +- +- methodOop method = (methodOop)JNIHandles::resolve(method_ref); +- if (method == NULL) { +- // this entry has been GC'ed so skip it +- continue; +- } +- +- // make the methodOop safe to return +- _prev_EMCP_method_handles->append(methodHandle(method)); +- } +-} +- +- +-// Destroy a PreviousVersionInfo +-PreviousVersionInfo::~PreviousVersionInfo() { +- // Since _prev_EMCP_method_handles is not C-heap allocated, we +- // don't have to delete it. +-} +- +- +-// Construct a helper for walking the previous versions array +-PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) { +- _previous_versions = ik->previous_versions(); +- _current_index = 0; +- // _hm needs no initialization +- _current_p = NULL; +-} +- +- +-// Destroy a PreviousVersionWalker +-PreviousVersionWalker::~PreviousVersionWalker() { +- // Delete the current info just in case the caller didn't walk to +- // the end of the previous versions list. No harm if _current_p is +- // already NULL. +- delete _current_p; +- +- // When _hm is destroyed, all the Handles returned in +- // PreviousVersionInfo objects will be destroyed. +- // Also, after this destructor is finished it will be +- // safe to delete the GrowableArray allocated in the +- // PreviousVersionInfo objects. +-} +- +- +-// Return the interesting information for the next previous version +-// of the klass. Returns NULL if there are no more previous versions. 
+-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() { +- if (_previous_versions == NULL) { +- // no previous versions so nothing to return +- return NULL; +- } +- +- delete _current_p; // cleanup the previous info for the caller +- _current_p = NULL; // reset to NULL so we don't delete same object twice +- +- int length = _previous_versions->length(); +- +- while (_current_index < length) { +- PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); +- PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass) +- PreviousVersionInfo(pv_node); +- +- constantPoolHandle cp_h = pv_info->prev_constant_pool_handle(); +- if (cp_h.is_null()) { +- delete pv_info; +- +- // The underlying node's info has been GC'ed so try the next one. +- // We don't have to check the methods. If the constant pool has +- // GC'ed then so have the methods. +- continue; +- } +- +- // Found a node with non GC'ed info so return it. The caller will +- // need to delete pv_info when they are done with it. +- _current_p = pv_info; +- return pv_info; +- } +- +- // all of the underlying nodes' info has been GC'ed +- return NULL; +-} // end next_previous_version() +diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp +index 8a849cb..53163b6 100644 +--- a/src/share/vm/oops/instanceKlass.hpp ++++ b/src/share/vm/oops/instanceKlass.hpp +@@ -271,9 +271,6 @@ class instanceKlass: public Klass { + nmethodBucket* _dependencies; // list of dependent nmethods + nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class + BreakpointInfo* _breakpoints; // bpt lists, managed by methodOop +- // Array of interesting part(s) of the previous version(s) of this +- // instanceKlass. See PreviousVersionWalker below. +- GrowableArray<PreviousVersionNode *>* _previous_versions; + // JVMTI fields can be moved to their own structure - see 6315920 + unsigned char * _cached_class_file_bytes; // JVMTI: cached class file, before retransformable agent modified it in CFLH + jint _cached_class_file_len; // JVMTI: length of above +@@ -571,20 +568,11 @@ class instanceKlass: public Klass { + _nonstatic_oop_map_size = words; + } + +- // RedefineClasses() support for previous versions: +- void add_previous_version(instanceKlassHandle ikh, BitMap *emcp_methods, +- int emcp_method_count); + // If the _previous_versions array is non-NULL, then this klass + // has been redefined at least once even if we aren't currently + // tracking a previous version. 
+- bool has_been_redefined() const { return _previous_versions != NULL; } ++ bool has_been_redefined() const { return _old_version != NULL; } + bool has_previous_version() const; +- void init_previous_versions() { +- _previous_versions = NULL; +- } +- GrowableArray<PreviousVersionNode *>* previous_versions() const { +- return _previous_versions; +- } + + // JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation + void set_cached_class_file(unsigned char *class_file_bytes, +@@ -629,6 +617,7 @@ class instanceKlass: public Klass { + static void get_jmethod_id_length_value(jmethodID* cache, size_t idnum, + size_t *length_p, jmethodID* id_p); + jmethodID jmethod_id_or_null(methodOop method); ++ bool update_jmethod_id(methodOop method, jmethodID newMethodID); + + // cached itable index support + void set_cached_itable_index(size_t idnum, int index); +@@ -711,6 +700,7 @@ class instanceKlass: public Klass { + + // subclass/subinterface checks + bool implements_interface(klassOop k) const; ++ bool implements_interface_any_version(klassOop k) const; + + // Access to the implementor of an interface. + klassOop implementor() const +@@ -760,6 +750,9 @@ class instanceKlass: public Klass { + void do_local_static_fields(FieldClosure* cl); + void do_nonstatic_fields(FieldClosure* cl); // including inherited fields + void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS); ++ void store_update_information(GrowableArray<int> &values); ++ void clear_update_information(); ++ + + void methods_do(void f(methodOop method)); + void array_klasses_do(void f(klassOop k)); +@@ -895,7 +888,6 @@ class instanceKlass: public Klass { + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + #endif // !SERIALGC + +-private: + // initialization state + #ifdef ASSERT + void set_init_state(ClassState state); +@@ -1057,106 +1049,6 @@ class JNIid: public CHeapObj<mtClass> { + void verify(klassOop holder); + }; + +- +-// If breakpoints are more numerous than just JVMTI breakpoints, +-// consider compressing this data structure. +-// It is currently a simple linked list defined in methodOop.hpp. +- +-class BreakpointInfo; +- +- +-// A collection point for interesting information about the previous +-// version(s) of an instanceKlass. This class uses weak references to +-// the information so that the information may be collected as needed +-// by the system. If the information is shared, then a regular +-// reference must be used because a weak reference would be seen as +-// collectible. A GrowableArray of PreviousVersionNodes is attached +-// to the instanceKlass as needed. See PreviousVersionWalker below. +-class PreviousVersionNode : public CHeapObj<mtClass> { +- private: +- // A shared ConstantPool is never collected so we'll always have +- // a reference to it so we can update items in the cache. We'll +- // have a weak reference to a non-shared ConstantPool until all +- // of the methods (EMCP or obsolete) have been collected; the +- // non-shared ConstantPool becomes collectible at that point. +- jobject _prev_constant_pool; // regular or weak reference +- bool _prev_cp_is_weak; // true if not a shared ConstantPool +- +- // If the previous version of the instanceKlass doesn't have any +- // EMCP methods, then _prev_EMCP_methods will be NULL. If all the +- // EMCP methods have been collected, then _prev_EMCP_methods can +- // have a length of zero. 
+- GrowableArray<jweak>* _prev_EMCP_methods; +- +-public: +- PreviousVersionNode(jobject prev_constant_pool, bool prev_cp_is_weak, +- GrowableArray<jweak>* prev_EMCP_methods); +- ~PreviousVersionNode(); +- jobject prev_constant_pool() const { +- return _prev_constant_pool; +- } +- GrowableArray<jweak>* prev_EMCP_methods() const { +- return _prev_EMCP_methods; +- } +-}; +- +- +-// A Handle-ized version of PreviousVersionNode. +-class PreviousVersionInfo : public ResourceObj { +- private: +- constantPoolHandle _prev_constant_pool_handle; +- // If the previous version of the instanceKlass doesn't have any +- // EMCP methods, then _prev_EMCP_methods will be NULL. Since the +- // methods cannot be collected while we hold a handle, +- // _prev_EMCP_methods should never have a length of zero. +- GrowableArray<methodHandle>* _prev_EMCP_method_handles; +- +-public: +- PreviousVersionInfo(PreviousVersionNode *pv_node); +- ~PreviousVersionInfo(); +- constantPoolHandle prev_constant_pool_handle() const { +- return _prev_constant_pool_handle; +- } +- GrowableArray<methodHandle>* prev_EMCP_method_handles() const { +- return _prev_EMCP_method_handles; +- } +-}; +- +- +-// Helper object for walking previous versions. This helper cleans up +-// the Handles that it allocates when the helper object is destroyed. +-// The PreviousVersionInfo object returned by next_previous_version() +-// is only valid until a subsequent call to next_previous_version() or +-// the helper object is destroyed. +-class PreviousVersionWalker : public StackObj { +- private: +- GrowableArray<PreviousVersionNode *>* _previous_versions; +- int _current_index; +- // Fields for cleaning up when we are done walking the previous versions: +- // A HandleMark for the PreviousVersionInfo handles: +- HandleMark _hm; +- +- // It would be nice to have a ResourceMark field in this helper also, +- // but the ResourceMark code says to be careful to delete handles held +- // in GrowableArrays _before_ deleting the GrowableArray. Since we +- // can't guarantee the order in which the fields are destroyed, we +- // have to let the creator of the PreviousVersionWalker object do +- // the right thing. Also, adding a ResourceMark here causes an +- // include loop. +- +- // A pointer to the current info object so we can handle the deletes. +- PreviousVersionInfo * _current_p; +- +- public: +- PreviousVersionWalker(instanceKlass *ik); +- ~PreviousVersionWalker(); +- +- // Return the interesting information for the next previous version +- // of the klass. Returns NULL if there are no more previous versions. +- PreviousVersionInfo* next_previous_version(); +-}; +- +- + // + // nmethodBucket is used to record dependent nmethods for + // deoptimization. 
nmethod dependencies are actually <klass, method> +diff --git a/src/share/vm/oops/instanceKlassKlass.cpp b/src/share/vm/oops/instanceKlassKlass.cpp +index 8e7dc12..5b9b266 100644 +--- a/src/share/vm/oops/instanceKlassKlass.cpp ++++ b/src/share/vm/oops/instanceKlassKlass.cpp +@@ -358,7 +358,7 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it + unsigned nonstatic_oop_map_count, + AccessFlags access_flags, + ReferenceType rt, +- KlassHandle host_klass, TRAPS) { ++ KlassHandle host_klass, KlassHandle old_klass, TRAPS) { + + const int nonstatic_oop_map_size = + instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count); +@@ -435,7 +435,6 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it + ik->set_jni_ids(NULL); + ik->set_osr_nmethods_head(NULL); + ik->set_breakpoints(NULL); +- ik->init_previous_versions(); + ik->set_generic_signature(NULL); + ik->release_set_methods_jmethod_ids(NULL); + ik->release_set_methods_cached_itable_indices(NULL); +@@ -480,6 +479,28 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) { + instanceKlass* ik = instanceKlass::cast(klassOop(obj)); + klassKlass::oop_print_on(obj, st); + ++ // (tw) Output revision number and revision numbers of older / newer and oldest / newest version of this class. ++ ++ st->print(BULLET"revision: %d", ik->revision_number()); ++ ++ if (ik->new_version() != NULL) { ++ st->print(" (newer=%d)", ik->new_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->newest_version() != ik->new_version() && ik->newest_version() != obj) { ++ st->print(" (newest=%d)", ik->newest_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->old_version() != NULL) { ++ st->print(" (old=%d)", ik->old_version()->klass_part()->revision_number()); ++ } ++ ++ if (ik->oldest_version() != ik->old_version() && ik->oldest_version() != obj) { ++ st->print(" (oldest=%d)", ik->oldest_version()->klass_part()->revision_number()); ++ } ++ ++ st->cr(); ++ + st->print(BULLET"instance size: %d", ik->size_helper()); st->cr(); + st->print(BULLET"klass size: %d", ik->object_size()); st->cr(); + st->print(BULLET"access: "); ik->access_flags().print_on(st); st->cr(); +@@ -537,26 +558,6 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) { + st->cr(); + } + +- { +- ResourceMark rm; +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. 
+- { +- bool have_pv = false; +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- if (!have_pv) +- st->print(BULLET"previous version: "); +- have_pv = true; +- pv_info->prev_constant_pool_handle()()->print_value_on(st); +- } +- if (have_pv) st->cr(); +- } // pvw is cleaned up +- } // rm is cleaned up +- + if (ik->generic_signature() != NULL) { + st->print(BULLET"generic signature: "); + ik->generic_signature()->print_value_on(st); +@@ -663,7 +664,7 @@ void instanceKlassKlass::oop_verify_on(oop obj, outputStream* st) { + } + guarantee(sib->as_klassOop()->is_klass(), "should be klass"); + guarantee(sib->as_klassOop()->is_perm(), "should be in permspace"); +- guarantee(sib->super() == super, "siblings should have same superklass"); ++ guarantee(sib->super() == super || super->klass_part()->newest_version() == SystemDictionary::Object_klass(), "siblings should have same superklass"); + sib = sib->next_sibling(); + } + +diff --git a/src/share/vm/oops/instanceKlassKlass.hpp b/src/share/vm/oops/instanceKlassKlass.hpp +index df674a9..45d0b66 100644 +--- a/src/share/vm/oops/instanceKlassKlass.hpp ++++ b/src/share/vm/oops/instanceKlassKlass.hpp +@@ -50,6 +50,7 @@ class instanceKlassKlass : public klassKlass { + AccessFlags access_flags, + ReferenceType rt, + KlassHandle host_klass, ++ KlassHandle old_klass, + TRAPS); + + // Casting from klassOop +diff --git a/src/share/vm/oops/instanceMirrorKlass.cpp b/src/share/vm/oops/instanceMirrorKlass.cpp +index e0dd7d7..a7eec08 100644 +--- a/src/share/vm/oops/instanceMirrorKlass.cpp ++++ b/src/share/vm/oops/instanceMirrorKlass.cpp +@@ -156,6 +156,13 @@ void instanceMirrorKlass::oop_follow_contents(oop obj) { + assert_is_in_closed_subset) + } + ++void instanceMirrorKlass::oop_fields_iterate(oop obj, OopClosure* blk) { ++ InstanceMirrorKlass_OOP_ITERATE( \ ++ start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ ++ blk->do_oop(p), \ ++ assert_is_in_closed_subset) ++} ++ + #ifndef SERIALGC + void instanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm, + oop obj) { +diff --git a/src/share/vm/oops/instanceMirrorKlass.hpp b/src/share/vm/oops/instanceMirrorKlass.hpp +index 2b8b2f4..31969c7 100644 +--- a/src/share/vm/oops/instanceMirrorKlass.hpp ++++ b/src/share/vm/oops/instanceMirrorKlass.hpp +@@ -79,6 +79,9 @@ class instanceMirrorKlass: public instanceKlass { + DEFINE_ALLOCATE_PERMANENT(instanceMirrorKlass); + instanceOop allocate_instance(KlassHandle k, TRAPS); + ++ // Class redefinition, iterate static fields ++ static void oop_fields_iterate(oop obj, OopClosure* blk); ++ + // Garbage collection + int oop_adjust_pointers(oop obj); + void oop_follow_contents(oop obj); +diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp +index 7db4f03..1171487 100644 +--- a/src/share/vm/oops/instanceRefKlass.cpp ++++ b/src/share/vm/oops/instanceRefKlass.cpp +@@ -455,10 +455,13 @@ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) { + instanceKlass* ik = instanceKlass::cast(k); + + // Check that we have the right class +- debug_only(static bool first_time = true); +- assert(k == SystemDictionary::Reference_klass() && first_time, +- "Invalid update of maps"); +- debug_only(first_time = false); ++ ++ // (tw) Asserts no longer valid for class redefinition ++ // debug_only(static bool first_time = true); ++ ++ //assert(k == SystemDictionary::Reference_klass() && first_time, ++ // "Invalid 
update of maps"); ++ //debug_only(first_time = false); + assert(ik->nonstatic_oop_map_count() == 1, "just checking"); + + OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); +diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp +index 596d5ad..a928777 100644 +--- a/src/share/vm/oops/klass.cpp ++++ b/src/share/vm/oops/klass.cpp +@@ -161,6 +161,13 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size, + kl->set_alloc_size(0); + TRACE_INIT_ID(kl); + ++ kl->set_redefinition_flags(Klass::NoRedefinition); ++ kl->set_redefining(false); ++ kl->set_new_version(NULL); ++ kl->set_old_version(NULL); ++ kl->set_redefinition_index(-1); ++ kl->set_revision_number(-1); ++ + kl->set_prototype_header(markOopDesc::prototype()); + kl->set_biased_lock_revocation_count(0); + kl->set_last_biased_lock_bulk_revocation_time(0); +@@ -232,7 +239,7 @@ void Klass::initialize_supers(klassOop k, TRAPS) { + set_super(NULL); + oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop()); + assert(super_depth() == 0, "Object must already be initialized properly"); +- } else if (k != super() || k == SystemDictionary::Object_klass()) { ++ } else if (k != super() || k->klass_part()->super() == NULL) { + assert(super() == NULL || super() == SystemDictionary::Object_klass(), + "initialize this only once to a non-trivial value"); + set_super(k); +@@ -385,7 +392,7 @@ void Klass::append_to_sibling_list() { + void Klass::remove_from_sibling_list() { + // remove receiver from sibling list + instanceKlass* super = superklass(); +- assert(super != NULL || as_klassOop() == SystemDictionary::Object_klass(), "should have super"); ++ assert(super != NULL || as_klassOop()->klass_part()->newest_version() == SystemDictionary::Object_klass()->klass_part()->newest_version(), "should have super"); + if (super == NULL) return; // special case: class Object + if (super->subklass() == this) { + // first subklass +diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp +index bcbd4e7..bf242d9 100644 +--- a/src/share/vm/oops/klass.hpp ++++ b/src/share/vm/oops/klass.hpp +@@ -170,6 +170,7 @@ class Klass_vtbl { + void* operator new(size_t ignored, KlassHandle& klass, int size, TRAPS); + }; + ++template<class L, class R> class Pair; + + class Klass : public Klass_vtbl { + friend class VMStructs; +@@ -222,6 +223,31 @@ class Klass : public Klass_vtbl { + oop* oop_block_beg() const { return adr_secondary_super_cache(); } + oop* oop_block_end() const { return adr_next_sibling() + 1; } + ++ // (tw) Different class redefinition flags of code evolution. ++ enum RedefinitionFlags { ++ ++ // This class is not redefined at all! ++ NoRedefinition, ++ ++ // There are changes to the class meta data. ++ ModifyClass = 1, ++ ++ // The size of the class meta data changes. ++ ModifyClassSize = ModifyClass << 1, ++ ++ // There are change to the instance format. ++ ModifyInstances = ModifyClassSize << 1, ++ ++ // The size of instances changes. ++ ModifyInstanceSize = ModifyInstances << 1, ++ ++ // A super type of this class is removed. ++ RemoveSuperType = ModifyInstanceSize << 1, ++ ++ // This class has been marked as an affected class. ++ MarkedAsAffected = RemoveSuperType << 1 ++ }; ++ + protected: + // + // The oop block. 
All oop fields must be declared here and only oop fields +@@ -241,6 +267,10 @@ class Klass : public Klass_vtbl { + oop _java_mirror; + // Superclass + klassOop _super; ++ // Old class ++ klassOop _old_version; ++ // New class ++ klassOop _new_version; + // First subclass (NULL if none); _subklass->next_sibling() is next one + klassOop _subklass; + // Sibling link (or NULL); links all subklasses of a klass +@@ -253,6 +283,16 @@ class Klass : public Klass_vtbl { + jint _modifier_flags; // Processed access flags, for use by Class.getModifiers. + AccessFlags _access_flags; // Access flags. The class/interface distinction is stored here. + ++ // (tw) Non-oop fields for enhanced class redefinition ++ jint _revision_number; // The revision number for redefined classes ++ jint _redefinition_index; // Index of this class when performing the redefinition ++ bool _subtype_changed; ++ int _redefinition_flags; // Level of class redefinition ++ bool _is_copying_backwards; // Does the class need to copy fields backwards? => possibly overwrite itself? ++ bool _original_field_offsets_changed; // Did the original field offsets of this class change during class redefinition? ++ int * _update_information; // Update information ++ bool _is_redefining; ++ + #ifndef PRODUCT + int _verify_count; // to avoid redundant verifies + #endif +@@ -301,6 +341,75 @@ class Klass : public Klass_vtbl { + klassOop secondary_super_cache() const { return _secondary_super_cache; } + void set_secondary_super_cache(klassOop k) { oop_store_without_check((oop*) &_secondary_super_cache, (oop) k); } + ++ // BEGIN class redefinition utilities ++ ++ // double links between new and old version of a class ++ klassOop old_version() const { return _old_version; } ++ void set_old_version(klassOop klass) { assert(_old_version == NULL || klass == NULL, "Can only be set once!"); _old_version = klass; } ++ klassOop new_version() const { return _new_version; } ++ void set_new_version(klassOop klass) { assert(_new_version == NULL || klass == NULL, "Can only be set once!"); _new_version = klass; } ++ ++ // A subtype of this class is no longer a subtype ++ bool has_subtype_changed() const { return _subtype_changed; } ++ void set_subtype_changed(bool b) { assert(is_newest_version() || new_version()->klass_part()->is_newest_version(), "must be newest or second newest version"); ++ _subtype_changed = b; } ++ // state of being redefined ++ int redefinition_index() const { return _redefinition_index; } ++ void set_redefinition_index(int index) { _redefinition_index = index; } ++ void set_redefining(bool b) { _is_redefining = b; } ++ bool is_redefining() const { return _is_redefining; } ++ int redefinition_flags() const { return _redefinition_flags; } ++ bool check_redefinition_flag(int flags) const { return (_redefinition_flags & flags) != 0; } ++ void set_redefinition_flags(int flags) { _redefinition_flags = flags; } ++ void set_redefinition_flag(int flag) { _redefinition_flags |= flag; } ++ void clear_redefinition_flag(int flag) { _redefinition_flags &= ~flag; } ++ bool is_copying_backwards() const { return _is_copying_backwards; } ++ void set_copying_backwards(bool b) { _is_copying_backwards = b; } ++ ++ // update information ++ int *update_information() const { return _update_information; } ++ void set_update_information(int *info) { _update_information = info; } ++ ++ bool is_same_or_older_version(klassOop klass) const { ++ if (Klass::cast(klass) == this) { return true; } ++ else if (_old_version == NULL) { return false; } ++ else { return 
_old_version->klass_part()->is_same_or_older_version(klass); } ++ } ++ ++ // Revision number for redefined classes, -1 for originally loaded classes ++ jint revision_number() const { ++ return _revision_number; ++ } ++ ++ bool was_redefined() const { ++ return _revision_number != -1; ++ } ++ ++ void set_revision_number(jint number) { ++ _revision_number = number; ++ } ++ ++ klassOop oldest_version() const { ++ if (_old_version == NULL) { return this->as_klassOop(); } ++ else { return _old_version->klass_part()->oldest_version(); }; ++ } ++ ++ klassOop newest_version() const { ++ if (_new_version == NULL) { return this->as_klassOop(); } ++ else { return _new_version->klass_part()->newest_version(); }; ++ } ++ ++ klassOop active_version() const { ++ if (_new_version == NULL || _new_version->klass_part()->is_redefining()) { return this->as_klassOop(); assert(!this->is_redefining(), "just checking"); } ++ else { return _new_version->klass_part()->active_version(); }; ++ } ++ ++ bool is_newest_version() const { ++ return _new_version == NULL; ++ } ++ ++ // END class redefinition utilities ++ + objArrayOop secondary_supers() const { return _secondary_supers; } + void set_secondary_supers(objArrayOop k) { oop_store_without_check((oop*) &_secondary_supers, (oop) k); } + +@@ -361,6 +470,8 @@ class Klass : public Klass_vtbl { + void set_next_sibling(klassOop s); + + oop* adr_super() const { return (oop*)&_super; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } + oop* adr_primary_supers() const { return (oop*)&_primary_supers[0]; } + oop* adr_secondary_super_cache() const { return (oop*)&_secondary_super_cache; } + oop* adr_secondary_supers()const { return (oop*)&_secondary_supers; } +diff --git a/src/share/vm/oops/klassKlass.cpp b/src/share/vm/oops/klassKlass.cpp +index 06809d5..1050eda 100644 +--- a/src/share/vm/oops/klassKlass.cpp ++++ b/src/share/vm/oops/klassKlass.cpp +@@ -68,6 +68,8 @@ void klassKlass::oop_follow_contents(oop obj) { + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + MarkSweep::mark_and_push(k->adr_super()); ++ MarkSweep::mark_and_push(k->adr_old_version()); ++ MarkSweep::mark_and_push(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::mark_and_push(k->adr_primary_supers()+i); + MarkSweep::mark_and_push(k->adr_secondary_super_cache()); +@@ -87,6 +89,8 @@ void klassKlass::oop_follow_contents(ParCompactionManager* cm, + Klass* k = Klass::cast(klassOop(obj)); + // If we are alive it is valid to keep our superclass and subtype caches alive + PSParallelCompact::mark_and_push(cm, k->adr_super()); ++ PSParallelCompact::mark_and_push(cm, k->adr_old_version()); ++ PSParallelCompact::mark_and_push(cm, k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + PSParallelCompact::mark_and_push(cm, k->adr_primary_supers()+i); + PSParallelCompact::mark_and_push(cm, k->adr_secondary_super_cache()); +@@ -106,6 +110,8 @@ int klassKlass::oop_oop_iterate(oop obj, OopClosure* blk) { + int size = oop_size(obj); + Klass* k = Klass::cast(klassOop(obj)); + blk->do_oop(k->adr_super()); ++ blk->do_oop(k->adr_old_version()); ++ blk->do_oop(k->adr_new_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + blk->do_oop(k->adr_primary_supers()+i); + blk->do_oop(k->adr_secondary_super_cache()); +@@ -134,6 +140,10 @@ int klassKlass::oop_oop_iterate_m(oop obj, OopClosure* 
blk, MemRegion mr) { + oop* adr; + adr = k->adr_super(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = k->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + for (juint i = 0; i < Klass::primary_super_limit(); i++) { + adr = k->adr_primary_supers()+i; + if (mr.contains(adr)) blk->do_oop(adr); +@@ -167,6 +177,8 @@ int klassKlass::oop_adjust_pointers(oop obj) { + Klass* k = Klass::cast(klassOop(obj)); + + MarkSweep::adjust_pointer(k->adr_super()); ++ MarkSweep::adjust_pointer(k->adr_new_version()); ++ MarkSweep::adjust_pointer(k->adr_old_version()); + for (juint i = 0; i < Klass::primary_super_limit(); i++) + MarkSweep::adjust_pointer(k->adr_primary_supers()+i); + MarkSweep::adjust_pointer(k->adr_secondary_super_cache()); +diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp +index 94e2e04..a683a4b 100644 +--- a/src/share/vm/oops/klassVtable.cpp ++++ b/src/share/vm/oops/klassVtable.cpp +@@ -628,17 +628,13 @@ void klassVtable::adjust_method_entries(methodOop* old_methods, methodOop* new_m + if (unchecked_method_at(index) == old_method) { + put_method_at(new_method, index); + +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { ++ IF_TRACE_RC4 { + if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); ++ TRACE_RC4("adjust: name=%s", Klass::cast(old_method->method_holder())->external_name()); + *trace_name_printed = true; + } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00100000, ("vtable method update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); ++ TRACE_RC4("vtable method update: %s(%s)", new_method->name()->as_C_string(), ++ new_method->signature()->as_C_string()); + } + // cannot 'break' here; see for-loop comment above. + } +@@ -1008,17 +1004,13 @@ void klassItable::adjust_method_entries(methodOop* old_methods, methodOop* new_m + if (ime->method() == old_method) { + ime->initialize(new_method); + +- if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { ++ IF_TRACE_RC4 { + if (!(*trace_name_printed)) { +- // RC_TRACE_MESG macro has an embedded ResourceMark +- RC_TRACE_MESG(("adjust: name=%s", +- Klass::cast(old_method->method_holder())->external_name())); ++ TRACE_RC4("adjust: name=%s", Klass::cast(old_method->method_holder())->external_name()); + *trace_name_printed = true; + } +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00200000, ("itable method update: %s(%s)", +- new_method->name()->as_C_string(), +- new_method->signature()->as_C_string())); ++ TRACE_RC4("itable method update: %s(%s)", new_method->name()->as_C_string(), ++ new_method->signature()->as_C_string()); + } + // cannot 'break' here; see for-loop comment above. 
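The _old_version / _new_version fields introduced on Klass above form a doubly-linked revision chain, and newest_version(), oldest_version() and is_same_or_older_version() (in the klass.hpp hunk) are plain recursive walks over it. A self-contained sketch of that chain, with VersionedKlass as a simplified stand-in type (editor's illustration, not the real Klass):

    #include <cstdio>

    // Hypothetical stand-in for the Klass version links; field names mirror
    // the patch, everything else is simplified for illustration.
    struct VersionedKlass {
        const char*     name;
        int             revision;
        VersionedKlass* old_version;  // previous revision, or nullptr
        VersionedKlass* new_version;  // next revision, or nullptr

        VersionedKlass* newest_version() {
            return new_version == nullptr ? this : new_version->newest_version();
        }
        VersionedKlass* oldest_version() {
            return old_version == nullptr ? this : old_version->oldest_version();
        }
        bool is_same_or_older_version(const VersionedKlass* k) const {
            if (k == this) return true;
            return old_version != nullptr && old_version->is_same_or_older_version(k);
        }
    };

    int main() {
        VersionedKlass v0{"C", 0, nullptr, nullptr};
        VersionedKlass v1{"C", 1, &v0, nullptr};
        v0.new_version = &v1;  // double link, set once per redefinition
        std::printf("newest of v0: rev %d\n", v0.newest_version()->revision);  // 1
        std::printf("v1 >= v0? %d\n", v1.is_same_or_older_version(&v0));       // 1
        return 0;
    }
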
+ } +@@ -1241,6 +1233,7 @@ void klassVtable::verify(outputStream* st, bool forced) { + + void klassVtable::verify_against(outputStream* st, klassVtable* vt, int index) { + vtableEntry* vte = &vt->table()[index]; ++ if (vte->method() == NULL || table()[index].method() == NULL) return; + if (vte->method()->name() != table()[index].method()->name() || + vte->method()->signature() != table()[index].method()->signature()) { + fatal("mismatched name/signature of vtable entries"); +@@ -1260,6 +1253,8 @@ void klassVtable::print() { + + void vtableEntry::verify(klassVtable* vt, outputStream* st) { + NOT_PRODUCT(FlagSetting fs(IgnoreLockingAssertions, true)); ++ // (tw) TODO: Check: Does not hold? ++ if (method() != NULL) { + assert(method() != NULL, "must have set method"); + method()->verify(); + // we sub_type, because it could be a miranda method +@@ -1267,7 +1262,13 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { + #ifndef PRODUCT + print(); + #endif +- fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this)); ++ klassOop first_klass = vt->klass()(); ++ klassOop second_klass = method()->method_holder(); ++ // (tw) the following fatal does not work for old versions of classes ++ if (first_klass->klass_part()->is_newest_version()) { ++ //fatal1("vtableEntry %#lx: method is from subclass", this); ++ } ++ } + } + } + +@@ -1275,8 +1276,8 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { + + void vtableEntry::print() { + ResourceMark rm; +- tty->print("vtableEntry %s: ", method()->name()->as_C_string()); + if (Verbose) { ++ tty->print("vtableEntry %s: ", (method() == NULL) ? "null" : method()->name()->as_C_string()); + tty->print("m %#lx ", (address)method()); + } + } +diff --git a/src/share/vm/oops/methodKlass.cpp b/src/share/vm/oops/methodKlass.cpp +index 75d0b09..c4be146 100644 +--- a/src/share/vm/oops/methodKlass.cpp ++++ b/src/share/vm/oops/methodKlass.cpp +@@ -93,6 +93,9 @@ methodOop methodKlass::allocate(constMethodHandle xconst, + m->set_adapter_entry(NULL); + m->clear_code(); // from_c/from_i get set to c2i/i2i + ++ m->set_new_version(NULL); ++ m->set_old_version(NULL); ++ + if (access_flags.is_native()) { + m->clear_native_function(); + m->set_signature_handler(NULL); +@@ -122,6 +125,8 @@ void methodKlass::oop_follow_contents(oop obj) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::mark_and_push(m->adr_constMethod()); ++ MarkSweep::mark_and_push(m->adr_new_version()); ++ MarkSweep::mark_and_push(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::mark_and_push(m->adr_method_data()); + } +@@ -135,6 +140,8 @@ void methodKlass::oop_follow_contents(ParCompactionManager* cm, + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. 
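A second pattern repeats through the klassKlass.cpp hunks above and the methodKlass.cpp hunks below: once a metadata object gains extra reference fields (_old_version, _new_version), every GC visitor routine (follow_contents, oop_oop_iterate, adjust_pointers, and their parallel-compact variants) must be taught to visit them, or the linked versions could be missed. A sketch of that obligation with hypothetical Meta/Visitor types (editor's illustration only):

    #include <cstdio>

    struct Meta;
    using Visitor = void (*)(Meta**);

    struct Meta {
        const char* name;
        Meta* old_version;
        Meta* new_version;

        // One place enumerating all reference fields for any visitor,
        // analogous to what each oop_* routine must do by hand in the patch.
        void visit_refs(Visitor v) {
            v(&old_version);
            v(&new_version);
        }
    };

    static void mark(Meta** slot) {
        if (*slot != nullptr) std::printf("visit %s\n", (*slot)->name);
    }

    int main() {
        Meta v0{"rev0", nullptr, nullptr};
        Meta v1{"rev1", &v0, nullptr};
        v0.new_version = &v1;
        v1.visit_refs(mark);  // visits rev0; the nullptr new_version is skipped
        return 0;
    }
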
+ PSParallelCompact::mark_and_push(cm, m->adr_constMethod()); ++ PSParallelCompact::mark_and_push(cm, m->adr_new_version()); ++ PSParallelCompact::mark_and_push(cm, m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::mark_and_push(cm, m->adr_method_data()); +@@ -152,6 +159,8 @@ int methodKlass::oop_oop_iterate(oop obj, OopClosure* blk) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves + blk->do_oop(m->adr_constMethod()); ++ blk->do_oop(m->adr_new_version()); ++ blk->do_oop(m->adr_old_version()); + if (m->method_data() != NULL) { + blk->do_oop(m->adr_method_data()); + } +@@ -170,6 +179,10 @@ int methodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { + oop* adr; + adr = m->adr_constMethod(); + if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_new_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); ++ adr = m->adr_old_version(); ++ if (mr.contains(adr)) blk->do_oop(adr); + if (m->method_data() != NULL) { + adr = m->adr_method_data(); + if (mr.contains(adr)) blk->do_oop(adr); +@@ -187,6 +200,8 @@ int methodKlass::oop_adjust_pointers(oop obj) { + // Performance tweak: We skip iterating over the klass pointer since we + // know that Universe::methodKlassObj never moves. + MarkSweep::adjust_pointer(m->adr_constMethod()); ++ MarkSweep::adjust_pointer(m->adr_new_version()); ++ MarkSweep::adjust_pointer(m->adr_old_version()); + if (m->method_data() != NULL) { + MarkSweep::adjust_pointer(m->adr_method_data()); + } +@@ -202,6 +217,8 @@ int methodKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { + assert(obj->is_method(), "should be method"); + methodOop m = methodOop(obj); + PSParallelCompact::adjust_pointer(m->adr_constMethod()); ++ PSParallelCompact::adjust_pointer(m->adr_new_version()); ++ PSParallelCompact::adjust_pointer(m->adr_old_version()); + #ifdef COMPILER2 + if (m->method_data() != NULL) { + PSParallelCompact::adjust_pointer(m->adr_method_data()); +@@ -222,7 +239,18 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) { + methodOop m = methodOop(obj); + // get the effect of PrintOopAddress, always, for methods: + st->print_cr(" - this oop: "INTPTR_FORMAT, (intptr_t)m); +- st->print (" - method holder: "); m->method_holder()->print_value_on(st); st->cr(); ++ st->print (" - method holder: "); m->method_holder()->print_value_on(st); ++ ++ if (m->method_holder()->klass_part()->new_version() != NULL) { ++ st->print(" (old)"); ++ } ++ st->cr(); ++ ++ st->print_cr(" - is obsolete: %d", (int)(m->is_obsolete())); ++ st->print_cr(" - is old: %d", (int)(m->is_old())); ++ st->print_cr(" - new version: "INTPTR_FORMAT, (address)m->new_version()); ++ st->print_cr(" - old version: "INTPTR_FORMAT, (address)m->old_version()); ++ st->print_cr(" - holder revision: %d", m->method_holder()->klass_part()->revision_number()); + st->print (" - constants: "INTPTR_FORMAT" ", (address)m->constants()); + m->constants()->print_value_on(st); st->cr(); + st->print (" - access: 0x%x ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr(); +diff --git a/src/share/vm/oops/methodOop.cpp b/src/share/vm/oops/methodOop.cpp +index 4f59d3a..32cb4cf 100644 +--- a/src/share/vm/oops/methodOop.cpp ++++ b/src/share/vm/oops/methodOop.cpp +@@ -1061,6 +1061,8 @@ methodHandle methodOopDesc::clone_with_new_data(methodHandle m, u_char* new_code + + // Reset correct method/const method, method size, and parameter info + newm->set_constMethod(newcm); ++ 
newm->set_new_version(newm->new_version()); ++ newm->set_old_version(newm->old_version()); + newm->constMethod()->set_code_size(new_code_length); + newm->constMethod()->set_constMethod_size(new_const_method_size); + newm->set_method_size(new_method_size); +diff --git a/src/share/vm/oops/methodOop.hpp b/src/share/vm/oops/methodOop.hpp +index 486e106..e35d5ed 100644 +--- a/src/share/vm/oops/methodOop.hpp ++++ b/src/share/vm/oops/methodOop.hpp +@@ -114,6 +114,10 @@ class methodOopDesc : public oopDesc { + AccessFlags _access_flags; // Access flags + int _vtable_index; // vtable index of this method (see VtableIndexFlag) + // note: can have vtables with >2**16 elements (because of inheritance) ++ // (tw) Newer version of method available? ++ methodOop _new_version; ++ methodOop _old_version; ++ + #ifdef CC_INTERP + int _result_index; // C++ interpreter needs for converting results to/from stack + #endif +@@ -175,6 +179,29 @@ class methodOopDesc : public oopDesc { + int name_index() const { return constMethod()->name_index(); } + void set_name_index(int index) { constMethod()->set_name_index(index); } + ++ methodOop new_version() const {return _new_version; } ++ void set_new_version(methodOop m) { _new_version = m; } ++ methodOop newest_version() { if(_new_version == NULL) return this; else return new_version()->newest_version(); } ++ ++ methodOop old_version() const {return _old_version; }; ++ void set_old_version(methodOop m) { ++ if (m == NULL) { ++ _old_version = NULL; ++ return; ++ } ++ ++ assert(_old_version == NULL, "may only be set once"); ++ assert(this->code_size() == m->code_size(), "must have same code length"); ++ _old_version = m; ++ } ++ ++ methodOop oldest_version() const { ++ if(_old_version == NULL) return (methodOop)this; ++ else { ++ return old_version()->oldest_version(); ++ } ++ } ++ + // signature + Symbol* signature() const { return constants()->symbol_at(signature_index()); } + int signature_index() const { return constMethod()->signature_index(); } +@@ -734,6 +761,8 @@ class methodOopDesc : public oopDesc { + + // Garbage collection support + oop* adr_constMethod() const { return (oop*)&_constMethod; } ++ oop* adr_new_version() const { return (oop*)&_new_version; } ++ oop* adr_old_version() const { return (oop*)&_old_version; } + oop* adr_method_data() const { return (oop*)&_method_data; } + }; + +diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp +index 5982c88..4873fca 100644 +--- a/src/share/vm/oops/oop.hpp ++++ b/src/share/vm/oops/oop.hpp +@@ -95,6 +95,7 @@ class oopDesc { + narrowOop* compressed_klass_addr(); + + void set_klass(klassOop k); ++ void set_klass_no_check(klassOop k); + + // For klass field compression + int klass_gap() const; +@@ -135,6 +136,7 @@ class oopDesc { + bool is_array() const; + bool is_objArray() const; + bool is_klass() const; ++ bool is_instanceKlass() const; + bool is_thread() const; + bool is_method() const; + bool is_constMethod() const; +diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp +index f4eb2f7..0acb346 100644 +--- a/src/share/vm/oops/oop.inline.hpp ++++ b/src/share/vm/oops/oop.inline.hpp +@@ -123,6 +123,14 @@ inline void oopDesc::set_klass(klassOop k) { + } + } + ++inline void oopDesc::set_klass_no_check(klassOop k) { ++ if (UseCompressedOops) { ++ oop_store_without_check(compressed_klass_addr(), (oop)k); ++ } else { ++ oop_store_without_check(klass_addr(), (oop) k); ++ } ++} ++ + inline int oopDesc::klass_gap() const { + return *(int*)(((intptr_t)this) + 
klass_gap_offset_in_bytes()); + } +@@ -156,6 +164,7 @@ inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_ + inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); } + inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); } + inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); } ++inline bool oopDesc::is_instanceKlass() const { return blueprint()->oop_is_instanceKlass(); } + inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); } + inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); } + inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); } +diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp +index 2123991..6cbd78c 100644 +--- a/src/share/vm/prims/jni.cpp ++++ b/src/share/vm/prims/jni.cpp +@@ -406,7 +406,7 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR + } + } + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, +- Handle(), &st, true, ++ Handle(), &st, true, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp +index 7dcd968..d59052f 100644 +--- a/src/share/vm/prims/jvm.cpp ++++ b/src/share/vm/prims/jvm.cpp +@@ -872,7 +872,7 @@ static jclass jvm_define_class_common(JNIEnv *env, const char *name, + Handle protection_domain (THREAD, JNIHandles::resolve(pd)); + klassOop k = SystemDictionary::resolve_from_stream(class_name, class_loader, + protection_domain, &st, +- verify != 0, ++ verify != 0, KlassHandle(), + CHECK_NULL); + + if (TraceClassResolution && k != NULL) { +diff --git a/src/share/vm/prims/jvm_misc.hpp b/src/share/vm/prims/jvm_misc.hpp +index 2b46e36..549e949 100644 +--- a/src/share/vm/prims/jvm_misc.hpp ++++ b/src/share/vm/prims/jvm_misc.hpp +@@ -84,6 +84,7 @@ extern "C" { + (JNIEnv *env, jobject obj, jfieldID fieldID); + } + ++// TODO(tw): Check if we need to "unquicken" because of class redefinition. 
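methodOopDesc gains the same old/new version links as Klass (methodOop.hpp above), and later in this patch (the jvmtiImpl.cpp hunk) breakpoints are set and cleared by walking that chain instead of using the removed PreviousVersionWalker. A standalone sketch of the walk, with MethodVersion as a hypothetical stand-in type:

    #include <cstdio>

    // Simplified stand-in for methodOopDesc with its _old_version link.
    struct MethodVersion {
        int            id;
        MethodVersion* old_version;  // previous version, or nullptr
    };

    // Apply an action to a method and every older version of it, the way the
    // patched JvmtiBreakpoint::each_method_version_do walks the chain.
    template <typename Action>
    void each_method_version_do(MethodVersion* m, Action act) {
        while (m != nullptr) {
            act(m);
            m = m->old_version;
        }
    }

    int main() {
        MethodVersion v0{0, nullptr};
        MethodVersion v1{1, &v0};  // v1 is current, v0 is the old version
        each_method_version_do(&v1, [](MethodVersion* m) {
            std::printf("set breakpoint in version %d\n", m->id);
        });
        return 0;
    }
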
+ void quicken_jni_functions(); + address jni_GetBooleanField_addr(); + address jni_GetByteField_addr(); +diff --git a/src/share/vm/prims/jvmtiEnv.cpp b/src/share/vm/prims/jvmtiEnv.cpp +index 4ac6b82..30b8e84 100644 +--- a/src/share/vm/prims/jvmtiEnv.cpp ++++ b/src/share/vm/prims/jvmtiEnv.cpp +@@ -290,7 +290,10 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { + class_definitions[index].klass = jcls; + } + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_retransform); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RetransformClasses */ + +@@ -299,9 +302,12 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) { + // class_definitions - pre-checked for NULL + jvmtiError + JvmtiEnv::RedefineClasses(jint class_count, const jvmtiClassDefinition* class_definitions) { +-//TODO: add locking ++ + VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine); +- VMThread::execute(&op); ++ { ++ MutexLocker sd_mutex(RedefineClasses_lock); ++ VMThread::execute(&op); ++ } + return (op.check_error()); + } /* end RedefineClasses */ + +diff --git a/src/share/vm/prims/jvmtiExport.cpp b/src/share/vm/prims/jvmtiExport.cpp +index ec8ede3..2bd5983 100644 +--- a/src/share/vm/prims/jvmtiExport.cpp ++++ b/src/share/vm/prims/jvmtiExport.cpp +@@ -2296,7 +2296,7 @@ JvmtiDynamicCodeEventCollector::JvmtiDynamicCodeEventCollector() : _code_blobs(N + // iterate over any code blob descriptors collected and post a + // DYNAMIC_CODE_GENERATED event to the profiler. + JvmtiDynamicCodeEventCollector::~JvmtiDynamicCodeEventCollector() { +- assert(!JavaThread::current()->owns_locks(), "all locks must be released to post deferred events"); ++ assert(!JavaThread::current()->owns_locks_but_redefine_classes_lock(), "all locks must be released to post deferred events"); + // iterate over any code blob descriptors that we collected + if (_code_blobs != NULL) { + for (int i=0; i<_code_blobs->length(); i++) { +diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp +index d3fa140..31a8a19 100644 +--- a/src/share/vm/prims/jvmtiImpl.cpp ++++ b/src/share/vm/prims/jvmtiImpl.cpp +@@ -284,60 +284,11 @@ address JvmtiBreakpoint::getBcp() { + } + + void JvmtiBreakpoint::each_method_version_do(method_action meth_act) { +- ((methodOopDesc*)_method->*meth_act)(_bci); +- +- // add/remove breakpoint to/from versions of the method that +- // are EMCP. Directly or transitively obsolete methods are +- // not saved in the PreviousVersionInfo. +- Thread *thread = Thread::current(); +- instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder()); +- Symbol* m_name = _method->name(); +- Symbol* m_signature = _method->signature(); +- +- { +- ResourceMark rm(thread); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. 
+- { +- // search previous versions if they exist +- PreviousVersionWalker pvw((instanceKlass *)ikh()->klass_part()); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- GrowableArray<methodHandle>* methods = +- pv_info->prev_EMCP_method_handles(); +- +- if (methods == NULL) { +- // We have run into a PreviousVersion generation where +- // all methods were made obsolete during that generation's +- // RedefineClasses() operation. At the time of that +- // operation, all EMCP methods were flushed so we don't +- // have to go back any further. +- // +- // A NULL methods array is different than an empty methods +- // array. We cannot infer any optimizations about older +- // generations from an empty methods array for the current +- // generation. +- break; +- } +- +- for (int i = methods->length() - 1; i >= 0; i--) { +- methodHandle method = methods->at(i); +- if (method->name() == m_name && method->signature() == m_signature) { +- RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)", +- meth_act == &methodOopDesc::set_breakpoint ? "sett" : "clear", +- method->name()->as_C_string(), +- method->signature()->as_C_string())); +- assert(!method->is_obsolete(), "only EMCP methods here"); +- +- ((methodOopDesc*)method()->*meth_act)(_bci); +- break; +- } +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up ++ methodOop method = _method; ++ while (method != NULL) { ++ ((methodOopDesc*)method->*meth_act)(_bci); ++ method = method->old_version(); ++ } + } + + void JvmtiBreakpoint::set() { +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.cpp b/src/share/vm/prims/jvmtiRedefineClasses.cpp +index eb52388..432e15a 100644 +--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -38,490 +38,669 @@ + #include "runtime/deoptimization.hpp" + #include "runtime/relocator.hpp" + #include "utilities/bitMap.inline.hpp" ++#include "prims/jvmtiClassFileReconstituter.hpp" ++#include "compiler/compileBroker.hpp" ++#include "oops/instanceMirrorKlass.hpp" + + + objArrayOop VM_RedefineClasses::_old_methods = NULL; + objArrayOop VM_RedefineClasses::_new_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_old_methods = NULL; +-methodOop* VM_RedefineClasses::_matching_new_methods = NULL; +-methodOop* VM_RedefineClasses::_deleted_methods = NULL; +-methodOop* VM_RedefineClasses::_added_methods = NULL; ++int* VM_RedefineClasses::_matching_old_methods = NULL; ++int* VM_RedefineClasses::_matching_new_methods = NULL; ++int* VM_RedefineClasses::_deleted_methods = NULL; ++int* VM_RedefineClasses::_added_methods = NULL; + int VM_RedefineClasses::_matching_methods_length = 0; + int VM_RedefineClasses::_deleted_methods_length = 0; + int VM_RedefineClasses::_added_methods_length = 0; +-klassOop VM_RedefineClasses::_the_class_oop = NULL; ++GrowableArray<instanceKlassHandle>* VM_RedefineClasses::_affected_klasses = NULL; + + +-VM_RedefineClasses::VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind) { ++// Holds the revision number of the current class redefinition ++int VM_RedefineClasses::_revision_number = -1; ++ ++VM_RedefineClasses::VM_RedefineClasses(jint class_count, const jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind) ++ : VM_GC_Operation(Universe::heap()->total_full_collections(), GCCause::_heap_inspection) { ++ RC_TIMER_START(_timer_total); + _class_count = class_count; + _class_defs = class_defs; + _class_load_kind = class_load_kind; +- _res = JVMTI_ERROR_NONE; ++ _result = JVMTI_ERROR_NONE; + } + +-bool VM_RedefineClasses::doit_prologue() { +- if (_class_count == 0) { +- _res = JVMTI_ERROR_NONE; +- return false; ++VM_RedefineClasses::~VM_RedefineClasses() { ++ unlock_threads(); ++ RC_TIMER_STOP(_timer_total); ++ ++ if (TimeRedefineClasses) { ++ tty->print_cr(""); ++ tty->print_cr("Timing Prologue: %d", _timer_prologue.milliseconds()); ++ tty->print_cr("Timing Class Loading: %d", _timer_class_loading.milliseconds()); ++ tty->print_cr("Timing Waiting for Lock: %d", _timer_wait_for_locks.milliseconds()); ++ tty->print_cr("Timing Class Linking: %d", _timer_class_linking.milliseconds()); ++ tty->print_cr("Timing Prepare Redefinition: %d", _timer_prepare_redefinition.milliseconds()); ++ tty->print_cr("Timing Heap Iteration: %d", _timer_heap_iteration.milliseconds()); ++ tty->print_cr("Timing Redefinition GC: %d", _timer_redefinition.milliseconds()); ++ tty->print_cr("Timing Epilogue: %d", _timer_vm_op_epilogue.milliseconds()); ++ tty->print_cr("------------------------------------------------------------------"); ++ tty->print_cr("Total Time: %d", _timer_total.milliseconds()); ++ tty->print_cr(""); + } +- if (_class_defs == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++} ++ ++void VM_RedefineClasses::swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class) { ++ typeArrayOop save; ++ ++ save = scratch_class->get_method_annotations_of(i); ++ scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); ++ scratch_class->set_method_annotations_of(j, save); ++ ++ save = scratch_class->get_method_parameter_annotations_of(i); ++ 
scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); ++ scratch_class->set_method_parameter_annotations_of(j, save); ++ ++ save = scratch_class->get_method_default_annotations_of(i); ++ scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); ++ scratch_class->set_method_default_annotations_of(j, save); ++} ++ ++void VM_RedefineClasses::add_affected_klasses( klassOop klass ) ++{ ++ assert(!_affected_klasses->contains(klass), "must not occur more than once!"); ++ assert(klass->klass_part()->new_version() == NULL, "Only last version is valid entry in system dictionary"); ++ ++ Klass* k = klass->klass_part(); ++ ++ if (k->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ _affected_klasses->append(klass); ++ return; + } +- for (int i = 0; i < _class_count; i++) { +- if (_class_defs[i].klass == NULL) { +- _res = JVMTI_ERROR_INVALID_CLASS; +- return false; +- } +- if (_class_defs[i].class_byte_count == 0) { +- _res = JVMTI_ERROR_INVALID_CLASS_FORMAT; +- return false; ++ ++ for (juint i = 0; i < k->super_depth(); i++) { ++ klassOop primary_oop = k->primary_super_of_depth(i); ++ // super_depth returns "8" for interfaces, but they don't have primaries other than Object. ++ if (primary_oop == NULL) break; ++ Klass* primary = Klass::cast(primary_oop); ++ if (primary->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ TRACE_RC3("Found affected class: %s", k->name()->as_C_string()); ++ k->set_redefinition_flag(Klass::MarkedAsAffected); ++ _affected_klasses->append(klass); ++ return; + } +- if (_class_defs[i].class_bytes == NULL) { +- _res = JVMTI_ERROR_NULL_POINTER; +- return false; ++ } ++ ++ // Check secondary supers ++ int cnt = k->secondary_supers()->length(); ++ for (int i = 0; i < cnt; i++) { ++ Klass* secondary = Klass::cast((klassOop) k->secondary_supers()->obj_at(i)); ++ if (secondary->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ TRACE_RC3("Found affected class: %s", k->name()->as_C_string()); ++ k->set_redefinition_flag(Klass::MarkedAsAffected); ++ _affected_klasses->append(klass); ++ return; + } + } ++} + +- // Start timer after all the sanity checks; not quite accurate, but +- // better than adding a bunch of stop() calls. +- RC_TIMER_START(_timer_vm_op_prologue); + +- // We first load new class versions in the prologue, because somewhere down the +- // call chain it is required that the current thread is a Java thread. +- _res = load_new_class_versions(Thread::current()); +- if (_res != JVMTI_ERROR_NONE) { +- // Free os::malloc allocated memory in load_new_class_version. +- os::free(_scratch_classes); +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return false; ++// Searches for all affected classes and performs a sorting such that a supertype is always before a subtype. 
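The contract stated in the comment just above — every supertype must be ordered before its subtypes — is a topological sort over the supertype edges. A minimal sketch under a simplified single-superclass model (editor's illustration; the real do_topological_class_sorting presumably works over the full supertype relation, interfaces included):

    #include <cstdio>
    #include <vector>

    struct ClassNode {
        const char* name;
        ClassNode*  super;          // nullptr for the root
        bool        emitted = false;
    };

    // Emit a class only after its entire supertype chain has been emitted.
    void emit_supers_first(ClassNode* c, std::vector<ClassNode*>& order) {
        if (c == nullptr || c->emitted) return;
        emit_supers_first(c->super, order);  // supertype goes first
        c->emitted = true;
        order.push_back(c);
    }

    int main() {
        ClassNode a{"A", nullptr}, b{"B", &a}, c{"C", &b};
        std::vector<ClassNode*> order;
        // Visiting in an arbitrary order still yields A, B, C.
        emit_supers_first(&c, order);
        emit_supers_first(&b, order);
        for (ClassNode* n : order) std::printf("%s\n", n->name);
        return 0;
    }
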
++jvmtiError VM_RedefineClasses::find_sorted_affected_classes() { ++ ++ assert(_affected_klasses, ""); ++ for (int i = 0; i < _class_count; i++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); ++ instanceKlassHandle klass_handle(Thread::current(), java_lang_Class::as_klassOop(mirror)); ++ klass_handle->set_redefinition_flag(Klass::MarkedAsAffected); ++ assert(klass_handle->new_version() == NULL, "Must be new class"); + } + +- RC_TIMER_STOP(_timer_vm_op_prologue); +- return true; ++ // Find classes not directly redefined, but affected by a redefinition (because one of its supertypes is redefined) ++ SystemDictionary::classes_do(VM_RedefineClasses::add_affected_klasses); ++ TRACE_RC1("%d classes affected", _affected_klasses->length()); ++ ++ // Sort the affected klasses such that a supertype is always on a smaller array index than its subtype. ++ jvmtiError result = do_topological_class_sorting(_class_defs, _class_count, Thread::current()); ++ IF_TRACE_RC2 { ++ TRACE_RC2("Redefine order: "); ++ for (int i = 0; i < _affected_klasses->length(); i++) { ++ TRACE_RC2("%s", _affected_klasses->at(i)->name()->as_C_string()); ++ } ++ } ++ ++ return result; + } + +-void VM_RedefineClasses::doit() { +- Thread *thread = Thread::current(); ++// Searches for the class bytes of the given class and returns them as a byte array. ++jvmtiError VM_RedefineClasses::find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed) { + +- if (UseSharedSpaces) { +- // Sharing is enabled so we remap the shared readonly space to +- // shared readwrite, private just in case we need to redefine +- // a shared class. We do the remap during the doit() phase of +- // the safepoint to be safer. +- if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { +- RC_TRACE_WITH_THREAD(0x00000001, thread, +- ("failed to remap shared readonly space to readwrite, private")); +- _res = JVMTI_ERROR_INTERNAL; +- return; ++ *not_changed = false; ++ ++ // Search for the index in the redefinition array that corresponds to the current class ++ int j; ++ for (j=0; j<_class_count; j++) { ++ oop mirror = JNIHandles::resolve_non_null(_class_defs[j].klass); ++ klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); ++ if (the_class_oop == the_class()) { ++ break; + } + } + +- for (int i = 0; i < _class_count; i++) { +- redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread); +- } +- // Disable any dependent concurrent compilations +- SystemDictionary::notice_modification(); ++ if (j == _class_count) { + +- // Set flag indicating that some invariants are no longer true. +- // See jvmtiExport.hpp for detailed explanation. +- JvmtiExport::set_has_redefined_a_class(); ++ *not_changed = true; + +-// check_class() is optionally called for product bits, but is +-// always called for non-product bits. +-#ifdef PRODUCT +- if (RC_TRACE_ENABLED(0x00004000)) { +-#endif +- RC_TRACE_WITH_THREAD(0x00004000, thread, ("calling check_class")); +- SystemDictionary::classes_do(check_class, thread); +-#ifdef PRODUCT ++ // Redefine with same bytecodes. This is a class that is only indirectly affected by redefinition, ++ // so the user did not specify a different bytecode for that class. 
++ ++ if (the_class->get_cached_class_file_bytes() == NULL) { ++ // not cached, we need to reconstitute the class file from VM representation ++ constantPoolHandle constants(Thread::current(), the_class->constants()); ++ ObjectLocker ol(constants, Thread::current()); // lock constant pool while we query it ++ ++ JvmtiClassFileReconstituter reconstituter(the_class); ++ if (reconstituter.get_error() != JVMTI_ERROR_NONE) { ++ return reconstituter.get_error(); ++ } ++ ++ *class_byte_count = (jint)reconstituter.class_file_size(); ++ *class_bytes = (unsigned char*)reconstituter.class_file_bytes(); ++ ++ TRACE_RC3("Reconstituted class bytes"); ++ ++ } else { ++ ++ // it is cached, get it from the cache ++ *class_byte_count = the_class->get_cached_class_file_len(); ++ *class_bytes = the_class->get_cached_class_file_bytes(); ++ ++ ++ TRACE_RC3("Retrieved cached class bytes"); ++ } ++ ++ } else { ++ ++ // Redefine with bytecodes at index j ++ *class_bytes = _class_defs[j].class_bytes; ++ *class_byte_count = _class_defs[j].class_byte_count; + } +-#endif ++ ++ return JVMTI_ERROR_NONE; + } + +-void VM_RedefineClasses::doit_epilogue() { +- // Free os::malloc allocated memory. +- // The memory allocated in redefine will be free'ed in next VM operation. +- os::free(_scratch_classes); +- +- if (RC_TRACE_ENABLED(0x00000004)) { +- // Used to have separate timers for "doit" and "all", but the timer +- // overhead skewed the measurements. +- jlong doit_time = _timer_rsc_phase1.milliseconds() + +- _timer_rsc_phase2.milliseconds(); +- jlong all_time = _timer_vm_op_prologue.milliseconds() + doit_time; +- +- RC_TRACE(0x00000004, ("vm_op: all=" UINT64_FORMAT +- " prologue=" UINT64_FORMAT " doit=" UINT64_FORMAT, all_time, +- _timer_vm_op_prologue.milliseconds(), doit_time)); +- RC_TRACE(0x00000004, +- ("redefine_single_class: phase1=" UINT64_FORMAT " phase2=" UINT64_FORMAT, +- _timer_rsc_phase1.milliseconds(), _timer_rsc_phase2.milliseconds())); ++// Prologue of the VM operation, called on the Java thread in parallel to normal program execution ++bool VM_RedefineClasses::doit_prologue() { ++ ++ _revision_number++; ++ TRACE_RC1("Redefinition with revision number %d started!", _revision_number); ++ lock_threads(); ++ ++ assert(Thread::current()->is_Java_thread(), "must be Java thread"); ++ RC_TIMER_START(_timer_prologue); ++ ++ if (!check_arguments()) { ++ RC_TIMER_STOP(_timer_prologue); ++ return false; + } +-} + +-bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { +- // classes for primitives cannot be redefined +- if (java_lang_Class::is_primitive(klass_mirror)) { ++ // We first load new class versions in the prologue, because somewhere down the ++ // call chain it is required that the current thread is a Java thread. 
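
find_class_bytes above draws the new bytecode from one of three places: the caller's class definition for directly redefined classes, and for indirectly affected ones either the cached class file or a freshly reconstituted one. The decision collapses to a sketch like this (ClassDef and resolve_bytes are hypothetical stand-ins for the JVMTI structures):

    #include <cstdint>
    #include <vector>

    struct ClassDef { const void* klass; std::vector<uint8_t> bytes; };

    // Mirrors the three-way choice in find_class_bytes: an explicitly redefined
    // class uses the caller-supplied bytes; an indirectly affected class falls
    // back to the cached class file, or to reconstitution when nothing is cached.
    std::vector<uint8_t> resolve_bytes(const void* klass,
                                       const std::vector<ClassDef>& defs,
                                       const std::vector<uint8_t>* cached,
                                       std::vector<uint8_t> (*reconstitute)(const void*),
                                       bool* not_changed) {
      for (const ClassDef& d : defs)
        if (d.klass == klass) { *not_changed = false; return d.bytes; }
      *not_changed = true;               // only indirectly affected
      return cached ? *cached : reconstitute(klass);
    }
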
++  _new_classes = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceKlassHandle>(5, true);
++
++  assert(_affected_klasses == NULL, "");
++  _affected_klasses = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceKlassHandle>(_class_count, true);
++
++
++  _result = load_new_class_versions(Thread::current());
++
++  TRACE_RC1("Loaded new class versions!");
++  if (_result != JVMTI_ERROR_NONE) {
++    TRACE_RC1("error occurred: %d!", _result);
++    delete _new_classes;
++    _new_classes = NULL;
++    delete _affected_klasses;
++    _affected_klasses = NULL;
++    RC_TIMER_STOP(_timer_prologue);
+    return false;
+  }
+-  klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror);
+-  // classes for arrays cannot be redefined
+-  if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) {
++
++  TRACE_RC2("nearly finished");
++  VM_GC_Operation::doit_prologue();
++  RC_TIMER_STOP(_timer_prologue);
++  TRACE_RC2("doit_prologue finished!");
++  return true;
++}
++
++// Checks basic properties of the arguments of the redefinition command.
++jvmtiError VM_RedefineClasses::check_arguments_error() {
++  if (_class_defs == NULL) return JVMTI_ERROR_NULL_POINTER;
++  for (int i = 0; i < _class_count; i++) {
++    if (_class_defs[i].klass == NULL) return JVMTI_ERROR_INVALID_CLASS;
++    if (_class_defs[i].class_byte_count == 0) return JVMTI_ERROR_INVALID_CLASS_FORMAT;
++    if (_class_defs[i].class_bytes == NULL) return JVMTI_ERROR_NULL_POINTER;
++  }
++  return JVMTI_ERROR_NONE;
++}
++
++// Returns false and sets a result error code if the redefinition should be aborted.
++bool VM_RedefineClasses::check_arguments() {
++  jvmtiError error = check_arguments_error();
++  if (error != JVMTI_ERROR_NONE || _class_count == 0) {
++    _result = error;
+    return false;
+  }
+  return true;
+ }
+
+-// Append the current entry at scratch_i in scratch_cp to *merge_cp_p
+-// where the end of *merge_cp_p is specified by *merge_cp_length_p. For
+-// direct CP entries, there is just the current entry to append. For
+-// indirect and double-indirect CP entries, there are zero or more
+-// referenced CP entries along with the current entry to append.
+-// Indirect and double-indirect CP entries are handled by recursive
+-// calls to append_entry() as needed. The referenced CP entries are
+-// always appended to *merge_cp_p before the referee CP entry. These
+-// referenced CP entries may already exist in *merge_cp_p in which case
+-// there is nothing extra to append and only the current entry is
+-// appended.
+-void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
+-    int scratch_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p,
+-    TRAPS) {
+-
+-  // append is different depending on entry tag type
+-  switch (scratch_cp->tag_at(scratch_i).value()) {
+-
+-  // The old verifier is implemented outside the VM. It loads classes,
+-  // but does not resolve constant pool entries directly so we never
+-  // see Class entries here with the old verifier. Similarly the old
+-  // verifier does not like Class entries in the input constant pool.
+-  // The split-verifier is implemented in the VM so it can optionally
+-  // and directly resolve constant pool entries to load classes. The
+-  // split-verifier can accept either Class entries or UnresolvedClass
+-  // entries in the input constant pool. We revert the appended copy
+-  // back to UnresolvedClass so that either verifier will be happy
+-  // with the constant pool entry.
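
check_arguments_error above is a plain precondition scan over the incoming class definitions. Rendered as a freestanding sketch, with a local enum standing in for the JVMTI error constants:

    #include <cstddef>

    enum Err { ERR_NONE, ERR_NULL_POINTER, ERR_INVALID_CLASS, ERR_INVALID_CLASS_FORMAT };

    struct Def { const void* klass; const unsigned char* bytes; int byte_count; };

    // Same checks, same precedence as the patch: null array first, then the
    // per-entry class handle, byte count, and byte pointer.
    Err check_arguments_error(const Def* defs, int count) {
      if (defs == NULL) return ERR_NULL_POINTER;
      for (int i = 0; i < count; i++) {
        if (defs[i].klass == NULL) return ERR_INVALID_CLASS;
        if (defs[i].byte_count == 0) return ERR_INVALID_CLASS_FORMAT;
        if (defs[i].bytes == NULL) return ERR_NULL_POINTER;
      }
      return ERR_NONE;
    }
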
+- case JVM_CONSTANT_Class: +- { +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- (*merge_cp_p)->unresolved_klass_at_put(*merge_cp_length_p, +- scratch_cp->klass_name_at(scratch_i)); +- +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++jvmtiError VM_RedefineClasses::check_exception() const { ++ Thread* THREAD = Thread::current(); ++ if (HAS_PENDING_EXCEPTION) { ++ ++ Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); ++ TRACE_RC1("parse_stream exception: '%s'", ex_name->as_C_string()); ++ if (TraceRedefineClasses >= 1) { ++ java_lang_Throwable::print(PENDING_EXCEPTION, tty); ++ tty->print_cr(""); ++ } ++ CLEAR_PENDING_EXCEPTION; ++ ++ if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { ++ return JVMTI_ERROR_UNSUPPORTED_VERSION; ++ } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { ++ return JVMTI_ERROR_INVALID_CLASS_FORMAT; ++ } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; ++ } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { ++ // The message will be "XXX (wrong name: YYY)" ++ return JVMTI_ERROR_NAMES_DONT_MATCH; ++ } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { ++ return JVMTI_ERROR_OUT_OF_MEMORY; ++ } else { ++ // Just in case more exceptions can be thrown.. ++ return JVMTI_ERROR_FAILS_VERIFICATION; ++ } ++ } + +- // these are direct CP entries so they can be directly appended, +- // but double and long take two constant pool entries +- case JVM_CONSTANT_Double: // fall through +- case JVM_CONSTANT_Long: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); ++ return JVMTI_ERROR_NONE; ++} + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p) += 2; +- } break; +- +- // these are direct CP entries so they can be directly appended +- case JVM_CONSTANT_Float: // fall through +- case JVM_CONSTANT_Integer: // fall through +- case JVM_CONSTANT_Utf8: // fall through +- +- // This was an indirect CP entry, but it has been changed into +- // an interned string so this entry can be directly appended. +- case JVM_CONSTANT_String: // fall through +- +- // These were indirect CP entries, but they have been changed into +- // Symbol*s so these entries can be directly appended. +- case JVM_CONSTANT_UnresolvedClass: // fall through +- case JVM_CONSTANT_UnresolvedString: +- { +- constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, +- THREAD); ++// Loads all new class versions and stores the instanceKlass handles in an array. ++jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. 
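
check_exception above reduces to a lookup from the pending exception's class to a JVMTI error code, with verification failure as the catch-all. The table on its own (string names stand in for the vmSymbols comparisons):

    #include <cstring>

    enum JErr { J_UNSUPPORTED_VERSION, J_INVALID_CLASS_FORMAT, J_CIRCULAR_CLASS,
                J_NAMES_DONT_MATCH, J_OUT_OF_MEMORY, J_FAILS_VERIFICATION };

    // Same fallback behavior as the patch: anything unrecognized is reported
    // as a verification failure.
    JErr map_exception(const char* ex_name) {
      if (!strcmp(ex_name, "java/lang/UnsupportedClassVersionError")) return J_UNSUPPORTED_VERSION;
      if (!strcmp(ex_name, "java/lang/ClassFormatError"))             return J_INVALID_CLASS_FORMAT;
      if (!strcmp(ex_name, "java/lang/ClassCircularityError"))        return J_CIRCULAR_CLASS;
      if (!strcmp(ex_name, "java/lang/NoClassDefFoundError"))         return J_NAMES_DONT_MATCH;
      if (!strcmp(ex_name, "java/lang/OutOfMemoryError"))             return J_OUT_OF_MEMORY;
      return J_FAILS_VERIFICATION;
    }
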
+- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++ ResourceMark rm(THREAD); + +- // this is an indirect CP entry so it needs special handling +- case JVM_CONSTANT_NameAndType: +- { +- int name_ref_i = scratch_cp->name_ref_index_at(scratch_i); +- int new_name_ref_i = 0; +- bool match = (name_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_ref_i, *merge_cp_p, name_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_name_ref_i = found_i; +- map_index(scratch_cp, name_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. +- new_name_ref_i = *merge_cp_length_p - 1; ++ TRACE_RC1("==================================================================="); ++ TRACE_RC1("redefinition started by thread \"%s\"", THREAD->name()); ++ TRACE_RC1("load new class versions (%d)", _class_count); ++ ++ // Retrieve an array of all classes that need to be redefined ++ jvmtiError err = find_sorted_affected_classes(); ++ if (err != JVMTI_ERROR_NONE) { ++ TRACE_RC1("Error finding sorted affected classes: %d", (int)err); ++ return err; ++ } ++ ++ ++ JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); ++ ++ _max_redefinition_flags = Klass::NoRedefinition; ++ jvmtiError result = JVMTI_ERROR_NONE; ++ ++ for (int i=0; i<_affected_klasses->length(); i++) { ++ TRACE_RC2("Processing affected class %d of %d", i+1, _affected_klasses->length()); ++ ++ instanceKlassHandle the_class = _affected_klasses->at(i); ++ TRACE_RC2("name=%s", the_class->name()->as_C_string()); ++ ++ the_class->link_class(THREAD); ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ // Find new class bytes ++ const unsigned char* class_bytes; ++ jint class_byte_count; ++ jvmtiError error; ++ jboolean not_changed; ++ if ((error = find_class_bytes(the_class, &class_bytes, &class_byte_count, ¬_changed)) != JVMTI_ERROR_NONE) { ++ TRACE_RC1("Error finding class bytes: %d", (int)error); ++ result = error; ++ break; ++ } ++ assert(class_bytes != NULL && class_byte_count != 0, "Class bytes defined at this point!"); ++ ++ ++ // Set redefined class handle in JvmtiThreadState class. ++ // This redefined class is sent to agent event handler for class file ++ // load hook event. ++ state->set_class_being_redefined(&the_class, _class_load_kind); ++ ++ TRACE_RC2("Before resolving from stream"); ++ ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_loading); ++ ++ ++ // Parse the stream. 
++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ClassFileStream st((u1*) class_bytes, class_byte_count, (char *)"__VM_RedefineClasses__"); ++ instanceKlassHandle new_class(THREAD, SystemDictionary::resolve_from_stream(the_class->name(), ++ the_class_loader, ++ protection_domain, ++ &st, ++ true, ++ the_class, ++ THREAD)); ++ ++ RC_TIMER_STOP(_timer_class_loading); ++ RC_TIMER_START(_timer_prologue); ++ ++ TRACE_RC2("After resolving class from stream!"); ++ // Clear class_being_redefined just to be sure. ++ state->clear_class_being_redefined(); ++ ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ ++ not_changed = false; ++ ++#ifdef ASSERT ++ ++ assert(new_class() != NULL, "Class could not be loaded!"); ++ assert(new_class() != the_class(), "must be different"); ++ assert(new_class->new_version() == NULL && new_class->old_version() != NULL, ""); ++ ++ ++ objArrayOop k_interfaces = new_class->local_interfaces(); ++ for (int j=0; j<k_interfaces->length(); j++) { ++ assert(((klassOop)k_interfaces->obj_at(j))->klass_part()->is_newest_version(), "just checking"); ++ } ++ ++ if (!THREAD->is_Compiler_thread()) { ++ ++ TRACE_RC2("name=%s loader="INTPTR_FORMAT" protection_domain="INTPTR_FORMAT, the_class->name()->as_C_string(), the_class->class_loader(), the_class->protection_domain()); ++ // If we are on the compiler thread, we must not try to resolve a class. ++ klassOop systemLookup = SystemDictionary::resolve_or_null(the_class->name(), the_class->class_loader(), the_class->protection_domain(), THREAD); ++ ++ if (systemLookup != NULL) { ++ assert(systemLookup == new_class->old_version(), "Old class must be in system dictionary!"); ++ ++ ++ Klass *subklass = new_class()->klass_part()->subklass(); ++ while (subklass != NULL) { ++ assert(subklass->new_version() == NULL, "Most recent version of class!"); ++ subklass = subklass->next_sibling(); + } ++ } else { ++ // This can happen for reflection generated classes.. ? ++ CLEAR_PENDING_EXCEPTION; + } ++ } + +- int signature_ref_i = scratch_cp->signature_ref_index_at(scratch_i); +- int new_signature_ref_i = 0; +- match = (signature_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(signature_ref_i, *merge_cp_p, +- signature_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(signature_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != signature_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_signature_ref_i = found_i; +- map_index(scratch_cp, signature_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, signature_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. 
+- new_signature_ref_i = *merge_cp_length_p - 1; +- } ++#endif ++ ++ IF_TRACE_RC1 { ++ if (new_class->layout_helper() != the_class->layout_helper()) { ++ TRACE_RC1("Instance size change for class %s: new=%d old=%d", new_class->name()->as_C_string(), new_class->layout_helper(), the_class->layout_helper()); + } ++ } + +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_name_ref_i and new_signature_ref_i will both be 0. +- // In that case, all we are appending is the current entry. +- if (new_name_ref_i == 0) { +- new_name_ref_i = name_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d name_ref_index change: %d to %d", +- *merge_cp_length_p, name_ref_i, new_name_ref_i)); ++ // Set the new version of the class ++ new_class->set_revision_number(_revision_number); ++ new_class->set_redefinition_index(i); ++ the_class->set_new_version(new_class()); ++ _new_classes->append(new_class); ++ ++ assert(new_class->new_version() == NULL, ""); ++ ++ int redefinition_flags = Klass::NoRedefinition; ++ ++ if (not_changed) { ++ redefinition_flags = Klass::NoRedefinition; ++ } else if (AllowAdvancedClassRedefinition) { ++ redefinition_flags = calculate_redefinition_flags(new_class); ++ if (redefinition_flags >= Klass::RemoveSuperType) { ++ TRACE_RC1("Remove super type is not allowed"); ++ result = JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ break; + } +- if (new_signature_ref_i == 0) { +- new_signature_ref_i = signature_ref_i; +- } else { +- RC_TRACE(0x00080000, +- ("NameAndType entry@%d signature_ref_index change: %d to %d", +- *merge_cp_length_p, signature_ref_i, new_signature_ref_i)); ++ } else { ++ jvmtiError allowed = check_redefinition_allowed(new_class); ++ if (allowed != JVMTI_ERROR_NONE) { ++ TRACE_RC1("Error redefinition not allowed!"); ++ result = allowed; ++ break; + } ++ redefinition_flags = Klass::ModifyClass; ++ } + +- (*merge_cp_p)->name_and_type_at_put(*merge_cp_length_p, +- new_name_ref_i, new_signature_ref_i); +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); ++ if (new_class->super() != NULL) { ++ redefinition_flags = redefinition_flags | new_class->super()->klass_part()->redefinition_flags(); ++ } ++ ++ for (int j=0; j<new_class->local_interfaces()->length(); j++) { ++ redefinition_flags = redefinition_flags | ((klassOop)new_class->local_interfaces()->obj_at(j))->klass_part()->redefinition_flags(); ++ } ++ ++ new_class->set_redefinition_flags(redefinition_flags); ++ ++ _max_redefinition_flags = _max_redefinition_flags | redefinition_flags; ++ ++ if ((redefinition_flags & Klass::ModifyInstances) != 0) { ++ // TODO: Check if watch access flags of static fields are updated correctly. 
++ calculate_instance_update_information(_new_classes->at(i)()); ++ } else { ++ // Fields were not changed, transfer special flags only ++ assert(new_class->layout_helper() >> 1 == new_class->old_version()->klass_part()->layout_helper() >> 1, "must be equal"); ++ assert(new_class->fields()->length() == ((instanceKlass*)new_class->old_version()->klass_part())->fields()->length(), "must be equal"); ++ ++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ AccessFlags flags = new_fs.access_flags(); ++ flags.set_is_field_modification_watched(old_fs.access_flags().is_field_modification_watched()); ++ flags.set_is_field_access_watched(old_fs.access_flags().is_field_access_watched()); ++ new_fs.set_access_flags(flags); + } +- (*merge_cp_length_p)++; +- } break; ++ } + +- // this is a double-indirect CP entry so it needs special handling +- case JVM_CONSTANT_Fieldref: // fall through +- case JVM_CONSTANT_InterfaceMethodref: // fall through +- case JVM_CONSTANT_Methodref: +- { +- int klass_ref_i = scratch_cp->uncached_klass_ref_index_at(scratch_i); +- int new_klass_ref_i = 0; +- bool match = (klass_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(klass_ref_i, *merge_cp_p, klass_ref_i, +- THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(klass_ref_i, *merge_cp_p, +- THREAD); +- if (found_i != 0) { +- guarantee(found_i != klass_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- new_klass_ref_i = found_i; +- map_index(scratch_cp, klass_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, klass_ref_i, merge_cp_p, merge_cp_length_p, +- THREAD); +- // The above call to append_entry() can only append one entry +- // so the post call query of *merge_cp_length_p is only for +- // the sake of consistency. Without the optimization where we +- // use JVM_CONSTANT_UnresolvedClass, then up to two entries +- // could be appended. +- new_klass_ref_i = *merge_cp_length_p - 1; +- } ++ IF_TRACE_RC3 { ++ if (new_class->super() != NULL) { ++ TRACE_RC3("Super class is %s", new_class->super()->klass_part()->name()->as_C_string()); + } ++ } + +- int name_and_type_ref_i = +- scratch_cp->uncached_name_and_type_ref_index_at(scratch_i); +- int new_name_and_type_ref_i = 0; +- match = (name_and_type_ref_i < *merge_cp_length_p) && +- scratch_cp->compare_entry_to(name_and_type_ref_i, *merge_cp_p, +- name_and_type_ref_i, THREAD); +- if (!match) { +- // forward reference in *merge_cp_p or not a direct match +- +- int found_i = scratch_cp->find_matching_entry(name_and_type_ref_i, +- *merge_cp_p, THREAD); +- if (found_i != 0) { +- guarantee(found_i != name_and_type_ref_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. 
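
When the field layout is unchanged, the loop above only carries the JVMTI field-watch bits over to the new class version. Over plain parallel arrays (assumed stand-ins for the JavaFieldStream walk, whose index alignment the redefinition has already verified):

    #include <cstddef>
    #include <vector>

    struct FieldFlags { bool access_watched; bool modification_watched; };

    // Old and new field lists are index-aligned here because names, signatures,
    // and order were checked to be identical before this point.
    void transfer_watch_bits(const std::vector<FieldFlags>& old_fields,
                             std::vector<FieldFlags>& new_fields) {
      for (size_t i = 0; i < old_fields.size() && i < new_fields.size(); i++) {
        new_fields[i].access_watched       = old_fields[i].access_watched;
        new_fields[i].modification_watched = old_fields[i].modification_watched;
      }
    }
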
+- new_name_and_type_ref_i = found_i; +- map_index(scratch_cp, name_and_type_ref_i, found_i); +- } else { +- // no match found so we have to append this entry to *merge_cp_p +- append_entry(scratch_cp, name_and_type_ref_i, merge_cp_p, +- merge_cp_length_p, THREAD); +- // The above call to append_entry() can append more than +- // one entry so the post call query of *merge_cp_length_p +- // is required in order to get the right index for the +- // JVM_CONSTANT_NameAndType entry. +- new_name_and_type_ref_i = *merge_cp_length_p - 1; ++#ifdef ASSERT ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->new_version() == NULL, "Super klass must be newest version!"); ++ ++ the_class->vtable()->verify(tty); ++ new_class->vtable()->verify(tty); ++#endif ++ ++ TRACE_RC2("Verification done!"); ++ ++ if (i == _affected_klasses->length() - 1) { ++ ++ // This was the last class processed => check if additional classes have been loaded in the meantime ++ ++ for (int j=0; j<_affected_klasses->length(); j++) { ++ ++ klassOop initial_klass = _affected_klasses->at(j)(); ++ Klass *initial_subklass = initial_klass->klass_part()->subklass(); ++ Klass *cur_klass = initial_subklass; ++ while(cur_klass != NULL) { ++ ++ if(cur_klass->oop_is_instance() && cur_klass->is_newest_version() && !cur_klass->is_redefining()) { ++ instanceKlassHandle handle(THREAD, cur_klass->as_klassOop()); ++ if (!_affected_klasses->contains(handle)) { ++ ++ int k = i + 1; ++ for (; k<_affected_klasses->length(); k++) { ++ if (_affected_klasses->at(k)->is_subtype_of(cur_klass->as_klassOop())) { ++ break; ++ } ++ } ++ _affected_klasses->insert_before(k, handle); ++ TRACE_RC2("Adding newly loaded class to affected classes: %s", cur_klass->name()->as_C_string()); ++ } ++ } ++ ++ cur_klass = cur_klass->next_sibling(); + } + } + +- // If the referenced entries already exist in *merge_cp_p, then +- // both new_klass_ref_i and new_name_and_type_ref_i will both be +- // 0. In that case, all we are appending is the current entry. 
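
The late-arrival handling above preserves the topological invariant for classes loaded while the prologue is running: each one is spliced into the remaining worklist just before its first subtype. As a generic splice sketch (the subtype test is passed in as a predicate rather than taken from the Klass API):

    #include <vector>

    // Insert 'k' before the first not-yet-processed entry that is a subtype
    // of it, so supertypes still come first; 'next' is the index just past
    // the class currently being processed.
    template <typename K, typename IsSubtypeOf>
    void insert_affected(std::vector<K>& worklist, size_t next, K k, IsSubtypeOf is_subtype_of) {
      size_t pos = next;
      while (pos < worklist.size() && !is_subtype_of(worklist[pos], k)) pos++;
      worklist.insert(worklist.begin() + pos, k);
    }
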
+- if (new_klass_ref_i == 0) { +- new_klass_ref_i = klass_ref_i; +- } +- if (new_name_and_type_ref_i == 0) { +- new_name_and_type_ref_i = name_and_type_ref_i; +- } ++ int new_count = _affected_klasses->length() - 1 - i; ++ if (new_count != 0) { + +- const char *entry_name; +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Fieldref: +- entry_name = "Fieldref"; +- (*merge_cp_p)->field_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_InterfaceMethodref: +- entry_name = "IFMethodref"; +- (*merge_cp_p)->interface_method_at_put(*merge_cp_length_p, +- new_klass_ref_i, new_name_and_type_ref_i); +- break; +- case JVM_CONSTANT_Methodref: +- entry_name = "Methodref"; +- (*merge_cp_p)->method_at_put(*merge_cp_length_p, new_klass_ref_i, +- new_name_and_type_ref_i); +- break; +- default: +- guarantee(false, "bad switch"); +- break; ++ TRACE_RC1("Found new number of affected classes: %d", new_count); + } ++ } ++ } + +- if (klass_ref_i != new_klass_ref_i) { +- RC_TRACE(0x00080000, ("%s entry@%d class_index changed: %d to %d", +- entry_name, *merge_cp_length_p, klass_ref_i, new_klass_ref_i)); +- } +- if (name_and_type_ref_i != new_name_and_type_ref_i) { +- RC_TRACE(0x00080000, +- ("%s entry@%d name_and_type_index changed: %d to %d", +- entry_name, *merge_cp_length_p, name_and_type_ref_i, +- new_name_and_type_ref_i)); +- } ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } + +- if (scratch_i != *merge_cp_length_p) { +- // The new entry in *merge_cp_p is at a different index than +- // the new entry in scratch_cp so we need to map the index values. +- map_index(scratch_cp, scratch_i, *merge_cp_length_p); +- } +- (*merge_cp_length_p)++; +- } break; ++ RC_TIMER_STOP(_timer_prologue); ++ RC_TIMER_START(_timer_class_linking); ++ // Link and verify new classes _after_ all classes have been updated in the system dictionary! ++ for (int i=0; i<_affected_klasses->length(); i++) { ++ instanceKlassHandle the_class = _affected_klasses->at(i); ++ instanceKlassHandle new_class(the_class->new_version()); + +- // At this stage, Class or UnresolvedClass could be here, but not +- // ClassIndex +- case JVM_CONSTANT_ClassIndex: // fall through ++ TRACE_RC2("Linking class %d/%d %s", i, _affected_klasses->length(), the_class->name()->as_C_string()); ++ new_class->link_class(THREAD); + +- // Invalid is used as the tag for the second constant pool entry +- // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should +- // not be seen by itself. 
+- case JVM_CONSTANT_Invalid: // fall through ++ result = check_exception(); ++ if (result != JVMTI_ERROR_NONE) break; ++ } ++ RC_TIMER_STOP(_timer_class_linking); ++ RC_TIMER_START(_timer_prologue); + +- // At this stage, String or UnresolvedString could be here, but not +- // StringIndex +- case JVM_CONSTANT_StringIndex: // fall through ++ if (result != JVMTI_ERROR_NONE) { ++ rollback(); ++ return result; ++ } + +- // At this stage JVM_CONSTANT_UnresolvedClassInError should not be +- // here +- case JVM_CONSTANT_UnresolvedClassInError: // fall through ++ TRACE_RC2("All classes loaded!"); + +- default: +- { +- // leave a breadcrumb +- jbyte bad_value = scratch_cp->tag_at(scratch_i).value(); +- ShouldNotReachHere(); +- } break; +- } // end switch tag value +-} // end append_entry() ++#ifdef ASSERT ++ for (int i=0; i<_affected_klasses->length(); i++) { ++ instanceKlassHandle the_class = _affected_klasses->at(i); ++ assert(the_class->new_version() != NULL, "Must have been redefined"); ++ instanceKlassHandle new_version = instanceKlassHandle(THREAD, the_class->new_version()); ++ assert(new_version->new_version() == NULL, "Must be newest version"); + ++ if (!(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL)) { ++ new_version()->print(); ++ new_version->super()->print(); ++ } ++ assert(new_version->super() == NULL || new_version->super()->klass_part()->new_version() == NULL, "Super class must be newest version"); ++ } + +-void VM_RedefineClasses::swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class) { +- typeArrayOop save; ++ SystemDictionary::classes_do(check_class, THREAD); + +- save = scratch_class->get_method_annotations_of(i); +- scratch_class->set_method_annotations_of(i, scratch_class->get_method_annotations_of(j)); +- scratch_class->set_method_annotations_of(j, save); ++#endif + +- save = scratch_class->get_method_parameter_annotations_of(i); +- scratch_class->set_method_parameter_annotations_of(i, scratch_class->get_method_parameter_annotations_of(j)); +- scratch_class->set_method_parameter_annotations_of(j, save); ++ TRACE_RC1("Finished verification!"); ++ return JVMTI_ERROR_NONE; ++} + +- save = scratch_class->get_method_default_annotations_of(i); +- scratch_class->set_method_default_annotations_of(i, scratch_class->get_method_default_annotations_of(j)); +- scratch_class->set_method_default_annotations_of(j, save); ++void VM_RedefineClasses::lock_threads() { ++ ++ RC_TIMER_START(_timer_wait_for_locks); ++ ++ ++ JavaThread *javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->set_should_bailout(true); ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ int cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ compilerThread->compilation_mutex()->lock(); ++ cnt++; ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ TRACE_RC2("Locked %d compiler threads", cnt); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ javaThread->redefine_classes_mutex()->lock(); ++ cnt++; ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ ++ TRACE_RC2("Locked %d threads", cnt); ++ ++ RC_TIMER_STOP(_timer_wait_for_locks); ++} ++ ++void 
VM_RedefineClasses::unlock_threads() { ++ ++ int cnt = 0; ++ JavaThread *javaThread = Threads::first(); ++ Thread *thread = Thread::current(); ++ while (javaThread != NULL) { ++ if (javaThread->is_Compiler_thread() && javaThread != Thread::current()) { ++ CompilerThread *compilerThread = (CompilerThread *)javaThread; ++ if (compilerThread->compilation_mutex()->owned_by_self()) { ++ compilerThread->compilation_mutex()->unlock(); ++ cnt++; ++ } ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ TRACE_RC2("Unlocked %d compiler threads", cnt); ++ ++ cnt = 0; ++ javaThread = Threads::first(); ++ while (javaThread != NULL) { ++ if (javaThread != Thread::current()) { ++ if (javaThread->redefine_classes_mutex()->owned_by_self()) { ++ javaThread->redefine_classes_mutex()->unlock(); ++ cnt++; ++ } ++ } ++ javaThread = javaThread->next(); ++ } ++ ++ TRACE_RC2("Unlocked %d threads", cnt); + } + ++jvmtiError VM_RedefineClasses::check_redefinition_allowed(instanceKlassHandle scratch_class) { ++ ++ ++ ++ // Compatibility mode => check for unsupported modification ++ ++ ++ assert(scratch_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(scratch_class->old_version()); + +-jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( +- instanceKlassHandle the_class, +- instanceKlassHandle scratch_class) { + int i; + + // Check superclasses, or rather their names, since superclasses themselves can be + // requested to replace. + // Check for NULL superclass first since this might be java.lang.Object + if (the_class->super() != scratch_class->super() && +- (the_class->super() == NULL || scratch_class->super() == NULL || +- Klass::cast(the_class->super())->name() != +- Klass::cast(scratch_class->super())->name())) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ (the_class->super() == NULL || scratch_class->super() == NULL || ++ Klass::cast(the_class->super())->name() != ++ Klass::cast(scratch_class->super())->name())) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + + // Check if the number, names and order of directly implemented interfaces are the same. +@@ -539,8 +718,8 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + } + for (i = 0; i < n_intfs; i++) { + if (Klass::cast((klassOop) k_interfaces->obj_at(i))->name() != +- Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; ++ Klass::cast((klassOop) k_new_interfaces->obj_at(i))->name()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED; + } + } + +@@ -578,14 +757,283 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + Symbol* name_sym2 = scratch_class->constants()->symbol_at(new_fs.name_index()); + Symbol* sig_sym2 = scratch_class->constants()->symbol_at(new_fs.signature_index()); + if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ } ++ } ++ ++ // If both streams aren't done then we have a differing number of ++ // fields. ++ if (!old_fs.done() || !new_fs.done()) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ } ++ ++ // Do a parallel walk through the old and new methods. Detect ++ // cases where they match (exist in both), have been added in ++ // the new methods, or have been deleted (exist only in the ++ // old methods). 
The class file parser places methods in order ++ // by method name, but does not order overloaded methods by ++ // signature. In order to determine what fate befell the methods, ++ // this code places the overloaded new methods that have matching ++ // old methods in the same order as the old methods and places ++ // new overloaded methods at the end of overloaded methods of ++ // that name. The code for this order normalization is adapted ++ // from the algorithm used in instanceKlass::find_method(). ++ // Since we are swapping out of order entries as we find them, ++ // we only have to search forward through the overloaded methods. ++ // Methods which are added and have the same name as an existing ++ // method (but different signature) will be put at the end of ++ // the methods with that name, and the name mismatch code will ++ // handle them. ++ objArrayHandle k_old_methods(the_class->methods()); ++ objArrayHandle k_new_methods(scratch_class->methods()); ++ int n_old_methods = k_old_methods->length(); ++ int n_new_methods = k_new_methods->length(); ++ ++ int ni = 0; ++ int oi = 0; ++ while (true) { ++ methodOop k_old_method; ++ methodOop k_new_method; ++ enum { matched, added, deleted, undetermined } method_was = undetermined; ++ ++ if (oi >= n_old_methods) { ++ if (ni >= n_new_methods) { ++ break; // we've looked at everything, done ++ } ++ // New method at the end ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ method_was = added; ++ } else if (ni >= n_new_methods) { ++ // Old method, at the end, is deleted ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ method_was = deleted; ++ } else { ++ // There are more methods in both the old and new lists ++ k_old_method = (methodOop) k_old_methods->obj_at(oi); ++ k_new_method = (methodOop) k_new_methods->obj_at(ni); ++ if (k_old_method->name() != k_new_method->name()) { ++ // Methods are sorted by method name, so a mismatch means added ++ // or deleted ++ if (k_old_method->name()->fast_compare(k_new_method->name()) > 0) { ++ method_was = added; ++ } else { ++ method_was = deleted; ++ } ++ } else if (k_old_method->signature() == k_new_method->signature()) { ++ // Both the name and signature match ++ method_was = matched; ++ } else { ++ // The name matches, but the signature doesn't, which means we have to ++ // search forward through the new overloaded methods. 
++ int nj; // outside the loop for post-loop check ++ for (nj = ni + 1; nj < n_new_methods; nj++) { ++ methodOop m = (methodOop)k_new_methods->obj_at(nj); ++ if (k_old_method->name() != m->name()) { ++ // reached another method name so no more overloaded methods ++ method_was = deleted; ++ break; ++ } ++ if (k_old_method->signature() == m->signature()) { ++ // found a match so swap the methods ++ k_new_methods->obj_at_put(ni, m); ++ k_new_methods->obj_at_put(nj, k_new_method); ++ k_new_method = m; ++ method_was = matched; ++ break; ++ } ++ } ++ ++ if (nj >= n_new_methods) { ++ // reached the end without a match; so method was deleted ++ method_was = deleted; ++ } ++ } ++ } ++ ++ switch (method_was) { ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED; ++ } ++ { ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = scratch_class->method_with_idnum(old_num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(old_num); ++ } ++ } ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ } ++ { ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; ++ } ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = scratch_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ } ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; ++ } ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ return JVMTI_ERROR_NONE; ++} ++ ++int VM_RedefineClasses::calculate_redefinition_flags(instanceKlassHandle new_class) { ++ ++ int result = Klass::NoRedefinition; ++ ++ ++ ++ TRACE_RC2("Comparing different class versions of class %s", new_class->name()->as_C_string()); ++ ++ assert(new_class->old_version() != NULL, "must have old version"); ++ instanceKlassHandle the_class(new_class->old_version()); ++ ++ // Check whether class is in the error init state. 
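
The parallel method walk above classifies every method as matched, added, or deleted in a single pass over the two name-sorted method arrays, searching forward through same-name overloads when only the signature differs. A simplified classifier (lexicographic comparison approximates Symbol::fast_compare, and the real code additionally swaps a late-found overload into position):

    #include <string>
    #include <vector>

    struct M { std::string name, sig; };
    enum Fate { MATCHED, ADDED, DELETED };

    // Classify one step of the parallel walk, mirroring the matched/added/
    // deleted decision in the patch.
    Fate classify(const std::vector<M>& old_m, const std::vector<M>& new_m,
                  size_t oi, size_t ni) {
      if (oi >= old_m.size()) return ADDED;      // only new methods remain
      if (ni >= new_m.size()) return DELETED;    // only old methods remain
      if (old_m[oi].name != new_m[ni].name)
        return old_m[oi].name > new_m[ni].name ? ADDED : DELETED;
      if (old_m[oi].sig == new_m[ni].sig) return MATCHED;
      // Same name, different signature: scan forward through the overloads.
      for (size_t nj = ni + 1; nj < new_m.size() && new_m[nj].name == old_m[oi].name; nj++)
        if (new_m[nj].sig == old_m[oi].sig) return MATCHED;
      return DELETED;
    }
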
++ if (the_class->is_in_error_state()) { ++ // TBD #5057930: special error code is needed in 1.6 ++ //result = Klass::union_redefinition_level(result, Klass::Invalid); ++ } ++ ++ int i; ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check superclasses ++ assert(new_class->super() == NULL || new_class->super()->klass_part()->is_newest_version(), ""); ++ if (the_class->super() != new_class->super()) { ++ // Super class changed ++ ++ klassOop cur_klass = the_class->super(); ++ while (cur_klass != NULL) { ++ if (!new_class->is_subclass_of(cur_klass->klass_part()->newest_version())) { ++ TRACE_RC2("Removed super class %s", cur_klass->klass_part()->name()->as_C_string()); ++ result = result | Klass::RemoveSuperType | Klass::ModifyInstances | Klass::ModifyClass; ++ ++ if (!cur_klass->klass_part()->has_subtype_changed()) { ++ TRACE_RC2("Subtype changed of class %s", cur_klass->klass_part()->name()->as_C_string()); ++ cur_klass->klass_part()->set_subtype_changed(true); ++ } ++ } ++ ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ ++ cur_klass = new_class->super(); ++ while (cur_klass != NULL) { ++ if (!the_class->is_subclass_of(cur_klass->klass_part()->old_version())) { ++ TRACE_RC2("Added super class %s", cur_klass->klass_part()->name()->as_C_string()); ++ result = result | Klass::ModifyClass | Klass::ModifyInstances; ++ } ++ cur_klass = cur_klass->klass_part()->super(); ++ } ++ } ++ ++ ////////////////////////////////////////////////////////////////////////////////////////////////////////// ++ // Check interfaces ++ ++ // Interfaces removed? ++ objArrayOop old_interfaces = the_class->transitive_interfaces(); ++ for (i = 0; i<old_interfaces->length(); i++) { ++ instanceKlassHandle old_interface((klassOop)old_interfaces->obj_at(i)); ++ if (!new_class->implements_interface_any_version(old_interface())) { ++ result = result | Klass::RemoveSuperType | Klass::ModifyClass; ++ TRACE_RC2("Removed interface %s", old_interface->name()->as_C_string()); ++ ++ if (!old_interface->has_subtype_changed()) { ++ TRACE_RC2("Subtype changed of interface %s", old_interface->name()->as_C_string()); ++ old_interface->set_subtype_changed(true); ++ } ++ } ++ } ++ ++ // Interfaces added? ++ objArrayOop new_interfaces = new_class->transitive_interfaces(); ++ for (i = 0; i<new_interfaces->length(); i++) { ++ if (!the_class->implements_interface_any_version((klassOop)new_interfaces->obj_at(i))) { ++ result = result | Klass::ModifyClass; ++ TRACE_RC2("Added interface %s", ((klassOop)new_interfaces->obj_at(i))->klass_part()->name()->as_C_string()); ++ } ++ } ++ ++ ++ // Check whether class modifiers are the same. ++ jushort old_flags = (jushort) the_class->access_flags().get_flags(); ++ jushort new_flags = (jushort) new_class->access_flags().get_flags(); ++ if (old_flags != new_flags) { ++ // TODO (tw): Can this have any effects? ++ } ++ ++ // Check if the number, names, types and order of fields declared in these classes ++ // are the same. ++ JavaFieldStream old_fs(the_class); ++ JavaFieldStream new_fs(new_class); ++ for (; !old_fs.done() && !new_fs.done(); old_fs.next(), new_fs.next()) { ++ // access ++ old_flags = old_fs.access_flags().as_short(); ++ new_flags = new_fs.access_flags().as_short(); ++ if ((old_flags ^ new_flags) & JVM_RECOGNIZED_FIELD_MODIFIERS) { ++ // TODO (tw) can this have any effect? 
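
The removed-supertype scan above walks the old version's super chain and flags every ancestor the new version no longer descends from; any hit forces the expensive RemoveSuperType path. A freestanding sketch (K2 is a stand-in carrying only a super pointer):

    #include <vector>

    struct K2 {
      K2* super = nullptr;
      bool is_subtype_of(const K2* other) const {  // walks the super chain
        for (const K2* c = this; c != nullptr; c = c->super)
          if (c == other) return true;
        return false;
      }
    };

    // Collect every supertype of the old version that the new version no
    // longer has; a non-empty result corresponds to Klass::RemoveSuperType.
    std::vector<const K2*> removed_supers(const K2* old_version, const K2* new_version) {
      std::vector<const K2*> removed;
      for (const K2* s = old_version->super; s != nullptr; s = s->super)
        if (!new_version->is_subtype_of(s)) removed.push_back(s);
      return removed;
    }
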
++ } ++ // offset ++ if (old_fs.offset() != new_fs.offset()) { ++ result = result | Klass::ModifyInstances; ++ } ++ // name and signature ++ Symbol* name_sym1 = the_class->constants()->symbol_at(old_fs.name_index()); ++ Symbol* sig_sym1 = the_class->constants()->symbol_at(old_fs.signature_index()); ++ Symbol* name_sym2 = new_class->constants()->symbol_at(new_fs.name_index()); ++ Symbol* sig_sym2 = new_class->constants()->symbol_at(new_fs.signature_index()); ++ if (name_sym1 != name_sym2 || sig_sym1 != sig_sym2) { ++ result = result | Klass::ModifyInstances; + } + } + + // If both streams aren't done then we have a differing number of + // fields. + if (!old_fs.done() || !new_fs.done()) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED; ++ result = result | Klass::ModifyInstances; + } + + // Do a parallel walk through the old and new methods. Detect +@@ -606,7 +1054,7 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + // the methods with that name, and the name mismatch code will + // handle them. + objArrayHandle k_old_methods(the_class->methods()); +- objArrayHandle k_new_methods(scratch_class->methods()); ++ objArrayHandle k_new_methods(new_class->methods()); + int n_old_methods = k_old_methods->length(); + int n_new_methods = k_new_methods->length(); + +@@ -672,2278 +1120,701 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions( + } + + switch (method_was) { +- case matched: +- // methods match, be sure modifiers do too +- old_flags = (jushort) k_old_method->access_flags().get_flags(); +- new_flags = (jushort) k_new_method->access_flags().get_flags(); +- if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED; +- } +- { +- u2 new_num = k_new_method->method_idnum(); +- u2 old_num = k_old_method->method_idnum(); +- if (new_num != old_num) { +- methodOop idnum_owner = scratch_class->method_with_idnum(old_num); +- if (idnum_owner != NULL) { +- // There is already a method assigned this idnum -- switch them +- idnum_owner->set_method_idnum(new_num); +- } +- k_new_method->set_method_idnum(old_num); +- swap_all_method_annotations(old_num, new_num, scratch_class); +- } +- } +- RC_TRACE(0x00008000, ("Method matched: new: %s [%d] == old: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni, +- k_old_method->name_and_sig_as_C_string(), oi)); +- // advance to next pair of methods +- ++oi; +- ++ni; +- break; +- case added: +- // method added, see if it is OK +- new_flags = (jushort) k_new_method->access_flags().get_flags(); +- if ((new_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // new methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; +- } +- { +- u2 num = the_class->next_method_idnum(); +- if (num == constMethodOopDesc::UNSET_IDNUM) { +- // cannot add any more methods +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED; +- } +- u2 new_num = k_new_method->method_idnum(); +- methodOop idnum_owner = scratch_class->method_with_idnum(num); ++ case matched: ++ // methods match, be sure modifiers do too ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((old_flags ^ new_flags) & ~(JVM_ACC_NATIVE)) { ++ // (tw) Can this have any effects? Probably yes on vtables? 
++ result = result | Klass::ModifyClass; ++ } ++ { ++ u2 new_num = k_new_method->method_idnum(); ++ u2 old_num = k_old_method->method_idnum(); ++ if (new_num != old_num) { ++ methodOop idnum_owner = new_class->method_with_idnum(old_num); + if (idnum_owner != NULL) { + // There is already a method assigned this idnum -- switch them + idnum_owner->set_method_idnum(new_num); + } +- k_new_method->set_method_idnum(num); +- swap_all_method_annotations(new_num, num, scratch_class); +- } +- RC_TRACE(0x00008000, ("Method added: new: %s [%d]", +- k_new_method->name_and_sig_as_C_string(), ni)); +- ++ni; // advance to next new method +- break; +- case deleted: +- // method deleted, see if it is OK +- old_flags = (jushort) k_old_method->access_flags().get_flags(); +- if ((old_flags & JVM_ACC_PRIVATE) == 0 +- // hack: private should be treated as final, but alas +- || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 +- ) { +- // deleted methods must be private +- return JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED; +- } +- RC_TRACE(0x00008000, ("Method deleted: old: %s [%d]", +- k_old_method->name_and_sig_as_C_string(), oi)); +- ++oi; // advance to next old method +- break; +- default: +- ShouldNotReachHere(); +- } +- } +- +- return JVMTI_ERROR_NONE; +-} +- +- +-// Find new constant pool index value for old constant pool index value +-// by seaching the index map. Returns zero (0) if there is no mapped +-// value for the old constant pool index. +-int VM_RedefineClasses::find_new_index(int old_index) { +- if (_index_map_count == 0) { +- // map is empty so nothing can be found +- return 0; +- } +- +- if (old_index < 1 || old_index >= _index_map_p->length()) { +- // The old_index is out of range so it is not mapped. This should +- // not happen in regular constant pool merging use, but it can +- // happen if a corrupt annotation is processed. +- return 0; +- } +- +- int value = _index_map_p->at(old_index); +- if (value == -1) { +- // the old_index is not mapped +- return 0; +- } +- +- return value; +-} // end find_new_index() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// class pair. Otherwise, returns false. +-bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_Class && t1 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_Class && t2 != JVM_CONSTANT_UnresolvedClass) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->klass_name_at(index1)->as_C_string(); +- char *s2 = cp2->klass_name_at(index2)->as_C_string(); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_class_mismatch() +- +- +-// Returns true if the current mismatch is due to a resolved/unresolved +-// string pair. Otherwise, returns false. 
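
The idnum bookkeeping in the matched case above keeps JNI method IDs stable across the redefinition: the matched method reclaims its old idnum, and whichever new method currently holds that number takes the displaced one instead. A sketch over a plain method vector (hypothetical Meth type; idnum is the stable identity, index is declaration order):

    #include <vector>

    struct Meth { int idnum; };

    // Give 'matched_new' the idnum of its old counterpart; whoever already
    // owns that idnum in the new class takes the displaced number.
    void preserve_idnum(std::vector<Meth>& new_methods, Meth& matched_new, int old_idnum) {
      int displaced = matched_new.idnum;
      for (Meth& m : new_methods)
        if (m.idnum == old_idnum) { m.idnum = displaced; break; }
      matched_new.idnum = old_idnum;
    }
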
+-bool VM_RedefineClasses::is_unresolved_string_mismatch(constantPoolHandle cp1, +- int index1, constantPoolHandle cp2, int index2) { +- +- jbyte t1 = cp1->tag_at(index1).value(); +- if (t1 != JVM_CONSTANT_String && t1 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- jbyte t2 = cp2->tag_at(index2).value(); +- if (t2 != JVM_CONSTANT_String && t2 != JVM_CONSTANT_UnresolvedString) { +- return false; // wrong entry type; not our special case +- } +- +- if (t1 == t2) { +- return false; // not a mismatch; not our special case +- } +- +- char *s1 = cp1->string_at_noresolve(index1); +- char *s2 = cp2->string_at_noresolve(index2); +- if (strcmp(s1, s2) != 0) { +- return false; // strings don't match; not our special case +- } +- +- return true; // made it through the gauntlet; this is our special case +-} // end is_unresolved_string_mismatch() +- +- +-jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { +- // For consistency allocate memory using os::malloc wrapper. +- _scratch_classes = (instanceKlassHandle *) +- os::malloc(sizeof(instanceKlassHandle) * _class_count, mtInternal); +- if (_scratch_classes == NULL) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } +- +- ResourceMark rm(THREAD); +- +- JvmtiThreadState *state = JvmtiThreadState::state_for(JavaThread::current()); +- // state can only be NULL if the current thread is exiting which +- // should not happen since we're trying to do a RedefineClasses +- guarantee(state != NULL, "exiting thread calling load_new_class_versions"); +- for (int i = 0; i < _class_count; i++) { +- oop mirror = JNIHandles::resolve_non_null(_class_defs[i].klass); +- // classes for primitives cannot be redefined +- if (!is_modifiable_class(mirror)) { +- return JVMTI_ERROR_UNMODIFIABLE_CLASS; +- } +- klassOop the_class_oop = java_lang_Class::as_klassOop(mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); +- Symbol* the_class_sym = the_class->name(); +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loading name=%s kind=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), _class_load_kind, +- os::available_memory() >> 10)); +- +- ClassFileStream st((u1*) _class_defs[i].class_bytes, +- _class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); +- +- // Parse the stream. +- Handle the_class_loader(THREAD, the_class->class_loader()); +- Handle protection_domain(THREAD, the_class->protection_domain()); +- // Set redefined class handle in JvmtiThreadState class. +- // This redefined class is sent to agent event handler for class file +- // load hook event. +- state->set_class_being_redefined(&the_class, _class_load_kind); +- +- klassOop k = SystemDictionary::parse_stream(the_class_sym, +- the_class_loader, +- protection_domain, +- &st, +- THREAD); +- // Clear class_being_redefined just to be sure. 
+- state->clear_class_being_redefined(); +- +- // TODO: if this is retransform, and nothing changed we can skip it +- +- instanceKlassHandle scratch_class (THREAD, k); +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("parse_stream exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- +- if (ex_name == vmSymbols::java_lang_UnsupportedClassVersionError()) { +- return JVMTI_ERROR_UNSUPPORTED_VERSION; +- } else if (ex_name == vmSymbols::java_lang_ClassFormatError()) { +- return JVMTI_ERROR_INVALID_CLASS_FORMAT; +- } else if (ex_name == vmSymbols::java_lang_ClassCircularityError()) { +- return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; +- } else if (ex_name == vmSymbols::java_lang_NoClassDefFoundError()) { +- // The message will be "XXX (wrong name: YYY)" +- return JVMTI_ERROR_NAMES_DONT_MATCH; +- } else if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { // Just in case more exceptions can be thrown.. +- return JVMTI_ERROR_FAILS_VERIFICATION; +- } +- } +- +- // Ensure class is linked before redefine +- if (!the_class->is_linked()) { +- the_class->link_class(THREAD); +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, ("link_class exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- return JVMTI_ERROR_INTERNAL; +- } ++ k_new_method->set_method_idnum(old_num); ++ TRACE_RC2("swapping idnum of new and old method %d / %d!", new_num, old_num); ++ swap_all_method_annotations(old_num, new_num, new_class); + } + } +- +- // Do the validity checks in compare_and_normalize_class_versions() +- // before verifying the byte codes. By doing these checks first, we +- // limit the number of functions that require redirection from +- // the_class to scratch_class. In particular, we don't have to +- // modify JNI GetSuperclass() and thus won't change its performance. +- jvmtiError res = compare_and_normalize_class_versions(the_class, +- scratch_class); +- if (res != JVMTI_ERROR_NONE) { +- return res; ++ TRACE_RC3("Method matched: new: %s [%d] == old: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni, ++ k_old_method->name_and_sig_as_C_string(), oi); ++ // advance to next pair of methods ++ ++oi; ++ ++ni; ++ break; ++ case added: ++ // method added, see if it is OK ++ new_flags = (jushort) k_new_method->access_flags().get_flags(); ++ if ((new_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (new_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // new methods must be private ++ result = result | Klass::ModifyClass; + } +- +- // verify what the caller passed us + { +- // The bug 6214132 caused the verification to fail. +- // Information about the_class and scratch_class is temporarily +- // recorded into jvmtiThreadState. This data is used to redirect +- // the_class to scratch_class in the JVM_* functions called by the +- // verifier. Please, refer to jvmtiThreadState.hpp for the detailed +- // description. 
+- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify( +- scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes exception: '%s'", ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller the bytecodes are bad +- return JVMTI_ERROR_FAILS_VERIFICATION; +- } +- } +- +- res = merge_cp_and_rewrite(the_class, scratch_class, THREAD); +- if (res != JVMTI_ERROR_NONE) { +- return res; +- } +- +- if (VerifyMergedCPBytecodes) { +- // verify what we have done during constant pool merging +- { +- RedefineVerifyMark rvm(&the_class, &scratch_class, state); +- Verifier::verify(scratch_class, Verifier::ThrowException, true, THREAD); +- } +- +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000002, THREAD, +- ("verify_byte_codes post merge-CP exception: '%s'", +- ex_name->as_C_string())); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- // tell the caller that constant pool merging screwed up +- return JVMTI_ERROR_INTERNAL; +- } +- } +- } +- +- Rewriter::rewrite(scratch_class, THREAD); +- if (!HAS_PENDING_EXCEPTION) { +- Rewriter::relocate_and_link(scratch_class, THREAD); +- } +- if (HAS_PENDING_EXCEPTION) { +- Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name(); +- CLEAR_PENDING_EXCEPTION; +- if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) { +- return JVMTI_ERROR_OUT_OF_MEMORY; +- } else { +- return JVMTI_ERROR_INTERNAL; +- } +- } +- +- _scratch_classes[i] = scratch_class; +- +- // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("loaded name=%s (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), os::available_memory() >> 10)); +- } +- +- return JVMTI_ERROR_NONE; +-} +- +- +-// Map old_index to new_index as needed. scratch_cp is only needed +-// for RC_TRACE() calls. +-void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp, +- int old_index, int new_index) { +- if (find_new_index(old_index) != 0) { +- // old_index is already mapped +- return; +- } +- +- if (old_index == new_index) { +- // no mapping is needed +- return; +- } +- +- _index_map_p->at_put(old_index, new_index); +- _index_map_count++; +- +- RC_TRACE(0x00040000, ("mapped tag %d at index %d to %d", +- scratch_cp->tag_at(old_index).value(), old_index, new_index)); +-} // end map_index() +- +- +-// Merge old_cp and scratch_cp and return the results of the merge via +-// merge_cp_p. The number of entries in *merge_cp_p is returned via +-// merge_cp_length_p. The entries in old_cp occupy the same locations +-// in *merge_cp_p. Also creates a map of indices from entries in +-// scratch_cp to the corresponding entry in *merge_cp_p. Index map +-// entries are only created for entries in scratch_cp that occupy a +-// different location in *merged_cp_p. 
+-bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS) { +- +- if (merge_cp_p == NULL) { +- assert(false, "caller must provide scatch constantPool"); +- return false; // robustness +- } +- if (merge_cp_length_p == NULL) { +- assert(false, "caller must provide scatch CP length"); +- return false; // robustness +- } +- // Worst case we need old_cp->length() + scratch_cp()->length(), +- // but the caller might be smart so make sure we have at least +- // the minimum. +- if ((*merge_cp_p)->length() < old_cp->length()) { +- assert(false, "merge area too small"); +- return false; // robustness +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("old_cp_len=%d, scratch_cp_len=%d", old_cp->length(), +- scratch_cp->length())); +- +- { +- // Pass 0: +- // The old_cp is copied to *merge_cp_p; this means that any code +- // using old_cp does not have to change. This work looks like a +- // perfect fit for constantPoolOop::copy_cp_to(), but we need to +- // handle one special case: +- // - revert JVM_CONSTANT_Class to JVM_CONSTANT_UnresolvedClass +- // This will make verification happy. +- +- int old_i; // index into old_cp +- +- // index zero (0) is not used in constantPools +- for (old_i = 1; old_i < old_cp->length(); old_i++) { +- // leave debugging crumb +- jbyte old_tag = old_cp->tag_at(old_i).value(); +- switch (old_tag) { +- case JVM_CONSTANT_Class: +- case JVM_CONSTANT_UnresolvedClass: +- // revert the copy to JVM_CONSTANT_UnresolvedClass +- // May be resolving while calling this so do the same for +- // JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition) +- (*merge_cp_p)->unresolved_klass_at_put(old_i, +- old_cp->klass_name_at(old_i)); +- break; +- +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // just copy the entry to *merge_cp_p, but double and long take +- // two constant pool entries +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- old_i++; +- break; +- +- default: +- // just copy the entry to *merge_cp_p +- constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); +- break; +- } +- } // end for each old_cp entry +- +- // We don't need to sanity check that *merge_cp_length_p is within +- // *merge_cp_p bounds since we have the minimum on-entry check above. +- (*merge_cp_length_p) = old_i; +- } +- +- // merge_cp_len should be the same as old_cp->length() at this point +- // so this trace message is really a "warm-and-breathing" message. +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 0: merge_cp_len=%d", *merge_cp_length_p)); +- +- int scratch_i; // index into scratch_cp +- { +- // Pass 1a: +- // Compare scratch_cp entries to the old_cp entries that we have +- // already copied to *merge_cp_p. In this pass, we are eliminating +- // exact duplicates (matching entry at same index) so we only +- // compare entries in the common indice range. 
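Pass 0 above copies old_cp verbatim into the merge pool, except that resolved class entries are demoted back to unresolved for the verifier's sake and long/double entries consume two slots. The same loop in miniature, over simplified stand-in types rather than the real constantPoolOop layout:

    #include <cstddef>
    #include <vector>

    enum Tag { Utf8, Class, UnresolvedClass, Long, Double };  // simplified tags
    struct CpEntry { Tag tag; /* payload omitted */ };

    // Copy old_cp into the pre-sized merge pool, demoting resolved class
    // entries and skipping the placeholder slot after long/double entries.
    static size_t copy_pass0(const std::vector<CpEntry>& old_cp, std::vector<CpEntry>& merge_cp) {
      size_t i = 1;  // slot 0 is unused in JVM constant pools
      for (; i < old_cp.size(); ++i) {
        CpEntry e = old_cp[i];
        if (e.tag == Class) e.tag = UnresolvedClass;  // keeps the verifier happy
        merge_cp[i] = e;
        if (e.tag == Long || e.tag == Double) ++i;    // two-slot entries
      }
      return i;  // becomes the initial merged length
    }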
+- int increment = 1; +- int pass1a_length = MIN2(old_cp->length(), scratch_cp->length()); +- for (scratch_i = 1; scratch_i < pass1a_length; scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- bool match = scratch_cp->compare_entry_to(scratch_i, *merge_cp_p, +- scratch_i, CHECK_0); +- if (match) { +- // found a match at the same index so nothing more to do +- continue; +- } else if (is_unresolved_class_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved class entry at the same index +- // with the same string value. Since Pass 0 reverted any +- // class entries to unresolved class entries in *merge_cp_p, +- // we go with the unresolved class entry. +- continue; +- } else if (is_unresolved_string_mismatch(scratch_cp, scratch_i, +- *merge_cp_p, scratch_i)) { +- // The mismatch in compare_entry_to() above is because of a +- // resolved versus unresolved string entry at the same index +- // with the same string value. We can live with whichever +- // happens to be at scratch_i in *merge_cp_p. +- continue; +- } +- +- int found_i = scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, +- CHECK_0); +- if (found_i != 0) { +- guarantee(found_i != scratch_i, +- "compare_entry_to() and find_matching_entry() do not agree"); +- +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // The find_matching_entry() call above could fail to find a match +- // due to a resolved versus unresolved class or string entry situation +- // like we solved above with the is_unresolved_*_mismatch() calls. +- // However, we would have to call is_unresolved_*_mismatch() over +- // all of *merge_cp_p (potentially) and that doesn't seem to be +- // worth the time. +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. +- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1a: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- +- if (scratch_i < scratch_cp->length()) { +- // Pass 1b: +- // old_cp is smaller than scratch_cp so there are entries in +- // scratch_cp that we have not yet processed. We take care of +- // those now. +- int increment = 1; +- for (; scratch_i < scratch_cp->length(); scratch_i += increment) { +- switch (scratch_cp->tag_at(scratch_i).value()) { +- case JVM_CONSTANT_Double: +- case JVM_CONSTANT_Long: +- // double and long take two constant pool entries +- increment = 2; +- break; +- +- default: +- increment = 1; +- break; +- } +- +- int found_i = +- scratch_cp->find_matching_entry(scratch_i, *merge_cp_p, CHECK_0); +- if (found_i != 0) { +- // Found a matching entry somewhere else in *merge_cp_p so +- // just need a mapping entry. +- map_index(scratch_cp, scratch_i, found_i); +- continue; +- } +- +- // No match found so we have to append this entry and any unique +- // referenced entries to *merge_cp_p. 
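Passes 1a and 1b above boil down to: keep an entry where it is if the same slot already matches, map it if an equal entry exists elsewhere in the merged pool, and append it otherwise. A compilable model with ints standing in for pool entries (equal value meaning same constant):

    #include <algorithm>
    #include <vector>

    // Returns map[i]: where scratch entry i ended up in the merged pool;
    // 0 means the index is unchanged, as with the deleted map_index().
    static std::vector<int> build_index_map(std::vector<int>& merged, const std::vector<int>& scratch) {
      std::vector<int> map(scratch.size(), 0);
      for (size_t i = 1; i < scratch.size(); ++i) {
        if (i < merged.size() && merged[i] == scratch[i]) continue;  // exact match at same slot
        auto it = std::find(merged.begin() + 1, merged.end(), scratch[i]);
        if (it == merged.end()) {            // unique entry: append it
          merged.push_back(scratch[i]);
          it = merged.end() - 1;
        }
        map[i] = (int)(it - merged.begin()); // record the mapping entry
      }
      return map;
    }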
+- append_entry(scratch_cp, scratch_i, merge_cp_p, merge_cp_length_p, +- CHECK_0); +- } +- +- RC_TRACE_WITH_THREAD(0x00020000, THREAD, +- ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d", +- *merge_cp_length_p, scratch_i, _index_map_count)); +- } +- +- return true; +-} // end merge_constant_pools() +- +- +-// Merge constant pools between the_class and scratch_class and +-// potentially rewrite bytecodes in scratch_class to use the merged +-// constant pool. +-jvmtiError VM_RedefineClasses::merge_cp_and_rewrite( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class, +- TRAPS) { +- // worst case merged constant pool length is old and new combined +- int merge_cp_length = the_class->constants()->length() +- + scratch_class->constants()->length(); +- +- constantPoolHandle old_cp(THREAD, the_class->constants()); +- constantPoolHandle scratch_cp(THREAD, scratch_class->constants()); +- +- // Constant pools are not easily reused so we allocate a new one +- // each time. +- // merge_cp is created unsafe for concurrent GC processing. It +- // should be marked safe before discarding it. Even though +- // garbage, if it crosses a card boundary, it may be scanned +- // in order to find the start of the first complete object on the card. +- constantPoolHandle merge_cp(THREAD, +- oopFactory::new_constantPool(merge_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- int orig_length = old_cp->orig_length(); +- if (orig_length == 0) { +- // This old_cp is an actual original constant pool. We save +- // the original length in the merged constant pool so that +- // merge_constant_pools() can be more efficient. If a constant +- // pool has a non-zero orig_length() value, then that constant +- // pool was created by a merge operation in RedefineClasses. +- merge_cp->set_orig_length(old_cp->length()); +- } else { +- // This old_cp is a merged constant pool from a previous +- // RedefineClasses() calls so just copy the orig_length() +- // value. +- merge_cp->set_orig_length(old_cp->orig_length()); +- } +- +- ResourceMark rm(THREAD); +- _index_map_count = 0; +- _index_map_p = new intArray(scratch_cp->length(), -1); +- +- bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp, +- &merge_cp_length, THREAD); +- if (!result) { +- // The merge can fail due to memory allocation failure or due +- // to robustness checks. +- return JVMTI_ERROR_INTERNAL; +- } +- +- RC_TRACE_WITH_THREAD(0x00010000, THREAD, +- ("merge_cp_len=%d, index_map_len=%d", merge_cp_length, _index_map_count)); +- +- if (_index_map_count == 0) { +- // there is nothing to map between the new and merged constant pools +- +- if (old_cp->length() == scratch_cp->length()) { +- // The old and new constant pools are the same length and the +- // index map is empty. This means that the three constant pools +- // are equivalent (but not the same). Unfortunately, the new +- // constant pool has not gone through link resolution nor have +- // the new class bytecodes gone through constant pool cache +- // rewriting so we can't use the old constant pool with the new +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else if (old_cp->length() < scratch_cp->length()) { +- // The old constant pool has fewer entries than the new constant +- // pool and the index map is empty. This means the new constant +- // pool is a superset of the old constant pool. 
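The orig_length handling above distinguishes a class's original pool from one produced by an earlier merge, so repeated RedefineClasses calls can still tell original entries from appended ones. The rule, as a tiny standalone helper over a stand-in Pool struct:

    struct Pool { int length; int orig_length; };  // orig_length == 0: an original pool

    // A merged pool inherits the length of the very first, pre-merge pool.
    static void inherit_orig_length(Pool& merge_cp, const Pool& old_cp) {
      merge_cp.orig_length = (old_cp.orig_length == 0) ? old_cp.length : old_cp.orig_length;
    }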
However, the old +- // class bytecodes have already gone through constant pool cache +- // rewriting so we can't use the new constant pool with the old +- // class. +- +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); // toss the merged constant pool +- } else { +- // The old constant pool has more entries than the new constant +- // pool and the index map is empty. This means that both the old +- // and merged constant pools are supersets of the new constant +- // pool. +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool; the previous new constant pool will +- // get GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- // drop local ref to the merged constant pool +- merge_cp()->set_is_conc_safe(true); +- merge_cp = constantPoolHandle(); +- } +- } else { +- if (RC_TRACE_ENABLED(0x00040000)) { +- // don't want to loop unless we are tracing +- int count = 0; +- for (int i = 1; i < _index_map_p->length(); i++) { +- int value = _index_map_p->at(i); +- +- if (value != -1) { +- RC_TRACE_WITH_THREAD(0x00040000, THREAD, +- ("index_map[%d]: old=%d new=%d", count, i, value)); +- count++; +- } +- } +- } +- +- // We have entries mapped between the new and merged constant pools +- // so we have to rewrite some constant pool references. +- if (!rewrite_cp_refs(scratch_class, THREAD)) { +- return JVMTI_ERROR_INTERNAL; +- } +- +- // Replace the new constant pool with a shrunken copy of the +- // merged constant pool so now the rewritten bytecodes have +- // valid references; the previous new constant pool will get +- // GCed. +- set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true, +- THREAD); +- merge_cp()->set_is_conc_safe(true); +- } +- assert(old_cp()->is_conc_safe(), "Just checking"); +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +- +- return JVMTI_ERROR_NONE; +-} // end merge_cp_and_rewrite() +- +- +-// Rewrite constant pool references in klass scratch_class. +-bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class, +- TRAPS) { +- +- // rewrite constant pool references in the methods: +- if (!rewrite_cp_refs_in_methods(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the class_annotations: +- if (!rewrite_cp_refs_in_class_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the fields_annotations: +- if (!rewrite_cp_refs_in_fields_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the methods_annotations: +- if (!rewrite_cp_refs_in_methods_annotations(scratch_class, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the methods_parameter_annotations: +- if (!rewrite_cp_refs_in_methods_parameter_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- // rewrite constant pool references in the methods_default_annotations: +- if (!rewrite_cp_refs_in_methods_default_annotations(scratch_class, +- THREAD)) { +- // propagate failure back to caller +- return false; +- } +- +- return true; +-} // end rewrite_cp_refs() +- +- +-// Rewrite constant pool references in the methods. 
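When the index map is empty, the deleted merge_cp_and_rewrite() above picks between three outcomes based only on the pool lengths. The selection logic, modeled as a standalone function (enum names are descriptive, not VM identifiers):

    enum MergeOutcome { KeepScratchPool, InstallShrunkenMergedPool, RewriteRefsThenInstall };

    static MergeOutcome decide(int old_len, int scratch_len, int index_map_count) {
      if (index_map_count != 0) return RewriteRefsThenInstall;     // some scratch indices moved
      if (old_len > scratch_len) return InstallShrunkenMergedPool; // merged pool supersets the new one
      // Equal lengths (equivalent pools) or old < new (new is a superset):
      // the merged pool is tossed and the scratch class keeps its own pool.
      return KeepScratchPool;
    }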
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods(THREAD, scratch_class->methods()); +- +- if (methods.is_null() || methods->length() == 0) { +- // no methods so nothing to do +- return true; +- } +- +- // rewrite constant pool references in the methods: +- for (int i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- methodHandle new_method; +- rewrite_cp_refs_in_method(method, &new_method, CHECK_false); +- if (!new_method.is_null()) { +- // the method has been replaced so save the new method version +- methods->obj_at_put(i, new_method()); +- } +- } +- +- return true; +-} +- +- +-// Rewrite constant pool references in the specific method. This code +-// was adapted from Rewriter::rewrite_method(). +-void VM_RedefineClasses::rewrite_cp_refs_in_method(methodHandle method, +- methodHandle *new_method_p, TRAPS) { +- +- *new_method_p = methodHandle(); // default is no new method +- +- // We cache a pointer to the bytecodes here in code_base. If GC +- // moves the methodOop, then the bytecodes will also move which +- // will likely cause a crash. We create a No_Safepoint_Verifier +- // object to detect whether we pass a possible safepoint in this +- // code block. +- No_Safepoint_Verifier nsv; +- +- // Bytecodes and their length +- address code_base = method->code_base(); +- int code_length = method->code_size(); +- +- int bc_length; +- for (int bci = 0; bci < code_length; bci += bc_length) { +- address bcp = code_base + bci; +- Bytecodes::Code c = (Bytecodes::Code)(*bcp); +- +- bc_length = Bytecodes::length_for(c); +- if (bc_length == 0) { +- // More complicated bytecodes report a length of zero so +- // we have to try again a slightly different way. +- bc_length = Bytecodes::length_at(method(), bcp); +- } +- +- assert(bc_length != 0, "impossible bytecode length"); +- +- switch (c) { +- case Bytecodes::_ldc: +- { +- int cp_index = *(bcp + 1); +- int new_index = find_new_index(cp_index); +- +- if (StressLdcRewrite && new_index == 0) { +- // If we are stressing ldc -> ldc_w rewriting, then we +- // always need a new_index value. +- new_index = cp_index; +- } +- if (new_index != 0) { +- // the original index is mapped so we have more work to do +- if (!StressLdcRewrite && new_index <= max_jubyte) { +- // The new value can still use ldc instead of ldc_w +- // unless we are trying to stress ldc -> ldc_w rewriting +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- *(bcp + 1) = new_index; +- } else { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s->ldc_w@" INTPTR_FORMAT " old=%d, new=%d", +- Bytecodes::name(c), bcp, cp_index, new_index)); +- // the new value needs ldc_w instead of ldc +- u_char inst_buffer[4]; // max instruction size is 4 bytes +- bcp = (address)inst_buffer; +- // construct new instruction sequence +- *bcp = Bytecodes::_ldc_w; +- bcp++; +- // Rewriter::rewrite_method() does not rewrite ldc -> ldc_w. +- // See comment below for difference between put_Java_u2() +- // and put_native_u2(). 
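The method scan above advances opcode by opcode: fixed-length opcodes come from a table, and opcodes that report zero length (tableswitch, lookupswitch, wide) need a slow path that inspects the bytes at the current bci. A toy version of the walk; the length table below covers only a few opcodes and is not the real Bytecodes data:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static int length_for(uint8_t op) {
      switch (op) {
        case 0x00: return 1;  // nop
        case 0x10: return 2;  // bipush
        case 0x11: return 3;  // sipush
        case 0x12: return 2;  // ldc
        case 0xAA: return 0;  // tableswitch: variable length
        default:   return 1;  // placeholder for the full table
      }
    }

    static void walk(const std::vector<uint8_t>& code) {
      for (size_t bci = 0; bci < code.size(); ) {
        uint8_t op = code[bci];
        int len = length_for(op);
        if (len == 0) len = 4;  // stand-in for the padded, bci-dependent slow path
        std::printf("bci %zu: opcode 0x%02x length %d\n", bci, op, len);
        bci += (size_t)len;
      }
    }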
+- Bytes::put_Java_u2(bcp, new_index); +- +- Relocator rc(method, NULL /* no RelocatorListener needed */); +- methodHandle m; +- { +- Pause_No_Safepoint_Verifier pnsv(&nsv); +- +- // ldc is 2 bytes and ldc_w is 3 bytes +- m = rc.insert_space_at(bci, 3, inst_buffer, THREAD); +- if (m.is_null() || HAS_PENDING_EXCEPTION) { +- guarantee(false, "insert_space_at() failed"); +- } +- } +- +- // return the new method so that the caller can update +- // the containing class +- *new_method_p = method = m; +- // switch our bytecode processing loop from the old method +- // to the new method +- code_base = method->code_base(); +- code_length = method->code_size(); +- bcp = code_base + bci; +- c = (Bytecodes::Code)(*bcp); +- bc_length = Bytecodes::length_for(c); +- assert(bc_length != 0, "sanity check"); +- } // end we need ldc_w instead of ldc +- } // end if there is a mapped index +- } break; +- +- // these bytecodes have a two-byte constant pool index +- case Bytecodes::_anewarray : // fall through +- case Bytecodes::_checkcast : // fall through +- case Bytecodes::_getfield : // fall through +- case Bytecodes::_getstatic : // fall through +- case Bytecodes::_instanceof : // fall through +- case Bytecodes::_invokeinterface: // fall through +- case Bytecodes::_invokespecial : // fall through +- case Bytecodes::_invokestatic : // fall through +- case Bytecodes::_invokevirtual : // fall through +- case Bytecodes::_ldc_w : // fall through +- case Bytecodes::_ldc2_w : // fall through +- case Bytecodes::_multianewarray : // fall through +- case Bytecodes::_new : // fall through +- case Bytecodes::_putfield : // fall through +- case Bytecodes::_putstatic : +- { +- address p = bcp + 1; +- int cp_index = Bytes::get_Java_u2(p); +- int new_index = find_new_index(cp_index); +- if (new_index != 0) { +- // the original index is mapped so update w/ new value +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("%s@" INTPTR_FORMAT " old=%d, new=%d", Bytecodes::name(c), +- bcp, cp_index, new_index)); +- // Rewriter::rewrite_method() uses put_native_u2() in this +- // situation because it is reusing the constant pool index +- // location for a native index into the constantPoolCache. +- // Since we are updating the constant pool index prior to +- // verification and constantPoolCache initialization, we +- // need to keep the new index in Java byte order. +- Bytes::put_Java_u2(p, new_index); +- } +- } break; ++ u2 num = the_class->next_method_idnum(); ++ if (num == constMethodOopDesc::UNSET_IDNUM) { ++ // cannot add any more methods ++ result = result | Klass::ModifyClass; ++ } ++ u2 new_num = k_new_method->method_idnum(); ++ methodOop idnum_owner = new_class->method_with_idnum(num); ++ if (idnum_owner != NULL) { ++ // There is already a method assigned this idnum -- switch them ++ idnum_owner->set_method_idnum(new_num); ++ } ++ k_new_method->set_method_idnum(num); ++ swap_all_method_annotations(new_num, num, new_class); + } +- } // end for each bytecode +-} // end rewrite_cp_refs_in_method() +- +- +-// Rewrite constant pool references in the class_annotations field. 
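The relocation above exists because ldc carries a one-byte pool index: a remapped index above 255 forces the three-byte ldc_w form, and growing an instruction shifts every later bci, invalidating branch offsets and tables. A standalone sketch of the widening decision (the real code delegates the insertion to a Relocator rather than editing a vector):

    #include <cstdint>
    #include <vector>

    // Returns true if an in-place patch sufficed; false means the bytecode
    // stream grew and the caller must fix up branches and tables.
    static bool patch_ldc(std::vector<uint8_t>& code, size_t bci, uint16_t new_index) {
      const uint8_t LDC_W = 0x13;
      if (new_index <= 0xFF) {
        code[bci + 1] = (uint8_t)new_index;  // one-byte form still fits
        return true;
      }
      // Java byte order (big-endian), matching Bytes::put_Java_u2 above.
      uint8_t wide[3] = { LDC_W, (uint8_t)(new_index >> 8), (uint8_t)(new_index & 0xFF) };
      code.erase(code.begin() + bci, code.begin() + bci + 2);  // remove 2-byte ldc
      code.insert(code.begin() + bci, wide, wide + 3);         // insert 3-byte ldc_w
      return false;
    }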
+-bool VM_RedefineClasses::rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ TRACE_RC1("Method added: new: %s [%d]", ++ k_new_method->name_and_sig_as_C_string(), ni); ++ ++ni; // advance to next new method ++ break; ++ case deleted: ++ // method deleted, see if it is OK ++ old_flags = (jushort) k_old_method->access_flags().get_flags(); ++ if ((old_flags & JVM_ACC_PRIVATE) == 0 ++ // hack: private should be treated as final, but alas ++ || (old_flags & (JVM_ACC_FINAL|JVM_ACC_STATIC)) == 0 ++ ) { ++ // deleted methods must be private ++ result = result | Klass::ModifyClass; ++ } ++ TRACE_RC1("Method deleted: old: %s [%d]", ++ k_old_method->name_and_sig_as_C_string(), oi); ++ ++oi; // advance to next old method ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } + +- typeArrayHandle class_annotations(THREAD, +- scratch_class->class_annotations()); +- if (class_annotations.is_null() || class_annotations->length() == 0) { +- // no class_annotations so nothing to do +- return true; ++ if (new_class()->size() != new_class->old_version()->size()) { ++ result |= Klass::ModifyClassSize; + } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_annotations length=%d", class_annotations->length())); ++ if (new_class->size_helper() != ((instanceKlass*)(new_class->old_version()->klass_part()))->size_helper()) { ++ result |= Klass::ModifyInstanceSize; ++ } + +- int byte_i = 0; // byte index into class_annotations +- return rewrite_cp_refs_in_annotations_typeArray(class_annotations, byte_i, +- THREAD); ++ // (tw) Check method bodies to be able to return NoChange? ++ return result; + } + ++void VM_RedefineClasses::calculate_instance_update_information(klassOop new_version) { + +-// Rewrite constant pool references in an annotations typeArray. 
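The comparison above accumulates a bitmask describing how invasive the redefinition is, which later drives whether heap instances must be updated. The flag names appear in the patch (Klass::ModifyClass and friends); the concrete bit values below are illustrative assumptions, not the patched Klass constants:

    enum RedefinitionFlags {
      NoChange           = 0,
      ModifyClass        = 1 << 0,  // methods added/removed outside the allowed private cases
      ModifyClassSize    = 1 << 1,  // the klass object itself changed size
      ModifyInstanceSize = 1 << 2,  // instance layout changed size: heap objects need updating
      RemoveSuperType    = 1 << 3   // unsupported; doit() asserts this never happens
    };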
This +-// "structure" is adapted from the RuntimeVisibleAnnotations_attribute +-// that is described in section 4.8.15 of the 2nd-edition of the VM spec: +-// +-// annotations_typeArray { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for num_annotations field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for num_annotations field")); +- return false; +- } ++ class CalculateFieldUpdates : public FieldClosure { + +- u2 num_annotations = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; ++ private: ++ instanceKlass* _old_ik; ++ GrowableArray<int> _update_info; ++ int _position; ++ bool _copy_backwards; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_annotations=%d", num_annotations)); ++ public: + +- int calc_num_annotations = 0; +- for (; calc_num_annotations < num_annotations; calc_num_annotations++) { +- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad annotation_struct at %d", calc_num_annotations)); +- // propagate failure back to caller +- return false; ++ bool does_copy_backwards() { ++ return _copy_backwards; + } +- } +- assert(num_annotations == calc_num_annotations, "sanity check"); + +- return true; +-} // end rewrite_cp_refs_in_annotations_typeArray() ++ CalculateFieldUpdates(instanceKlass* old_ik) : ++ _old_ik(old_ik), _position(instanceOopDesc::base_offset_in_bytes()), _copy_backwards(false) { ++ _update_info.append(_position); ++ _update_info.append(0); ++ } + ++ GrowableArray<int> &finish() { ++ _update_info.append(0); ++ return _update_info; ++ } + +-// Rewrite constant pool references in the annotation struct portion of +-// an annotations_typeArray. 
This "structure" is from section 4.8.15 of +-// the 2nd-edition of the VM spec: +-// +-// struct annotation { +-// u2 type_index; +-// u2 num_element_value_pairs; +-// { +-// u2 element_name_index; +-// element_value value; +-// } element_value_pairs[num_element_value_pairs]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { +- if ((byte_i_ref + 2 + 2) > annotations_typeArray->length()) { +- // not enough room for smallest annotation_struct +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for annotation_struct")); +- return false; +- } ++ void do_field(fieldDescriptor* fd) { ++ int alignment = fd->offset() - _position; ++ if (alignment > 0) { ++ // This field was aligned, so we need to make sure that we fill the gap ++ fill(alignment); ++ } + +- u2 type_index = rewrite_cp_ref_in_annotation_data(annotations_typeArray, +- byte_i_ref, "mapped old type_index=%d", THREAD); ++ assert(_position == fd->offset(), "must be correct offset!"); + +- u2 num_element_value_pairs = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr( +- byte_i_ref)); +- byte_i_ref += 2; ++ fieldDescriptor old_fd; ++ if (_old_ik->find_field(fd->name(), fd->signature(), false, &old_fd) != NULL) { ++ // Found field in the old class, copy ++ copy(old_fd.offset(), type2aelembytes(fd->field_type())); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_index=%d num_element_value_pairs=%d", type_index, +- num_element_value_pairs)); ++ if (old_fd.offset() < fd->offset()) { ++ _copy_backwards = true; ++ } + +- int calc_num_element_value_pairs = 0; +- for (; calc_num_element_value_pairs < num_element_value_pairs; +- calc_num_element_value_pairs++) { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for another element_name_index, let alone +- // the rest of another component +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for element_name_index")); +- return false; ++ // Transfer special flags ++ fd->set_is_field_modification_watched(old_fd.is_field_modification_watched()); ++ fd->set_is_field_access_watched(old_fd.is_field_access_watched()); ++ } else { ++ // New field, fill ++ fill(type2aelembytes(fd->field_type())); ++ } + } + +- u2 element_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old element_name_index=%d", THREAD); ++ private: + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("element_name_index=%d", element_name_index)); +- +- if (!rewrite_cp_refs_in_element_value(annotations_typeArray, +- byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad element_value at %d", calc_num_element_value_pairs)); +- // propagate failure back to caller +- return false; ++ void fill(int size) { ++ if (_update_info.length() > 0 && _update_info.at(_update_info.length() - 1) < 0) { ++ (*_update_info.adr_at(_update_info.length() - 1)) -= size; ++ } else { ++ _update_info.append(-size); ++ } ++ _position += size; + } +- } // end for each component +- assert(num_element_value_pairs == calc_num_element_value_pairs, +- "sanity check"); +- +- return true; +-} // end rewrite_cp_refs_in_annotation_struct() +- +- +-// Rewrite a constant pool reference at the current position in +-// annotations_typeArray if needed. Returns the original constant +-// pool reference if a rewrite was not needed or the new constant +-// pool reference if a rewrite was needed. 
+-u2 VM_RedefineClasses::rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS) { +- +- address cp_index_addr = (address) +- annotations_typeArray->byte_at_addr(byte_i_ref); +- u2 old_cp_index = Bytes::get_Java_u2(cp_index_addr); +- u2 new_cp_index = find_new_index(old_cp_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, (trace_mesg, old_cp_index)); +- Bytes::put_Java_u2(cp_index_addr, new_cp_index); +- old_cp_index = new_cp_index; +- } +- byte_i_ref += 2; +- return old_cp_index; +-} +- +- +-// Rewrite constant pool references in the element_value portion of an +-// annotations_typeArray. This "structure" is from section 4.8.15.1 of +-// the 2nd-edition of the VM spec: +-// +-// struct element_value { +-// u1 tag; +-// union { +-// u2 const_value_index; +-// { +-// u2 type_name_index; +-// u2 const_name_index; +-// } enum_const_value; +-// u2 class_info_index; +-// annotation annotation_value; +-// struct { +-// u2 num_values; +-// element_value values[num_values]; +-// } array_value; +-// } value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_element_value( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS) { + +- if ((byte_i_ref + 1) > annotations_typeArray->length()) { +- // not enough room for a tag let alone the rest of an element_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a tag")); +- return false; +- } ++ void copy(int offset, int size) { ++ int prev_end = -1; ++ if (_update_info.length() > 0 && _update_info.at(_update_info.length() - 1) > 0) { ++ prev_end = _update_info.at(_update_info.length() - 2) + _update_info.at(_update_info.length() - 1); ++ } + +- u1 tag = annotations_typeArray->byte_at(byte_i_ref); +- byte_i_ref++; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("tag='%c'", tag)); +- +- switch (tag) { +- // These BaseType tag values are from Table 4.2 in VM spec: +- case 'B': // byte +- case 'C': // char +- case 'D': // double +- case 'F': // float +- case 'I': // int +- case 'J': // long +- case 'S': // short +- case 'Z': // boolean +- +- // The remaining tag values are from Table 4.8 in the 2nd-edition of +- // the VM spec: +- case 's': +- { +- // For the above tag values (including the BaseType values), +- // value.const_value_index is right union field. 
+- +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a const_value_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a const_value_index")); +- return false; ++ if (prev_end == offset) { ++ (*_update_info.adr_at(_update_info.length() - 2)) += size; ++ } else { ++ _update_info.append(size); ++ _update_info.append(offset); + } + +- u2 const_value_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_value_index=%d", THREAD); ++ _position += size; ++ } ++ }; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("const_value_index=%d", const_value_index)); +- } break; ++ instanceKlass* ik = instanceKlass::cast(new_version); ++ instanceKlass* old_ik = instanceKlass::cast(new_version->klass_part()->old_version()); ++ CalculateFieldUpdates cl(old_ik); ++ ik->do_nonstatic_fields(&cl); + +- case 'e': +- { +- // for the above tag value, value.enum_const_value is right union field ++ GrowableArray<int> result = cl.finish(); ++ ik->store_update_information(result); ++ ik->set_copying_backwards(cl.does_copy_backwards()); + +- if ((byte_i_ref + 4) > annotations_typeArray->length()) { +- // not enough room for a enum_const_value +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a enum_const_value")); +- return false; ++ IF_TRACE_RC2 { ++ TRACE_RC2("Instance update information for %s:", new_version->klass_part()->name()->as_C_string()); ++ if (cl.does_copy_backwards()) { ++ TRACE_RC2("\tDoes copy backwards!"); ++ } ++ for (int i=0; i<result.length(); i++) { ++ int curNum = result.at(i); ++ if (curNum < 0) { ++ TRACE_RC2("\t%d CLEAN", curNum); ++ } else if (curNum > 0) { ++ TRACE_RC2("\t%d COPY from %d", curNum, result.at(i + 1)); ++ i++; ++ } else { ++ TRACE_RC2("\tEND"); + } ++ } ++ } ++} + +- u2 type_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old type_name_index=%d", THREAD); +- +- u2 const_name_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old const_name_index=%d", THREAD); ++void VM_RedefineClasses::rollback() { ++ TRACE_RC1("Rolling back redefinition!"); ++ SystemDictionary::rollback_redefinition(); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("type_name_index=%d const_name_index=%d", type_name_index, +- const_name_index)); +- } break; ++ TRACE_RC1("After rolling back system dictionary!"); ++ for (int i=0; i<_new_classes->length(); i++) { ++ SystemDictionary::remove_from_hierarchy(_new_classes->at(i)); ++ } + +- case 'c': +- { +- // for the above tag value, value.class_info_index is right union field ++ for (int i=0; i<_new_classes->length(); i++) { ++ instanceKlassHandle new_class = _new_classes->at(i); ++ new_class->set_redefining(false); ++ new_class->old_version()->klass_part()->set_new_version(NULL); ++ new_class->set_old_version(NULL); ++ } + +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a class_info_index +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a class_info_index")); +- return false; +- } ++} + +- u2 class_info_index = rewrite_cp_ref_in_annotation_data( +- annotations_typeArray, byte_i_ref, +- "mapped old class_info_index=%d", THREAD); +- +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("class_info_index=%d", class_info_index)); +- } break; +- +- case '@': +- // For the above tag value, value.attr_value is the right union +- // field. This is a nested annotation. 
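The trace block above doubles as documentation of the update-information encoding: a positive entry is a byte count to copy and is followed by the source offset, a negative entry is a byte count to zero-fill (newly added fields), and zero terminates the stream. A standalone interpreter for that encoding, with the base header offset simplified to zero:

    #include <cstring>
    #include <vector>

    static void apply_update_info(char* new_obj, const char* old_obj, const std::vector<int>& info) {
      int dst = 0;  // destination cursor; the real code starts at the header size
      for (size_t i = 0; i < info.size(); ++i) {
        int n = info[i];
        if (n == 0) break;  // END
        if (n > 0) {        // COPY n bytes from the offset in the next entry
          std::memcpy(new_obj + dst, old_obj + info[++i], (size_t)n);
          dst += n;
        } else {            // CLEAN: zero-fill -n bytes for a new field
          std::memset(new_obj + dst, 0, (size_t)-n);
          dst += -n;
        }
      }
    }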
+- if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, +- byte_i_ref, THREAD)) { +- // propagate failure back to caller +- return false; +- } +- break; ++void VM_RedefineClasses::swap_marks(oop first, oop second) { ++ markOop first_mark = first->mark(); ++ markOop second_mark = second->mark(); ++ first->set_mark(second_mark); ++ second->set_mark(first_mark); ++} + +- case '[': +- { +- if ((byte_i_ref + 2) > annotations_typeArray->length()) { +- // not enough room for a num_values field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_values field")); +- return false; +- } + +- // For the above tag value, value.array_value is the right union +- // field. This is an array of nested element_value. +- u2 num_values = Bytes::get_Java_u2((address) +- annotations_typeArray->byte_at_addr(byte_i_ref)); +- byte_i_ref += 2; +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("num_values=%d", num_values)); +- +- int calc_num_values = 0; +- for (; calc_num_values < num_values; calc_num_values++) { +- if (!rewrite_cp_refs_in_element_value( +- annotations_typeArray, byte_i_ref, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad nested element_value at %d", calc_num_values)); +- // propagate failure back to caller +- return false; ++class FieldCopier : public FieldClosure { ++ public: ++ void do_field(fieldDescriptor* fd) { ++ instanceKlass* cur = instanceKlass::cast(fd->field_holder()); ++ oop cur_oop = cur->java_mirror(); ++ ++ instanceKlass* old = instanceKlass::cast(cur->old_version()); ++ oop old_oop = old->java_mirror(); ++ ++ fieldDescriptor result; ++ bool found = old->find_local_field(fd->name(), fd->signature(), &result); ++ if (found && result.is_static()) { ++ TRACE_RC3("Copying static field value for field %s old_offset=%d new_offset=%d", ++ fd->name()->as_C_string(), result.offset(), fd->offset()); ++ memcpy(cur_oop->obj_field_addr<HeapWord>(fd->offset()), ++ old_oop->obj_field_addr<HeapWord>(result.offset()), ++ type2aelembytes(fd->field_type())); ++ ++ // Static fields may have references to java.lang.Class ++ if (fd->field_type() == T_OBJECT) { ++ oop oop = cur_oop->obj_field(fd->offset()); ++ if (oop != NULL && oop->is_instanceMirror()) { ++ klassOop klass = java_lang_Class::as_klassOop(oop); ++ if (klass != NULL && klass->klass_part()->oop_is_instance()) { ++ assert(oop == instanceKlass::cast(klass)->java_mirror(), "just checking"); ++ if (klass->klass_part()->new_version() != NULL) { ++ oop = instanceKlass::cast(klass->klass_part()->new_version())->java_mirror(); ++ ++ cur_oop->obj_field_put(fd->offset(), oop); ++ } ++ } + } + } +- assert(num_values == calc_num_values, "sanity check"); +- } break; +- +- default: +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("bad tag=0x%x", tag)); +- return false; +- } // end decode tag field +- +- return true; +-} // end rewrite_cp_refs_in_element_value() +- +- +-// Rewrite constant pool references in a fields_annotations field. 
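FieldCopier above performs the "special static initialization": no <clinit> runs for the new class version; instead, each static field that matches an old field by name and signature inherits the old value, and unmatched fields keep their default zeros. The same matching in a self-contained sketch (Field is a stand-in for fieldDescriptor):

    #include <cstring>
    #include <string>
    #include <vector>

    struct Field { std::string name, sig; int offset, size; };

    static void copy_statics(char* new_mirror, const char* old_mirror,
                             const std::vector<Field>& new_fields,
                             const std::vector<Field>& old_fields) {
      for (const Field& nf : new_fields) {
        for (const Field& of : old_fields) {
          if (nf.name == of.name && nf.sig == of.sig) {
            std::memcpy(new_mirror + nf.offset, old_mirror + of.offset, (size_t)nf.size);
            break;  // fields without a match keep their default zero value
          }
        }
      }
    }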
+-bool VM_RedefineClasses::rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle fields_annotations(THREAD, +- scratch_class->fields_annotations()); ++ } ++ } ++}; + +- if (fields_annotations.is_null() || fields_annotations->length() == 0) { +- // no fields_annotations so nothing to do +- return true; ++void VM_RedefineClasses::mark_as_scavengable(nmethod* nm) { ++ if (!nm->on_scavenge_root_list()) { ++ CodeCache::add_scavenge_root_nmethod(nm); + } ++} + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("fields_annotations length=%d", fields_annotations->length())); ++struct StoreBarrier { ++ template <class T> static void oop_store(T* p, oop v) { ::oop_store(p, v); } ++}; + +- for (int i = 0; i < fields_annotations->length(); i++) { +- typeArrayHandle field_annotations(THREAD, +- (typeArrayOop)fields_annotations->obj_at(i)); +- if (field_annotations.is_null() || field_annotations->length() == 0) { +- // this field does not have any annotations so skip it +- continue; +- } ++struct StoreNoBarrier { ++ template <class T> static void oop_store(T* p, oop v) { oopDesc::encode_store_heap_oop_not_null(p, v); } ++}; + +- int byte_i = 0; // byte index into field_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(field_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad field_annotations at %d", i)); +- // propagate failure back to caller +- return false; ++template <class S> ++class ChangePointersOopClosure : public OopClosureNoHeader { ++ // Forward pointers to instanceKlass and mirror class to new versions ++ template <class T> ++ inline void do_oop_work(T* p) { ++ oop oop = oopDesc::load_decode_heap_oop(p); ++ if (oop == NULL) { ++ return; ++ } ++ if (oop->is_instanceKlass()) { ++ klassOop klass = (klassOop) oop; ++ if (klass->klass_part()->new_version() != NULL) { ++ oop = klass->klass_part()->new_version(); ++ S::oop_store(p, oop); ++ } ++ } else if (oop->is_instanceMirror()) { ++ klassOop klass = java_lang_Class::as_klassOop(oop); ++ if (klass != NULL && klass->klass_part()->oop_is_instance()) { ++ assert(oop == instanceKlass::cast(klass)->java_mirror(), "just checking"); ++ if (klass->klass_part()->new_version() != NULL) { ++ oop = instanceKlass::cast(klass->klass_part()->new_version())->java_mirror(); ++ S::oop_store(p, oop); ++ } ++ } + } + } + +- return true; +-} // end rewrite_cp_refs_in_fields_annotations() +- +- +-// Rewrite constant pool references in a methods_annotations field. 
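ChangePointersOopClosure above is parameterized by a store policy so one forwarding body can run both with write barriers (heap walk) and without them (root slots outside the heap). A compilable model of that policy-template pattern with plain pointers:

    struct Obj { Obj* forwarded; };  // forwarded != nullptr: a newer version exists

    struct Barriered   { static void store(Obj** p, Obj* v) { /* card-mark p here */ *p = v; } };
    struct Unbarriered { static void store(Obj** p, Obj* v) { *p = v; } };

    template <class Store>
    static void forward_slot(Obj** slot) {
      Obj* o = *slot;
      if (o != nullptr && o->forwarded != nullptr) {
        Store::store(slot, o->forwarded);  // redirect the slot to the new version
      }
    }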
+-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS) { +- +- objArrayHandle methods_annotations(THREAD, +- scratch_class->methods_annotations()); ++ virtual void do_oop(oop* o) { ++ do_oop_work(o); ++ } + +- if (methods_annotations.is_null() || methods_annotations->length() == 0) { +- // no methods_annotations so nothing to do +- return true; ++ virtual void do_oop(narrowOop* o) { ++ do_oop_work(o); + } ++}; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_annotations length=%d", methods_annotations->length())); ++void VM_RedefineClasses::doit() { ++ Thread *thread = Thread::current(); ++ ++ TRACE_RC1("Entering doit!"); + +- for (int i = 0; i < methods_annotations->length(); i++) { +- typeArrayHandle method_annotations(THREAD, +- (typeArrayOop)methods_annotations->obj_at(i)); +- if (method_annotations.is_null() || method_annotations->length() == 0) { +- // this method does not have any annotations so skip it +- continue; +- } ++ assert((_max_redefinition_flags & Klass::RemoveSuperType) == 0, "removing super types not allowed"); + +- int byte_i = 0; // byte index into method_annotations +- if (!rewrite_cp_refs_in_annotations_typeArray(method_annotations, byte_i, +- THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_annotations at %d", i)); +- // propagate failure back to caller +- return false; ++ if (UseSharedSpaces) { ++ // Sharing is enabled so we remap the shared readonly space to ++ // shared readwrite, private just in case we need to redefine ++ // a shared class. We do the remap during the doit() phase of ++ // the safepoint to be safer. ++ if (!CompactingPermGenGen::remap_shared_readonly_as_readwrite()) { ++ TRACE_RC1("failed to remap shared readonly space to readwrite, private"); ++ _result = JVMTI_ERROR_INTERNAL; ++ return; + } + } ++ ++ RC_TIMER_START(_timer_prepare_redefinition); ++ for (int i = 0; i < _new_classes->length(); i++) { ++ redefine_single_class(_new_classes->at(i), thread); ++ } ++ ++ // Deoptimize all compiled code that depends on this class ++ flush_dependent_code(instanceKlassHandle(Thread::current(), (klassOop)NULL), Thread::current()); + +- return true; +-} // end rewrite_cp_refs_in_methods_annotations() ++ // Adjust constantpool caches and vtables for all classes ++ // that reference methods of the evolved class. ++ SystemDictionary::classes_do(adjust_cpool_cache, Thread::current()); + ++ RC_TIMER_STOP(_timer_prepare_redefinition); ++ RC_TIMER_START(_timer_heap_iteration); + +-// Rewrite constant pool references in a methods_parameter_annotations +-// field. 
This "structure" is adapted from the +-// RuntimeVisibleParameterAnnotations_attribute described in section +-// 4.8.17 of the 2nd-edition of the VM spec: +-// +-// methods_parameter_annotations_typeArray { +-// u1 num_parameters; +-// { +-// u2 num_annotations; +-// annotation annotations[num_annotations]; +-// } parameter_annotations[num_parameters]; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ class ChangePointersObjectClosure : public ObjectClosure { + +- objArrayHandle methods_parameter_annotations(THREAD, +- scratch_class->methods_parameter_annotations()); ++ private: + +- if (methods_parameter_annotations.is_null() +- || methods_parameter_annotations->length() == 0) { +- // no methods_parameter_annotations so nothing to do +- return true; +- } ++ OopClosureNoHeader *_closure; ++ bool _needs_instance_update; ++ oop _tmp_obj; ++ int _tmp_obj_size; + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_parameter_annotations length=%d", +- methods_parameter_annotations->length())); ++ public: ++ ChangePointersObjectClosure(OopClosureNoHeader *closure) : _closure(closure), _needs_instance_update(false), _tmp_obj(NULL), _tmp_obj_size(0) {} + +- for (int i = 0; i < methods_parameter_annotations->length(); i++) { +- typeArrayHandle method_parameter_annotations(THREAD, +- (typeArrayOop)methods_parameter_annotations->obj_at(i)); +- if (method_parameter_annotations.is_null() +- || method_parameter_annotations->length() == 0) { +- // this method does not have any parameter annotations so skip it +- continue; +- } ++ bool needs_instance_update() { ++ return _needs_instance_update; ++ } + +- if (method_parameter_annotations->length() < 1) { +- // not enough room for a num_parameters field +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("length() is too small for a num_parameters field at %d", i)); +- return false; +- } ++ void copy_to_tmp(oop o) { ++ int size = o->size(); ++ if (_tmp_obj_size < size) { ++ _tmp_obj_size = size; ++ _tmp_obj = (oop)resource_allocate_bytes(size * HeapWordSize); ++ } ++ Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)_tmp_obj, size); ++ } + +- int byte_i = 0; // byte index into method_parameter_annotations ++ virtual void do_object(oop obj) { ++ if (obj->is_instanceKlass()) return; ++ if (obj->is_instanceMirror()) { ++ // static fields may have references to old java.lang.Class instances, update them ++ // at the same time, we don't want to update other oops in the java.lang.Class ++ // Causes SIGSEGV? 
++ //instanceMirrorKlass::oop_fields_iterate(obj, _closure); ++ } else { ++ obj->oop_iterate(_closure); ++ } + +- u1 num_parameters = method_parameter_annotations->byte_at(byte_i); +- byte_i++; ++ if (obj->blueprint()->new_version() != NULL) { ++ Klass* new_klass = obj->blueprint()->new_version()->klass_part(); ++ if (obj->is_perm()) { ++ _needs_instance_update = true; ++ } else if(new_klass->update_information() != NULL) { ++ int size_diff = obj->size() - obj->size_given_klass(new_klass); ++ ++ // Either new size is bigger or gap is to small to be filled ++ if (size_diff < 0 || (size_diff > 0 && (size_t) size_diff < CollectedHeap::min_fill_size())) { ++ // We need an instance update => set back to old klass ++ _needs_instance_update = true; ++ } else { ++ oop src = obj; ++ if (new_klass->is_copying_backwards()) { ++ copy_to_tmp(obj); ++ src = _tmp_obj; ++ } ++ src->set_klass_no_check(obj->blueprint()->new_version()); ++ MarkSweep::update_fields(obj, src, new_klass->update_information()); + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("num_parameters=%d", num_parameters)); ++ if (size_diff > 0) { ++ HeapWord* dead_space = ((HeapWord *)obj) + obj->size(); ++ CollectedHeap::fill_with_object(dead_space, size_diff); ++ } ++ } ++ } else { ++ obj->set_klass_no_check(obj->blueprint()->new_version()); ++ } ++ } ++ } ++ }; ++ ++ ChangePointersOopClosure<StoreNoBarrier> oopClosureNoBarrier; ++ ChangePointersOopClosure<StoreBarrier> oopClosure; ++ ChangePointersObjectClosure objectClosure(&oopClosure); + +- int calc_num_parameters = 0; +- for (; calc_num_parameters < num_parameters; calc_num_parameters++) { +- if (!rewrite_cp_refs_in_annotations_typeArray( +- method_parameter_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad method_parameter_annotations at %d", calc_num_parameters)); +- // propagate failure back to caller +- return false; ++ { ++ // Since we may update oops inside nmethod's code blob to point to java.lang.Class in new generation, we need to ++ // make sure such references are properly recognized by GC. For that, If ScavengeRootsInCode is true, we need to ++ // mark such nmethod's as "scavengable". ++ // For now, mark all nmethod's as scavengable that are not scavengable already ++ if (ScavengeRootsInCode) { ++ CodeCache::nmethods_do(mark_as_scavengable); + } ++ ++ SharedHeap::heap()->gc_prologue(true); ++ Universe::heap()->object_iterate(&objectClosure); ++ Universe::root_oops_do(&oopClosureNoBarrier); ++ SharedHeap::heap()->gc_epilogue(false); + } +- assert(num_parameters == calc_num_parameters, "sanity check"); +- } + +- return true; +-} // end rewrite_cp_refs_in_methods_parameter_annotations() + ++ for (int i=0; i<_new_classes->length(); i++) { ++ klassOop cur_oop = _new_classes->at(i)(); ++ instanceKlass* cur = instanceKlass::cast(cur_oop); ++ klassOop old_oop = cur->old_version(); ++ instanceKlass* old = instanceKlass::cast(old_oop); + +-// Rewrite constant pool references in a methods_default_annotations +-// field. 
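The size checks above decide whether an instance can be converted where it lies: the object must not grow, and any gap left by shrinking must be at least the heap's minimum fill size so a filler object can keep the heap parseable; otherwise the instance is deferred to the follow-up GC. The decision as a standalone predicate:

    static bool can_update_in_place(int old_size, int new_size, int min_fill_size) {
      int size_diff = old_size - new_size;
      if (size_diff < 0) return false;                               // object grew
      if (size_diff > 0 && size_diff < min_fill_size) return false;  // gap too small to fill
      return true;  // same size, or a gap a filler object can cover
    }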
This "structure" is adapted from the AnnotationDefault_attribute +-// that is described in section 4.8.19 of the 2nd-edition of the VM spec: +-// +-// methods_default_annotations_typeArray { +-// element_value default_value; +-// } +-// +-bool VM_RedefineClasses::rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS) { ++ // Swap marks to have same hashcodes ++ swap_marks(cur_oop, old_oop); ++ swap_marks(cur->java_mirror(), old->java_mirror()); + +- objArrayHandle methods_default_annotations(THREAD, +- scratch_class->methods_default_annotations()); ++ // Revert pool holder for old version of klass (it was updated by one of ours closure!) ++ old->constants()->set_pool_holder(old_oop); + +- if (methods_default_annotations.is_null() +- || methods_default_annotations->length() == 0) { +- // no methods_default_annotations so nothing to do +- return true; +- } + +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("methods_default_annotations length=%d", +- methods_default_annotations->length())); ++ if (old->array_klasses() != NULL) { ++ // Transfer the array classes, otherwise we might get cast exceptions when casting array types. ++ assert(cur->array_klasses() == NULL, "just checking"); ++ cur->set_array_klasses(old->array_klasses()); ++ } + +- for (int i = 0; i < methods_default_annotations->length(); i++) { +- typeArrayHandle method_default_annotations(THREAD, +- (typeArrayOop)methods_default_annotations->obj_at(i)); +- if (method_default_annotations.is_null() +- || method_default_annotations->length() == 0) { +- // this method does not have any default annotations so skip it +- continue; ++ // Initialize the new class! Special static initialization that does not execute the ++ // static constructor but copies static field values from the old class if name ++ // and signature of a static field match. ++ FieldCopier copier; ++ cur->do_local_static_fields(&copier); // TODO (tw): What about internal static fields?? ++ old->set_java_mirror(cur->java_mirror()); ++ ++ // Transfer init state ++ instanceKlass::ClassState state = old->init_state(); ++ if (state > instanceKlass::linked) { ++ cur->set_init_state(state); ++ } + } + +- int byte_i = 0; // byte index into method_default_annotations ++ RC_TIMER_STOP(_timer_heap_iteration); ++ RC_TIMER_START(_timer_redefinition); ++ if (objectClosure.needs_instance_update()){ + +- if (!rewrite_cp_refs_in_element_value( +- method_default_annotations, byte_i, THREAD)) { +- RC_TRACE_WITH_THREAD(0x02000000, THREAD, +- ("bad default element_value at %d", i)); +- // propagate failure back to caller +- return false; +- } ++ // Do a full garbage collection to update the instance sizes accordingly ++ TRACE_RC1("Before performing full GC!"); ++ Universe::set_redefining_gc_run(true); ++ notify_gc_begin(true); ++ Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection); ++ notify_gc_end(); ++ Universe::set_redefining_gc_run(false); ++ TRACE_RC1("GC done!"); + } + +- return true; +-} // end rewrite_cp_refs_in_methods_default_annotations() ++ // Unmark klassOops as "redefining" ++ for (int i=0; i<_new_classes->length(); i++) { ++ klassOop cur_klass = _new_classes->at(i)(); ++ instanceKlass* cur = (instanceKlass*)cur_klass->klass_part(); ++ cur->set_redefining(false); ++ cur->clear_update_information(); ++ } + ++ // Disable any dependent concurrent compilations ++ SystemDictionary::notice_modification(); + +-// Rewrite constant pool references in the method's stackmap table. 
+-// These "structures" are adapted from the StackMapTable_attribute that +-// is described in section 4.8.4 of the 6.0 version of the VM spec +-// (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// stack_map { +-// u2 number_of_entries; +-// stack_map_frame entries[number_of_entries]; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_stack_map_table( +- methodHandle method, TRAPS) { ++ // Set flag indicating that some invariants are no longer true. ++ // See jvmtiExport.hpp for detailed explanation. ++ JvmtiExport::set_has_redefined_a_class(); + +- if (!method->has_stackmap_table()) { +- return; +- } ++ // Clean up caches in the compiler interface and compiler threads ++ ciObjectFactory::resort_shared_ci_objects(); + +- typeArrayOop stackmap_data = method->stackmap_data(); +- address stackmap_p = (address)stackmap_data->byte_at_addr(0); +- address stackmap_end = stackmap_p + stackmap_data->length(); +- +- assert(stackmap_p + 2 <= stackmap_end, "no room for number_of_entries"); +- u2 number_of_entries = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("number_of_entries=%u", number_of_entries)); +- +- // walk through each stack_map_frame +- u2 calc_number_of_entries = 0; +- for (; calc_number_of_entries < number_of_entries; calc_number_of_entries++) { +- // The stack_map_frame structure is a u1 frame_type followed by +- // 0 or more bytes of data: +- // +- // union stack_map_frame { +- // same_frame; +- // same_locals_1_stack_item_frame; +- // same_locals_1_stack_item_frame_extended; +- // chop_frame; +- // same_frame_extended; +- // append_frame; +- // full_frame; +- // } +- +- assert(stackmap_p + 1 <= stackmap_end, "no room for frame_type"); +- // The Linux compiler does not like frame_type to be u1 or u2. 
It +- // issues the following warning for the first if-statement below: +- // +- // "warning: comparison is always true due to limited range of data type" +- // +- u4 frame_type = *stackmap_p; +- stackmap_p++; +- +- // same_frame { +- // u1 frame_type = SAME; /* 0-63 */ +- // } +- if (frame_type >= 0 && frame_type <= 63) { +- // nothing more to do for same_frame +- } +- +- // same_locals_1_stack_item_frame { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM; /* 64-127 */ +- // verification_type_info stack[1]; +- // } +- else if (frame_type >= 64 && frame_type <= 127) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // reserved for future use +- else if (frame_type >= 128 && frame_type <= 246) { +- // nothing more to do for reserved frame_types +- } +- +- // same_locals_1_stack_item_frame_extended { +- // u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */ +- // u2 offset_delta; +- // verification_type_info stack[1]; +- // } +- else if (frame_type == 247) { +- stackmap_p += 2; +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- +- // chop_frame { +- // u1 frame_type = CHOP; /* 248-250 */ +- // u2 offset_delta; +- // } +- else if (frame_type >= 248 && frame_type <= 250) { +- stackmap_p += 2; +- } +- +- // same_frame_extended { +- // u1 frame_type = SAME_FRAME_EXTENDED; /* 251*/ +- // u2 offset_delta; +- // } +- else if (frame_type == 251) { +- stackmap_p += 2; +- } +- +- // append_frame { +- // u1 frame_type = APPEND; /* 252-254 */ +- // u2 offset_delta; +- // verification_type_info locals[frame_type - 251]; +- // } +- else if (frame_type >= 252 && frame_type <= 254) { +- assert(stackmap_p + 2 <= stackmap_end, +- "no room for offset_delta"); +- stackmap_p += 2; +- u1 len = frame_type - 251; +- for (u1 i = 0; i < len; i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } ++#ifdef ASSERT + +- // full_frame { +- // u1 frame_type = FULL_FRAME; /* 255 */ +- // u2 offset_delta; +- // u2 number_of_locals; +- // verification_type_info locals[number_of_locals]; +- // u2 number_of_stack_items; +- // verification_type_info stack[number_of_stack_items]; +- // } +- else if (frame_type == 255) { +- assert(stackmap_p + 2 + 2 <= stackmap_end, +- "no room for smallest full_frame"); +- stackmap_p += 2; +- +- u2 number_of_locals = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; +- +- for (u2 locals_i = 0; locals_i < number_of_locals; locals_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } ++ // Universe::verify(); ++ // JNIHandles::verify(); + +- // Use the largest size for the number_of_stack_items, but only get +- // the right number of bytes. 
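The walker above dispatches on a single frame_type byte whose value encodes both the frame kind and, for the small kinds, its payload; only Object_variable_info entries within these frames carry a constant-pool index that redefinition must remap. The ranges, as a standalone classifier:

    #include <cstdint>

    static const char* classify_frame(uint8_t t) {
      if (t <= 63)  return "same_frame";
      if (t <= 127) return "same_locals_1_stack_item_frame";
      if (t <= 246) return "reserved";
      if (t == 247) return "same_locals_1_stack_item_frame_extended";
      if (t <= 250) return "chop_frame";
      if (t == 251) return "same_frame_extended";
      if (t <= 254) return "append_frame";
      return "full_frame";  // 255
    }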
+- u2 number_of_stack_items = Bytes::get_Java_u2(stackmap_p); +- stackmap_p += 2; ++ SystemDictionary::classes_do(check_class, thread); ++#endif + +- for (u2 stack_i = 0; stack_i < number_of_stack_items; stack_i++) { +- rewrite_cp_refs_in_verification_type_info(stackmap_p, stackmap_end, +- calc_number_of_entries, frame_type, THREAD); +- } +- } +- } // end while there is a stack_map_frame +- assert(number_of_entries == calc_number_of_entries, "sanity check"); +-} // end rewrite_cp_refs_in_stack_map_table() ++ RC_TIMER_STOP(_timer_redefinition); + ++ if (TraceRedefineClasses > 0) { ++ tty->flush(); ++ } ++} + +-// Rewrite constant pool references in the verification type info +-// portion of the method's stackmap table. These "structures" are +-// adapted from the StackMapTable_attribute that is described in +-// section 4.8.4 of the 6.0 version of the VM spec (dated 2005.10.26): +-// file:///net/quincunx.sfbay/export/gbracha/ClassFile-Java6.pdf +-// +-// The verification_type_info structure is a u1 tag followed by 0 or +-// more bytes of data: +-// +-// union verification_type_info { +-// Top_variable_info; +-// Integer_variable_info; +-// Float_variable_info; +-// Long_variable_info; +-// Double_variable_info; +-// Null_variable_info; +-// UninitializedThis_variable_info; +-// Object_variable_info; +-// Uninitialized_variable_info; +-// } +-// +-void VM_RedefineClasses::rewrite_cp_refs_in_verification_type_info( +- address& stackmap_p_ref, address stackmap_end, u2 frame_i, +- u1 frame_type, TRAPS) { +- +- assert(stackmap_p_ref + 1 <= stackmap_end, "no room for tag"); +- u1 tag = *stackmap_p_ref; +- stackmap_p_ref++; +- +- switch (tag) { +- // Top_variable_info { +- // u1 tag = ITEM_Top; /* 0 */ +- // } +- // verificationType.hpp has zero as ITEM_Bogus instead of ITEM_Top +- case 0: // fall through +- +- // Integer_variable_info { +- // u1 tag = ITEM_Integer; /* 1 */ +- // } +- case ITEM_Integer: // fall through +- +- // Float_variable_info { +- // u1 tag = ITEM_Float; /* 2 */ +- // } +- case ITEM_Float: // fall through +- +- // Double_variable_info { +- // u1 tag = ITEM_Double; /* 3 */ +- // } +- case ITEM_Double: // fall through +- +- // Long_variable_info { +- // u1 tag = ITEM_Long; /* 4 */ +- // } +- case ITEM_Long: // fall through +- +- // Null_variable_info { +- // u1 tag = ITEM_Null; /* 5 */ +- // } +- case ITEM_Null: // fall through +- +- // UninitializedThis_variable_info { +- // u1 tag = ITEM_UninitializedThis; /* 6 */ +- // } +- case ITEM_UninitializedThis: +- // nothing more to do for the above tag types +- break; ++void VM_RedefineClasses::doit_epilogue() { + +- // Object_variable_info { +- // u1 tag = ITEM_Object; /* 7 */ +- // u2 cpool_index; +- // } +- case ITEM_Object: +- { +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for cpool_index"); +- u2 cpool_index = Bytes::get_Java_u2(stackmap_p_ref); +- u2 new_cp_index = find_new_index(cpool_index); +- if (new_cp_index != 0) { +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("mapped old cpool_index=%d", cpool_index)); +- Bytes::put_Java_u2(stackmap_p_ref, new_cp_index); +- cpool_index = new_cp_index; +- } +- stackmap_p_ref += 2; +- +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, cpool_index=%d", frame_i, +- frame_type, cpool_index)); +- } break; +- +- // Uninitialized_variable_info { +- // u1 tag = ITEM_Uninitialized; /* 8 */ +- // u2 offset; +- // } +- case ITEM_Uninitialized: +- assert(stackmap_p_ref + 2 <= stackmap_end, "no room for offset"); +- stackmap_p_ref += 2; +- break; ++ 
RC_TIMER_START(_timer_vm_op_epilogue); + +- default: +- RC_TRACE_WITH_THREAD(0x04000000, THREAD, +- ("frame_i=%u, frame_type=%u, bad tag=0x%x", frame_i, frame_type, tag)); +- ShouldNotReachHere(); +- break; +- } // end switch (tag) +-} // end rewrite_cp_refs_in_verification_type_info() +- +- +-// Change the constant pool associated with klass scratch_class to +-// scratch_cp. If shrink is true, then scratch_cp_length elements +-// are copied from scratch_cp to a smaller constant pool and the +-// smaller constant pool is associated with scratch_class. +-void VM_RedefineClasses::set_new_constant_pool( +- instanceKlassHandle scratch_class, constantPoolHandle scratch_cp, +- int scratch_cp_length, bool shrink, TRAPS) { +- assert(!shrink || scratch_cp->length() >= scratch_cp_length, "sanity check"); +- +- if (shrink) { +- // scratch_cp is a merged constant pool and has enough space for a +- // worst case merge situation. We want to associate the minimum +- // sized constant pool with the klass to save space. +- constantPoolHandle smaller_cp(THREAD, +- oopFactory::new_constantPool(scratch_cp_length, +- oopDesc::IsUnsafeConc, +- THREAD)); +- // preserve orig_length() value in the smaller copy +- int orig_length = scratch_cp->orig_length(); +- assert(orig_length != 0, "sanity check"); +- smaller_cp->set_orig_length(orig_length); +- scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD); +- scratch_cp = smaller_cp; +- smaller_cp()->set_is_conc_safe(true); +- } +- +- // attach new constant pool to klass +- scratch_cp->set_pool_holder(scratch_class()); +- +- // attach klass to new constant pool +- scratch_class->set_constants(scratch_cp()); +- +- int i; // for portability +- +- // update each field in klass to use new constant pool indices as needed +- for (JavaFieldStream fs(scratch_class); !fs.done(); fs.next()) { +- jshort cur_index = fs.name_index(); +- jshort new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-name_index change: %d to %d", cur_index, new_index)); +- fs.set_name_index(new_index); +- } +- cur_index = fs.signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-signature_index change: %d to %d", cur_index, new_index)); +- fs.set_signature_index(new_index); +- } +- cur_index = fs.initval_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-initval_index change: %d to %d", cur_index, new_index)); +- fs.set_initval_index(new_index); +- } +- cur_index = fs.generic_signature_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("field-generic_signature change: %d to %d", cur_index, new_index)); +- fs.set_generic_signature_index(new_index); +- } +- } // end for each field +- +- // Update constant pool indices in the inner classes info to use +- // new constant indices as needed. The inner classes info is a +- // quadruple: +- // (inner_class_info, outer_class_info, inner_name, inner_access_flags) +- InnerClassesIterator iter(scratch_class); +- for (; !iter.done(); iter.next()) { +- int cur_index = iter.inner_class_info_index(); +- if (cur_index == 0) { +- continue; // JVM spec. 
allows null inner class refs so skip it +- } +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_class_info change: %d to %d", cur_index, new_index)); +- iter.set_inner_class_info_index(new_index); +- } +- cur_index = iter.outer_class_info_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("outer_class_info change: %d to %d", cur_index, new_index)); +- iter.set_outer_class_info_index(new_index); +- } +- cur_index = iter.inner_name_index(); +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("inner_name change: %d to %d", cur_index, new_index)); +- iter.set_inner_name_index(new_index); +- } +- } // end for each inner class +- +- // Attach each method in klass to the new constant pool and update +- // to use new constant pool indices as needed: +- objArrayHandle methods(THREAD, scratch_class->methods()); +- for (i = methods->length() - 1; i >= 0; i--) { +- methodHandle method(THREAD, (methodOop)methods->obj_at(i)); +- method->set_constants(scratch_cp()); +- +- int new_index = find_new_index(method->name_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-name_index change: %d to %d", method->name_index(), +- new_index)); +- method->set_name_index(new_index); +- } +- new_index = find_new_index(method->signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-signature_index change: %d to %d", +- method->signature_index(), new_index)); +- method->set_signature_index(new_index); +- } +- new_index = find_new_index(method->generic_signature_index()); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("method-generic_signature_index change: %d to %d", +- method->generic_signature_index(), new_index)); +- method->set_generic_signature_index(new_index); +- } +- +- // Update constant pool indices in the method's checked exception +- // table to use new constant indices as needed. +- int cext_length = method->checked_exceptions_length(); +- if (cext_length > 0) { +- CheckedExceptionElement * cext_table = +- method->checked_exceptions_start(); +- for (int j = 0; j < cext_length; j++) { +- int cur_index = cext_table[j].class_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("cext-class_cp_index change: %d to %d", cur_index, new_index)); +- cext_table[j].class_cp_index = (u2)new_index; +- } +- } // end for each checked exception table entry +- } // end if there are checked exception table entries +- +- // Update each catch type index in the method's exception table +- // to use new constant pool indices as needed. The exception table +- // holds quadruple entries of the form: +- // (beg_bci, end_bci, handler_bci, klass_index) +- +- ExceptionTable ex_table(method()); +- int ext_length = ex_table.length(); +- +- for (int j = 0; j < ext_length; j ++) { +- int cur_index = ex_table.catch_type_index(j); +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("ext-klass_index change: %d to %d", cur_index, new_index)); +- ex_table.set_catch_type_index(j, new_index); +- } +- } // end for each exception table entry +- +- // Update constant pool indices in the method's local variable +- // table to use new constant indices as needed. 
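Before the local-variable-table case continues below, note the one pattern every fix-up in the removed set_new_constant_pool() uses: find_new_index() returns the remapped constant-pool slot, or 0 (a reserved, always-invalid cp index) to mean unchanged. A self-contained sketch of that pattern, with std::map standing in for the real index map:

#include <cstdint>
#include <cstdio>
#include <map>

// Stand-in for VM_RedefineClasses::find_new_index(): returns the remapped
// cp index, or 0 (reserved in the class-file format) for "unchanged".
static uint16_t find_new_index(const std::map<uint16_t, uint16_t>& remap,
                               uint16_t old_index) {
  auto it = remap.find(old_index);
  return it == remap.end() ? 0 : it->second;
}

int main() {
  std::map<uint16_t, uint16_t> remap;   // entries moved by constant-pool merging
  remap[5] = 7;
  remap[9] = 3;
  uint16_t name_indices[] = {2, 5, 9};  // fake u2 cp references to patch
  for (uint16_t& idx : name_indices) {
    uint16_t ni = find_new_index(remap, idx);
    if (ni != 0) {
      std::printf("index change: %u to %u\n", (unsigned)idx, (unsigned)ni);
      idx = ni;                         // patch in place, as the removed code does
    }
  }
  return 0;
}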
The local variable +- // table hold sextuple entries of the form: +- // (start_pc, length, name_index, descriptor_index, signature_index, slot) +- int lvt_length = method->localvariable_table_length(); +- if (lvt_length > 0) { +- LocalVariableTableElement * lv_table = +- method->localvariable_table_start(); +- for (int j = 0; j < lvt_length; j++) { +- int cur_index = lv_table[j].name_cp_index; +- int new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-name_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].name_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].descriptor_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-descriptor_cp_index change: %d to %d", cur_index, +- new_index)); +- lv_table[j].descriptor_cp_index = (u2)new_index; +- } +- cur_index = lv_table[j].signature_cp_index; +- new_index = find_new_index(cur_index); +- if (new_index != 0) { +- RC_TRACE_WITH_THREAD(0x00080000, THREAD, +- ("lvt-signature_cp_index change: %d to %d", cur_index, new_index)); +- lv_table[j].signature_cp_index = (u2)new_index; +- } +- } // end for each local variable table entry +- } // end if there are local variable table entries ++ //unlock_threads(); + +- rewrite_cp_refs_in_stack_map_table(method, THREAD); +- } // end for each method +- assert(scratch_cp()->is_conc_safe(), "Just checking"); +-} // end set_new_constant_pool() ++ ResourceMark mark; + ++ VM_GC_Operation::doit_epilogue(); ++ TRACE_RC1("GC Operation epilogue finished! "); + +-// Unevolving classes may point to methods of the_class directly +-// from their constant pool caches, itables, and/or vtables. We +-// use the SystemDictionary::classes_do() facility and this helper +-// to fix up these pointers. +-// +-// Note: We currently don't support updating the vtable in +-// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. +-void VM_RedefineClasses::adjust_cpool_cache_and_vtable(klassOop k_oop, +- oop initiating_loader, TRAPS) { +- Klass *k = k_oop->klass_part(); +- if (k->oop_is_instance()) { +- HandleMark hm(THREAD); +- instanceKlass *ik = (instanceKlass *) k; ++ // Free the array of scratch classes ++ delete _new_classes; ++ _new_classes = NULL; + +- // HotSpot specific optimization! HotSpot does not currently +- // support delegation from the bootstrap class loader to a +- // user-defined class loader. This means that if the bootstrap +- // class loader is the initiating class loader, then it will also +- // be the defining class loader. This also means that classes +- // loaded by the bootstrap class loader cannot refer to classes +- // loaded by a user-defined class loader. Note: a user-defined +- // class loader can delegate to the bootstrap class loader. +- // +- // If the current class being redefined has a user-defined class +- // loader as its defining class loader, then we can skip all +- // classes loaded by the bootstrap class loader. +- bool is_user_defined = +- instanceKlass::cast(_the_class_oop)->class_loader() != NULL; +- if (is_user_defined && ik->class_loader() == NULL) { +- return; +- } ++ // Free the array of affected classes ++ delete _affected_klasses; ++ _affected_klasses = NULL; + +- // This is a very busy routine. We don't want too much tracing +- // printed out. +- bool trace_name_printed = false; +- +- // Very noisy: only enable this call if you are trying to determine +- // that a specific class gets found by this routine. 
+- // RC_TRACE macro has an embedded ResourceMark +- // RC_TRACE_WITH_THREAD(0x00100000, THREAD, +- // ("adjust check: name=%s", ik->external_name())); +- // trace_name_printed = true; +- +- // Fix the vtable embedded in the_class and subclasses of the_class, +- // if one exists. We discard scratch_class and we don't keep an +- // instanceKlass around to hold obsolete methods so we don't have +- // any other instanceKlass embedded vtables to update. The vtable +- // holds the methodOops for virtual (but not final) methods. +- if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) { +- // ik->vtable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->vtable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // If the current class has an itable and we are either redefining an +- // interface or if the current class is a subclass of the_class, then +- // we potentially have to fix the itable. If we are redefining an +- // interface, then we have to call adjust_method_entries() for +- // every instanceKlass that has an itable since there isn't a +- // subclass relationship between an interface and an instanceKlass. +- if (ik->itable_length() > 0 && (Klass::cast(_the_class_oop)->is_interface() +- || ik->is_subclass_of(_the_class_oop))) { +- // ik->itable() creates a wrapper object; rm cleans it up +- ResourceMark rm(THREAD); +- ik->itable()->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- +- // The constant pools in other classes (other_cp) can refer to +- // methods in the_class. We have to update method information in +- // other_cp's cache. If other_cp has a previous version, then we +- // have to repeat the process for each previous version. The +- // constant pool cache holds the methodOops for non-virtual +- // methods and for virtual, final methods. +- // +- // Special case: if the current class is the_class, then new_cp +- // has already been attached to the_class and old_cp has already +- // been added as a previous version. The new_cp doesn't have any +- // cached references to old methods so it doesn't need to be +- // updated. We can simply start with the previous version(s) in +- // that case. +- constantPoolHandle other_cp; +- constantPoolCacheOop cp_cache; +- +- if (k_oop != _the_class_oop) { +- // this klass' constant pool cache may need adjustment +- other_cp = constantPoolHandle(ik->constants()); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- { +- ResourceMark rm(THREAD); +- // PreviousVersionInfo objects returned via PreviousVersionWalker +- // contain a GrowableArray of handles. We have to clean up the +- // GrowableArray _after_ the PreviousVersionWalker destructor +- // has destroyed the handles. 
+- { +- // the previous versions' constant pool caches may need adjustment +- PreviousVersionWalker pvw(ik); +- for (PreviousVersionInfo * pv_info = pvw.next_previous_version(); +- pv_info != NULL; pv_info = pvw.next_previous_version()) { +- other_cp = pv_info->prev_constant_pool_handle(); +- cp_cache = other_cp->cache(); +- if (cp_cache != NULL) { +- cp_cache->adjust_method_entries(_matching_old_methods, +- _matching_new_methods, +- _matching_methods_length, +- &trace_name_printed); +- } +- } +- } // pvw is cleaned up +- } // rm is cleaned up +- } +-} ++ TRACE_RC1("Redefinition finished!"); + +-void VM_RedefineClasses::update_jmethod_ids() { +- for (int j = 0; j < _matching_methods_length; ++j) { +- methodOop old_method = _matching_old_methods[j]; +- jmethodID jmid = old_method->find_jmethod_id_or_null(); +- if (jmid != NULL) { +- // There is a jmethodID, change it to point to the new method +- methodHandle new_method_h(_matching_new_methods[j]); +- JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); +- assert(JNIHandles::resolve_jmethod_id(jmid) == _matching_new_methods[j], +- "should be replaced"); +- } +- } ++ RC_TIMER_STOP(_timer_vm_op_epilogue); + } + +-void VM_RedefineClasses::check_methods_and_mark_as_obsolete( +- BitMap *emcp_methods, int * emcp_method_count_p) { +- *emcp_method_count_p = 0; +- int obsolete_count = 0; +- int old_index = 0; +- for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { +- methodOop old_method = _matching_old_methods[j]; +- methodOop new_method = _matching_new_methods[j]; +- methodOop old_array_method; +- +- // Maintain an old_index into the _old_methods array by skipping +- // deleted methods +- while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) +- != old_method) { +- ++old_index; +- } +- +- if (MethodComparator::methods_EMCP(old_method, new_method)) { +- // The EMCP definition from JSR-163 requires the bytecodes to be +- // the same with the exception of constant pool indices which may +- // differ. However, the constants referred to by those indices +- // must be the same. +- // +- // We use methods_EMCP() for comparison since constant pool +- // merging can remove duplicate constant pool entries that were +- // present in the old method and removed from the rewritten new +- // method. A faster binary comparison function would consider the +- // old and new methods to be different when they are actually +- // EMCP. +- // +- // The old and new methods are EMCP and you would think that we +- // could get rid of one of them here and now and save some space. +- // However, the concept of EMCP only considers the bytecodes and +- // the constant pool entries in the comparison. Other things, +- // e.g., the line number table (LNT) or the local variable table +- // (LVT) don't count in the comparison. So the new (and EMCP) +- // method can have a new LNT that we need so we can't just +- // overwrite the new method with the old method. +- // +- // When this routine is called, we have already attached the new +- // methods to the_class so the old methods are effectively +- // overwritten. However, if an old method is still executing, +- // then the old method cannot be collected until sometime after +- // the old method call has returned. So the overwriting of old +- // methods by new methods will save us space except for those +- // (hopefully few) old methods that are still executing. +- // +- // A method refers to a constMethodOop and this presents another +- // possible avenue to space savings. 
The constMethodOop in the +- // new method contains possibly new attributes (LNT, LVT, etc). +- // At first glance, it seems possible to save space by replacing +- // the constMethodOop in the old method with the constMethodOop +- // from the new method. The old and new methods would share the +- // same constMethodOop and we would save the space occupied by +- // the old constMethodOop. However, the constMethodOop contains +- // a back reference to the containing method. Sharing the +- // constMethodOop between two methods could lead to confusion in +- // the code that uses the back reference. This would lead to +- // brittle code that could be broken in non-obvious ways now or +- // in the future. +- // +- // Another possibility is to copy the constMethodOop from the new +- // method to the old method and then overwrite the new method with +- // the old method. Since the constMethodOop contains the bytecodes +- // for the method embedded in the oop, this option would change +- // the bytecodes out from under any threads executing the old +- // method and make the thread's bcp invalid. Since EMCP requires +- // that the bytecodes be the same modulo constant pool indices, it +- // is straight forward to compute the correct new bcp in the new +- // constMethodOop from the old bcp in the old constMethodOop. The +- // time consuming part would be searching all the frames in all +- // of the threads to find all of the calls to the old method. +- // +- // It looks like we will have to live with the limited savings +- // that we get from effectively overwriting the old methods +- // when the new methods are attached to the_class. +- +- // track which methods are EMCP for add_previous_version() call +- emcp_methods->set_bit(old_index); +- (*emcp_method_count_p)++; +- +- // An EMCP method is _not_ obsolete. An obsolete method has a +- // different jmethodID than the current method. An EMCP method +- // has the same jmethodID as the current method. Having the +- // same jmethodID for all EMCP versions of a method allows for +- // a consistent view of the EMCP methods regardless of which +- // EMCP method you happen to have in hand. For example, a +- // breakpoint set in one EMCP method will work for all EMCP +- // versions of the method including the current one. +- } else { +- // mark obsolete methods as such +- old_method->set_is_obsolete(); +- obsolete_count++; +- +- // obsolete methods need a unique idnum +- u2 num = instanceKlass::cast(_the_class_oop)->next_method_idnum(); +- if (num != constMethodOopDesc::UNSET_IDNUM) { +-// u2 old_num = old_method->method_idnum(); +- old_method->set_method_idnum(num); +-// TO DO: attach obsolete annotations to obsolete method's new idnum +- } +- // With tracing we try not to "yack" too much. The position of +- // this trace assumes there are fewer obsolete methods than +- // EMCP methods. +- RC_TRACE(0x00000100, ("mark %s(%s) as obsolete", +- old_method->name()->as_C_string(), +- old_method->signature()->as_C_string())); +- } +- old_method->set_is_old(); +- } +- for (int i = 0; i < _deleted_methods_length; ++i) { +- methodOop old_method = _deleted_methods[i]; +- +- assert(old_method->vtable_index() < 0, +- "cannot delete methods with vtable entries");; +- +- // Mark all deleted methods as old and obsolete +- old_method->set_is_old(); +- old_method->set_is_obsolete(); +- ++obsolete_count; +- // With tracing we try not to "yack" too much. The position of +- // this trace assumes there are fewer obsolete methods than +- // EMCP methods. 
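The EMCP notion discussed in the comment above boils down to: identical opcode streams in which only operands known to be constant-pool indices may differ. A toy comparator over a simplified instruction encoding, illustrating the idea rather than HotSpot's MethodComparator:

#include <cstdint>
#include <cstdio>
#include <vector>

struct Insn {
  uint8_t  opcode;
  uint16_t operand;
  bool     operand_is_cp_index;   // e.g. invokevirtual, getfield, ldc_w ...
};

// Toy methods_EMCP(): bytecodes must match exactly, except operands that
// are constant-pool indices may differ (the constants those indices
// resolve to are assumed to be checked elsewhere, as in the real code).
static bool methods_emcp(const std::vector<Insn>& a, const std::vector<Insn>& b) {
  if (a.size() != b.size()) return false;
  for (size_t i = 0; i < a.size(); i++) {
    if (a[i].opcode != b[i].opcode) return false;
    if (!a[i].operand_is_cp_index && a[i].operand != b[i].operand) return false;
  }
  return true;
}

int main() {
  std::vector<Insn> oldm = {{0xb6, 12, true}, {0x10, 40, false}};  // invokevirtual #12; bipush 40
  std::vector<Insn> newm = {{0xb6, 17, true}, {0x10, 40, false}};  // same code, cp entry moved
  std::printf("EMCP: %s\n", methods_emcp(oldm, newm) ? "yes" : "no");
  return 0;
}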
+- RC_TRACE(0x00000100, ("mark deleted %s(%s) as obsolete", +- old_method->name()->as_C_string(), +- old_method->signature()->as_C_string())); +- } +- assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), +- "sanity check"); +- RC_TRACE(0x00000100, ("EMCP_cnt=%d, obsolete_cnt=%d", *emcp_method_count_p, +- obsolete_count)); ++bool VM_RedefineClasses::is_modifiable_class(oop klass_mirror) { ++ // classes for primitives cannot be redefined ++ if (java_lang_Class::is_primitive(klass_mirror)) { ++ return false; ++ } ++ klassOop the_class_oop = java_lang_Class::as_klassOop(klass_mirror); ++ // classes for arrays cannot be redefined ++ if (the_class_oop == NULL || !Klass::cast(the_class_oop)->oop_is_instance()) { ++ return false; ++ } ++ return true; + } + +-// This internal class transfers the native function registration from old methods +-// to new methods. It is designed to handle both the simple case of unchanged +-// native methods and the complex cases of native method prefixes being added and/or +-// removed. +-// It expects only to be used during the VM_RedefineClasses op (a safepoint). +-// +-// This class is used after the new methods have been installed in "the_class". +-// +-// So, for example, the following must be handled. Where 'm' is a method and +-// a number followed by an underscore is a prefix. +-// +-// Old Name New Name +-// Simple transfer to new method m -> m +-// Add prefix m -> 1_m +-// Remove prefix 1_m -> m +-// Simultaneous add of prefixes m -> 3_2_1_m +-// Simultaneous removal of prefixes 3_2_1_m -> m +-// Simultaneous add and remove 1_m -> 2_m +-// Same, caused by prefix removal only 3_2_1_m -> 3_2_m +-// +-class TransferNativeFunctionRegistration { +- private: +- instanceKlassHandle the_class; +- int prefix_count; +- char** prefixes; +- +- // Recursively search the binary tree of possibly prefixed method names. +- // Iteration could be used if all agents were well behaved. Full tree walk is +- // more resilent to agents not cleaning up intermediate methods. +- // Branch at each depth in the binary tree is: +- // (1) without the prefix. +- // (2) with the prefix. +- // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) +- methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, +- Symbol* signature) { +- TempNewSymbol name_symbol = SymbolTable::probe(name_str, (int)name_len); +- if (name_symbol != NULL) { +- methodOop method = Klass::cast(the_class())->lookup_method(name_symbol, signature); +- if (method != NULL) { +- // Even if prefixed, intermediate methods must exist. +- if (method->is_native()) { +- // Wahoo, we found a (possibly prefixed) version of the method, return it. +- return method; +- } +- if (depth < prefix_count) { +- // Try applying further prefixes (other than this one). +- method = search_prefix_name_space(depth+1, name_str, name_len, signature); +- if (method != NULL) { +- return method; // found +- } +- +- // Try adding this prefix to the method name and see if it matches +- // another method name. 
+- char* prefix = prefixes[depth]; +- size_t prefix_len = strlen(prefix); +- size_t trial_len = name_len + prefix_len; +- char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); +- strcpy(trial_name_str, prefix); +- strcat(trial_name_str, name_str); +- method = search_prefix_name_space(depth+1, trial_name_str, trial_len, +- signature); +- if (method != NULL) { +- // If found along this branch, it was prefixed, mark as such +- method->set_is_prefixed_native(); +- return method; // found +- } +- } +- } +- } +- return NULL; // This whole branch bore nothing ++#ifdef ASSERT ++ ++void VM_RedefineClasses::verify_classes(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ ++ instanceKlassHandle k_handle(THREAD, k_oop); ++ Verifier::verify(k_handle, Verifier::ThrowException, true, true, THREAD); ++ k_oop = k_oop->klass_part()->old_version(); + } ++} + +- // Return the method name with old prefixes stripped away. +- char* method_name_without_prefixes(methodOop method) { +- Symbol* name = method->name(); +- char* name_str = name->as_utf8(); ++#endif + +- // Old prefixing may be defunct, strip prefixes, if any. +- for (int i = prefix_count-1; i >= 0; i--) { +- char* prefix = prefixes[i]; +- size_t prefix_len = strlen(prefix); +- if (strncmp(prefix, name_str, prefix_len) == 0) { +- name_str += prefix_len; ++// Rewrite faster byte-codes back to their slower equivalent. Undoes rewriting happening in templateTable_xxx.cpp ++// The reason is that once we zero cpool caches, we need to re-resolve all entries again. Faster bytecodes do not ++// do that, they assume that cache entry is resolved already. ++void VM_RedefineClasses::unpatch_bytecode(methodOop method) { ++ RawBytecodeStream bcs(method); ++ Bytecodes::Code code; ++ Bytecodes::Code java_code; ++ while (!bcs.is_last_bytecode()) { ++ code = bcs.raw_next(); ++ address bcp = bcs.bcp(); ++ ++ if (code == Bytecodes::_breakpoint) { ++ int bci = method->bci_from(bcp); ++ code = method->orig_bytecode_at(bci); ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ // Let breakpoint table handling unpatch bytecode ++ method->set_orig_bytecode_at(bci, java_code); ++ } ++ } else { ++ java_code = Bytecodes::java_code(code); ++ if (code != java_code && ++ (java_code == Bytecodes::_getfield || ++ java_code == Bytecodes::_putfield || ++ java_code == Bytecodes::_aload_0)) { ++ *bcp = java_code; + } + } +- return name_str; +- } + +- // Strip any prefixes off the old native method, then try to find a +- // (possibly prefixed) new native that matches it. 
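Alongside the prefix handling being rewritten here, this hunk adds unpatch_bytecode(), which rewrites interpreter-optimized fast bytecodes back to their canonical form because zeroed constant-pool-cache entries must be re-resolved through the slow path. Its core is a fast-to-canonical opcode mapping; a standalone illustration (getfield/putfield carry their real JVM opcode values, the fast forms below are placeholders, not HotSpot's actual extended bytecode numbers):

#include <cstdint>
#include <cstdio>

enum : uint8_t {
  OP_GETFIELD       = 0xb4,   // real JVM opcode
  OP_PUTFIELD       = 0xb5,   // real JVM opcode
  OP_FAST_IGETFIELD = 0xcc,   // placeholder fast form
  OP_FAST_APUTFIELD = 0xcd    // placeholder fast form
};

// Map a fast opcode back to its canonical form, in the spirit of
// Bytecodes::java_code(); identity for everything else.
static uint8_t to_java_code(uint8_t op) {
  switch (op) {
    case OP_FAST_IGETFIELD: return OP_GETFIELD;
    case OP_FAST_APUTFIELD: return OP_PUTFIELD;
    default:                return op;
  }
}

int main() {
  // A toy stream of fixed 3-byte field instructions; the real walker uses
  // RawBytecodeStream to find instruction boundaries.
  uint8_t code[] = {OP_FAST_IGETFIELD, 0x00, 0x05, OP_FAST_APUTFIELD, 0x00, 0x06};
  for (int bci = 0; bci < 6; bci += 3) code[bci] = to_java_code(code[bci]);
  for (int i = 0; i < 6; i++) std::printf("%02x ", (unsigned)code[i]);
  std::printf("\n");
  return 0;
}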
+- methodOop strip_and_search_for_new_native(methodOop method) { +- ResourceMark rm; +- char* name_str = method_name_without_prefixes(method); +- return search_prefix_name_space(0, name_str, strlen(name_str), +- method->signature()); ++ // Additionally, we need to unpatch bytecode at bcp+1 for fast_xaccess (which would be fast field access) ++ if (code == Bytecodes::_fast_iaccess_0 || code == Bytecodes::_fast_aaccess_0 || code == Bytecodes::_fast_faccess_0) { ++ Bytecodes::Code code2 = Bytecodes::code_or_bp_at(bcp + 1); ++ assert(code2 == Bytecodes::_fast_igetfield || ++ code2 == Bytecodes::_fast_agetfield || ++ code2 == Bytecodes::_fast_fgetfield, ""); ++ *(bcp + 1) = Bytecodes::java_code(code2); ++ } + } ++} + +- public: ++// Unevolving classes may point to old methods directly ++// from their constant pool caches, itables, and/or vtables. We ++// use the SystemDictionary::classes_do() facility and this helper ++// to fix up these pointers. Additional field offsets and vtable indices ++// in the constant pool cache entries are fixed. ++// ++// Note: We currently don't support updating the vtable in ++// arrayKlassOops. See Open Issues in jvmtiRedefineClasses.hpp. ++void VM_RedefineClasses::adjust_cpool_cache(klassOop k_oop_latest, oop initiating_loader, TRAPS) { ++ klassOop k_oop = k_oop_latest; ++ while (k_oop != NULL) { ++ Klass *k = k_oop->klass_part(); ++ if (k->oop_is_instance()) { ++ HandleMark hm(THREAD); ++ instanceKlass *ik = (instanceKlass *) k; + +- // Construct a native method transfer processor for this class. +- TransferNativeFunctionRegistration(instanceKlassHandle _the_class) { +- assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); ++ constantPoolHandle other_cp; ++ constantPoolCacheOop cp_cache; + +- the_class = _the_class; +- prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count); +- } ++ other_cp = constantPoolHandle(ik->constants()); + +- // Attempt to transfer any of the old or deleted methods that are native +- void transfer_registrations(methodOop* old_methods, int methods_length) { +- for (int j = 0; j < methods_length; j++) { +- methodOop old_method = old_methods[j]; ++ for (int i=0; i<other_cp->length(); i++) { ++ if (other_cp->tag_at(i).is_klass()) { ++ klassOop klass = other_cp->klass_at(i, THREAD); ++ if (klass->klass_part()->new_version() != NULL) { + +- if (old_method->is_native() && old_method->has_native_function()) { +- methodOop new_method = strip_and_search_for_new_native(old_method); +- if (new_method != NULL) { +- // Actually set the native function in the new method. +- // Redefine does not send events (except CFLH), certainly not this +- // behind the scenes re-registration. +- new_method->set_native_function(old_method->native_function(), +- !methodOopDesc::native_bind_event_is_interesting); ++ // (tw) TODO: check why/if this is necessary ++ other_cp->klass_at_put(i, klass->klass_part()->new_version()); ++ } ++ klass = other_cp->klass_at(i, THREAD); ++ assert(klass->klass_part()->new_version() == NULL, "Must be new klass!"); + } + } ++ ++ cp_cache = other_cp->cache(); ++ ++ if (cp_cache != NULL) { ++ cp_cache->adjust_entries(); ++ } ++ ++ // If bytecode rewriting is enabled, we also need to unpatch bytecode to force resolution of zeroed entries ++ if (RewriteBytecodes) { ++ ik->methods_do(unpatch_bytecode); ++ } + } ++ k_oop = k_oop->klass_part()->old_version(); + } +-}; ++} + +-// Don't lose the association between a native method and its JNI function. 
+-void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle the_class) { +- TransferNativeFunctionRegistration transfer(the_class); +- transfer.transfer_registrations(_deleted_methods, _deleted_methods_length); +- transfer.transfer_registrations(_matching_old_methods, _matching_methods_length); ++void VM_RedefineClasses::update_jmethod_ids() { ++ for (int j = 0; j < _matching_methods_length; ++j) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ TRACE_RC3("matching method %s", old_method->name_and_sig_as_C_string()); ++ ++ jmethodID jmid = old_method->find_jmethod_id_or_null(); ++ if (old_method->new_version() != NULL && jmid == NULL) { ++ // (tw) Have to create jmethodID in this case ++ jmid = old_method->jmethod_id(); ++ } ++ ++ if (jmid != NULL) { ++ // There is a jmethodID, change it to point to the new method ++ methodHandle new_method_h((methodOop)_new_methods->obj_at(_matching_new_methods[j])); ++ if (old_method->new_version() == NULL) { ++ methodHandle old_method_h((methodOop)_old_methods->obj_at(_matching_old_methods[j])); ++ jmethodID new_jmethod_id = JNIHandles::make_jmethod_id(old_method_h); ++ bool result = instanceKlass::cast(old_method_h->method_holder())->update_jmethod_id(old_method_h(), new_jmethod_id); ++ //TRACE_RC3("Changed jmethodID for old method assigned to %d / result=%d", new_jmethod_id, result); ++ //TRACE_RC3("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); ++ } else { ++ jmethodID mid = new_method_h->jmethod_id(); ++ bool result = instanceKlass::cast(new_method_h->method_holder())->update_jmethod_id(new_method_h(), jmid); ++ //TRACE_RC3("Changed jmethodID for new method assigned to %d / result=%d", jmid, result); ++ } ++ JNIHandles::change_method_associated_with_jmethod_id(jmid, new_method_h); ++ //TRACE_RC3("changing method associated with jmethod id %d to %s", (int)jmid, new_method_h->name()->as_C_string()); ++ assert(JNIHandles::resolve_jmethod_id(jmid) == (methodOop)_new_methods->obj_at(_matching_new_methods[j]), "should be replaced"); ++ jmethodID mid = ((methodOop)_new_methods->obj_at(_matching_new_methods[j]))->jmethod_id(); ++ assert(JNIHandles::resolve_non_null((jobject)mid) == new_method_h(), "must match!"); ++ ++ //TRACE_RC3("jmethodID new method: %d jmethodID old method: %d", new_method_h->jmethod_id(), old_method->jmethod_id()); ++ } ++ } + } + ++ + // Deoptimize all compiled code that depends on this class. 
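The update_jmethod_ids() added above preserves JNI's stable method handles across a redefinition by redirecting each jmethodID from the old methodOop to its replacement. Its essence, with a plain map standing in for the JNIHandles bookkeeping:

#include <cstdio>
#include <unordered_map>

struct Method { const char* name; int version; };
using jmethodID_t = int;   // opaque handle; stand-in for jmethodID

// Stand-in for JNIHandles: jmethodIDs stay stable, the method they
// resolve to is swapped during redefinition.
static std::unordered_map<jmethodID_t, Method*> g_jmethod_ids;

static void change_method_associated_with_jmethod_id(jmethodID_t id, Method* m) {
  g_jmethod_ids[id] = m;   // callers holding `id` now resolve to `m`
}

int main() {
  Method old_m{"foo()V", 1}, new_m{"foo()V", 2};
  jmethodID_t id = 42;
  g_jmethod_ids[id] = &old_m;                            // before redefinition
  change_method_associated_with_jmethod_id(id, &new_m);  // after redefinition
  std::printf("jmethodID %d -> %s v%d\n", id,
              g_jmethod_ids[id]->name, g_jmethod_ids[id]->version);
  return 0;
}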
+ // + // If the can_redefine_classes capability is obtained in the onload +@@ -2964,7 +1835,10 @@ void VM_RedefineClasses::flush_dependent_code(instanceKlassHandle k_h, TRAPS) { + + // All dependencies have been recorded from startup or this is a second or + // subsequent use of RedefineClasses +- if (JvmtiExport::all_dependencies_are_recorded()) { ++ ++ // For now deopt all ++ // (tw) TODO: Improve the dependency system such that we can safely deopt only a subset of the methods ++ if (0 && JvmtiExport::all_dependencies_are_recorded()) { + Universe::flush_evol_dependents_on(k_h); + } else { + CodeCache::mark_all_nmethods_for_deoptimization(); +@@ -2987,10 +1861,10 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + methodOop old_method; + methodOop new_method; + +- _matching_old_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _matching_new_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); +- _added_methods = NEW_RESOURCE_ARRAY(methodOop, _new_methods->length()); +- _deleted_methods = NEW_RESOURCE_ARRAY(methodOop, _old_methods->length()); ++ _matching_old_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _matching_new_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); ++ _added_methods = NEW_RESOURCE_ARRAY(int, _new_methods->length()); ++ _deleted_methods = NEW_RESOURCE_ARRAY(int, _old_methods->length()); + + _matching_methods_length = 0; + _deleted_methods_length = 0; +@@ -3005,36 +1879,36 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + } + // New method at the end + new_method = (methodOop) _new_methods->obj_at(nj); +- _added_methods[_added_methods_length++] = new_method; ++ _added_methods[_added_methods_length++] = nj; + ++nj; + } else if (nj >= _new_methods->length()) { + // Old method, at the end, is deleted + old_method = (methodOop) _old_methods->obj_at(oj); +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj; + ++oj; + } else { + old_method = (methodOop) _old_methods->obj_at(oj); + new_method = (methodOop) _new_methods->obj_at(nj); + if (old_method->name() == new_method->name()) { + if (old_method->signature() == new_method->signature()) { +- _matching_old_methods[_matching_methods_length ] = old_method; +- _matching_new_methods[_matching_methods_length++] = new_method; ++ _matching_old_methods[_matching_methods_length ] = oj;//old_method; ++ _matching_new_methods[_matching_methods_length++] = nj;//new_method; + ++nj; + ++oj; + } else { + // added overloaded have already been moved to the end, + // so this is a deleted overloaded method +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; + ++oj; + } + } else { // names don't match + if (old_method->name()->fast_compare(new_method->name()) > 0) { + // new method +- _added_methods[_added_methods_length++] = new_method; ++ _added_methods[_added_methods_length++] = nj;//new_method; + ++nj; + } else { + // deleted method +- _deleted_methods[_deleted_methods_length++] = old_method; ++ _deleted_methods[_deleted_methods_length++] = oj;//old_method; + ++oj; + } + } +@@ -3042,6 +1916,7 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + } + assert(_matching_methods_length + _deleted_methods_length == _old_methods->length(), "sanity"); + assert(_matching_methods_length + _added_methods_length == _new_methods->length(), "sanity"); ++ TRACE_RC3("Matching methods = %d / deleted methods = %d / 
added methods = %d", _matching_methods_length, _deleted_methods_length, _added_methods_length); + } + + +@@ -3049,287 +1924,176 @@ void VM_RedefineClasses::compute_added_deleted_matching_methods() { + // Install the redefinition of a class: + // - house keeping (flushing breakpoints and caches, deoptimizing + // dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding a weak reference to track the obsolete but interesting +-// parts of the_class + // - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-void VM_RedefineClasses::redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS) { ++void VM_RedefineClasses::redefine_single_class(instanceKlassHandle the_new_class, TRAPS) { ++ ++ ResourceMark rm(THREAD); + +- RC_TIMER_START(_timer_rsc_phase1); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); + +- oop the_class_mirror = JNIHandles::resolve_non_null(the_jclass); +- klassOop the_class_oop = java_lang_Class::as_klassOop(the_class_mirror); +- instanceKlassHandle the_class = instanceKlassHandle(THREAD, the_class_oop); ++ instanceKlassHandle the_old_class = instanceKlassHandle(THREAD, the_new_class->old_version()); + ++#ifndef JVMTI_KERNEL + // Remove all breakpoints in methods of this class + JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints(); +- jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop); ++ jvmti_breakpoints.clearall_in_class_at_safepoint(the_old_class()); ++#endif // !JVMTI_KERNEL + +- if (the_class_oop == Universe::reflect_invoke_cache()->klass()) { ++ if (the_old_class() == Universe::reflect_invoke_cache()->klass()) { + // We are redefining java.lang.reflect.Method. Method.invoke() is + // cached and users of the cache care about each active version of + // the method so we have to track this previous version. + // Do this before methods get switched + Universe::reflect_invoke_cache()->add_previous_version( +- the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); ++ the_old_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum())); + } + +- // Deoptimize all compiled code that depends on this class +- flush_dependent_code(the_class, THREAD); +- +- _old_methods = the_class->methods(); +- _new_methods = scratch_class->methods(); +- _the_class_oop = the_class_oop; ++ _old_methods = the_old_class->methods(); ++ _new_methods = the_new_class->methods(); + compute_added_deleted_matching_methods(); +- update_jmethod_ids(); +- +- // Attach new constant pool to the original klass. The original +- // klass still refers to the old constant pool (for now). +- scratch_class->constants()->set_pool_holder(the_class()); +- +-#if 0 +- // In theory, with constant pool merging in place we should be able +- // to save space by using the new, merged constant pool in place of +- // the old constant pool(s). By "pool(s)" I mean the constant pool in +- // the klass version we are replacing now and any constant pool(s) in +- // previous versions of klass. Nice theory, doesn't work in practice. 
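Stepping back to compute_added_deleted_matching_methods() from the previous hunk: it is a merge-style walk over two name-sorted method arrays that classifies every method as matching, added, or deleted (the DCEVM change merely stores int indices instead of methodOops). The same classification over plain strings, alphabetical where HotSpot orders by Symbol::fast_compare:

#include <cstdio>
#include <cstring>
#include <vector>

// Merge-walk two name-sorted lists, like compute_added_deleted_matching_methods().
// Signatures are folded into the names here for brevity.
int main() {
  std::vector<const char*> olds = {"a()V", "c()V", "d()V"};
  std::vector<const char*> news = {"a()V", "b()V", "d()V"};
  size_t oj = 0, nj = 0;
  while (oj < olds.size() || nj < news.size()) {
    if (oj >= olds.size())      std::printf("added   %s\n", news[nj++]);
    else if (nj >= news.size()) std::printf("deleted %s\n", olds[oj++]);
    else {
      int c = std::strcmp(olds[oj], news[nj]);
      if (c == 0)     { std::printf("match   %s\n", olds[oj]); ++oj; ++nj; }
      else if (c < 0)   std::printf("deleted %s\n", olds[oj++]);
      else              std::printf("added   %s\n", news[nj++]);
    }
  }
  return 0;
}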
+- // When this code is enabled, even simple programs throw NullPointer +- // exceptions. I'm guessing that this is caused by some constant pool +- // cache difference between the new, merged constant pool and the +- // constant pool that was just being used by the klass. I'm keeping +- // this code around to archive the idea, but the code has to remain +- // disabled for now. +- +- // Attach each old method to the new constant pool. This can be +- // done here since we are past the bytecode verification and +- // constant pool optimization phases. +- for (int i = _old_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)_old_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- +- { +- // walk all previous versions of the klass +- instanceKlass *ik = (instanceKlass *)the_class()->klass_part(); +- PreviousVersionWalker pvw(ik); +- instanceKlassHandle ikh; +- do { +- ikh = pvw.next_previous_version(); +- if (!ikh.is_null()) { +- ik = ikh(); +- +- // attach previous version of klass to the new constant pool +- ik->set_constants(scratch_class->constants()); +- +- // Attach each method in the previous version of klass to the +- // new constant pool +- objArrayOop prev_methods = ik->methods(); +- for (int i = prev_methods->length() - 1; i >= 0; i--) { +- methodOop method = (methodOop)prev_methods->obj_at(i); +- method->set_constants(scratch_class->constants()); +- } +- } +- } while (!ikh.is_null()); +- } +-#endif +- +- // Replace methods and constantpool +- the_class->set_methods(_new_methods); +- scratch_class->set_methods(_old_methods); // To prevent potential GCing of the old methods, +- // and to be able to undo operation easily. +- +- constantPoolOop old_constants = the_class->constants(); +- the_class->set_constants(scratch_class->constants()); +- scratch_class->set_constants(old_constants); // See the previous comment. +-#if 0 +- // We are swapping the guts of "the new class" with the guts of "the +- // class". Since the old constant pool has just been attached to "the +- // new class", it seems logical to set the pool holder in the old +- // constant pool also. However, doing this will change the observable +- // class hierarchy for any old methods that are still executing. A +- // method can query the identity of its "holder" and this query uses +- // the method's constant pool link to find the holder. The change in +- // holding class from "the class" to "the new class" can confuse +- // things. +- // +- // Setting the old constant pool's holder will also cause +- // verification done during vtable initialization below to fail. +- // During vtable initialization, the vtable's class is verified to be +- // a subtype of the method's holder. The vtable's class is "the +- // class" and the method's holder is gotten from the constant pool +- // link in the method itself. For "the class"'s directly implemented +- // methods, the method holder is "the class" itself (as gotten from +- // the new constant pool). The check works fine in this case. The +- // check also works fine for methods inherited from super classes. +- // +- // Miranda methods are a little more complicated. A miranda method is +- // provided by an interface when the class implementing the interface +- // does not provide its own method. These interfaces are implemented +- // internally as an instanceKlass. These special instanceKlasses +- // share the constant pool of the class that "implements" the +- // interface. 
By sharing the constant pool, the method holder of a +- // miranda method is the class that "implements" the interface. In a +- // non-redefine situation, the subtype check works fine. However, if +- // the old constant pool's pool holder is modified, then the check +- // fails because there is no class hierarchy relationship between the +- // vtable's class and "the new class". +- +- old_constants->set_pool_holder(scratch_class()); +-#endif + + // track which methods are EMCP for add_previous_version() call below +- BitMap emcp_methods(_old_methods->length()); ++ ++ // (tw) TODO: Check if we need the concept of EMCP? ++ BitMap emcp_methods(_old_methods->length()); + int emcp_method_count = 0; + emcp_methods.clear(); // clears 0..(length() - 1) ++ ++ // We need to mark methods as old!! + check_methods_and_mark_as_obsolete(&emcp_methods, &emcp_method_count); +- transfer_old_native_function_registrations(the_class); +- +- // The class file bytes from before any retransformable agents mucked +- // with them was cached on the scratch class, move to the_class. +- // Note: we still want to do this if nothing needed caching since it +- // should get cleared in the_class too. +- if (the_class->get_cached_class_file_bytes() == 0) { +- // the_class doesn't have a cache yet so copy it +- the_class->set_cached_class_file( +- scratch_class->get_cached_class_file_bytes(), +- scratch_class->get_cached_class_file_len()); +- } +-#ifndef PRODUCT +- else { +- assert(the_class->get_cached_class_file_bytes() == +- scratch_class->get_cached_class_file_bytes(), "cache ptrs must match"); +- assert(the_class->get_cached_class_file_len() == +- scratch_class->get_cached_class_file_len(), "cache lens must match"); +- } +-#endif ++ update_jmethod_ids(); + +- // Replace inner_classes +- typeArrayOop old_inner_classes = the_class->inner_classes(); +- the_class->set_inner_classes(scratch_class->inner_classes()); +- scratch_class->set_inner_classes(old_inner_classes); ++ // TODO: ++ transfer_old_native_function_registrations(the_old_class); + +- // Initialize the vtable and interface table after +- // methods have been rewritten +- { +- ResourceMark rm(THREAD); +- // no exception should happen here since we explicitly +- // do not check loader constraints. +- // compare_and_normalize_class_versions has already checked: +- // - classloaders unchanged, signatures unchanged +- // - all instanceKlasses for redefined classes reused & contents updated +- the_class->vtable()->initialize_vtable(false, THREAD); +- the_class->itable()->initialize_itable(false, THREAD); +- assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception"); +- } +- +- // Leave arrays of jmethodIDs and itable index cache unchanged +- +- // Copy the "source file name" attribute from new class version +- the_class->set_source_file_name(scratch_class->source_file_name()); +- +- // Copy the "source debug extension" attribute from new class version +- the_class->set_source_debug_extension( +- scratch_class->source_debug_extension(), +- scratch_class->source_debug_extension() == NULL ? 
0 : +- (int)strlen(scratch_class->source_debug_extension())); +- +- // Use of javac -g could be different in the old and the new +- if (scratch_class->access_flags().has_localvariable_table() != +- the_class->access_flags().has_localvariable_table()) { +- +- AccessFlags flags = the_class->access_flags(); +- if (scratch_class->access_flags().has_localvariable_table()) { +- flags.set_has_localvariable_table(); +- } else { +- flags.clear_has_localvariable_table(); +- } +- the_class->set_access_flags(flags); +- } +- +- // Replace class annotation fields values +- typeArrayOop old_class_annotations = the_class->class_annotations(); +- the_class->set_class_annotations(scratch_class->class_annotations()); +- scratch_class->set_class_annotations(old_class_annotations); +- +- // Replace fields annotation fields values +- objArrayOop old_fields_annotations = the_class->fields_annotations(); +- the_class->set_fields_annotations(scratch_class->fields_annotations()); +- scratch_class->set_fields_annotations(old_fields_annotations); +- +- // Replace methods annotation fields values +- objArrayOop old_methods_annotations = the_class->methods_annotations(); +- the_class->set_methods_annotations(scratch_class->methods_annotations()); +- scratch_class->set_methods_annotations(old_methods_annotations); +- +- // Replace methods parameter annotation fields values +- objArrayOop old_methods_parameter_annotations = +- the_class->methods_parameter_annotations(); +- the_class->set_methods_parameter_annotations( +- scratch_class->methods_parameter_annotations()); +- scratch_class->set_methods_parameter_annotations(old_methods_parameter_annotations); +- +- // Replace methods default annotation fields values +- objArrayOop old_methods_default_annotations = +- the_class->methods_default_annotations(); +- the_class->set_methods_default_annotations( +- scratch_class->methods_default_annotations()); +- scratch_class->set_methods_default_annotations(old_methods_default_annotations); +- +- // Replace minor version number of class file +- u2 old_minor_version = the_class->minor_version(); +- the_class->set_minor_version(scratch_class->minor_version()); +- scratch_class->set_minor_version(old_minor_version); +- +- // Replace major version number of class file +- u2 old_major_version = the_class->major_version(); +- the_class->set_major_version(scratch_class->major_version()); +- scratch_class->set_major_version(old_major_version); +- +- // Replace CP indexes for class and name+type of enclosing method +- u2 old_class_idx = the_class->enclosing_method_class_index(); +- u2 old_method_idx = the_class->enclosing_method_method_index(); +- the_class->set_enclosing_method_indices( +- scratch_class->enclosing_method_class_index(), +- scratch_class->enclosing_method_method_index()); +- scratch_class->set_enclosing_method_indices(old_class_idx, old_method_idx); +- +- // keep track of previous versions of this class +- the_class->add_previous_version(scratch_class, &emcp_methods, +- emcp_method_count); +- +- RC_TIMER_STOP(_timer_rsc_phase1); +- RC_TIMER_START(_timer_rsc_phase2); + +- // Adjust constantpool caches and vtables for all classes +- // that reference methods of the evolved class. +- SystemDictionary::classes_do(adjust_cpool_cache_and_vtable, THREAD); + +- if (the_class->oop_map_cache() != NULL) { +- // Flush references to any obsolete methods from the oop map cache +- // so that obsolete methods are not pinned. 
+- the_class->oop_map_cache()->flush_obsolete_entries(); ++#ifdef ASSERT ++ ++// klassOop systemLookup1 = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++// assert(systemLookup1 == the_new_class(), "New class must be in system dictionary!"); ++ ++ //JNIHandles::verify(); ++ ++// klassOop systemLookup = SystemDictionary::resolve_or_null(the_old_class->name(), the_old_class->class_loader(), the_old_class->protection_domain(), THREAD); ++ ++// assert(systemLookup == the_new_class(), "New class must be in system dictionary!"); ++ assert(the_new_class->old_version() != NULL, "Must not be null"); ++ assert(the_new_class->old_version()->klass_part()->new_version() == the_new_class(), "Must equal"); ++ ++ for (int i=0; i<the_new_class->methods()->length(); i++) { ++ assert(((methodOop)the_new_class->methods()->obj_at(i))->method_holder() == the_new_class(), "method holder must match!"); + } + ++ _old_methods->verify(); ++ _new_methods->verify(); ++ ++ the_new_class->vtable()->verify(tty); ++ the_old_class->vtable()->verify(tty); ++ ++#endif ++ + // increment the classRedefinedCount field in the_class and in any + // direct and indirect subclasses of the_class +- increment_class_counter((instanceKlass *)the_class()->klass_part(), THREAD); ++ increment_class_counter((instanceKlass *)the_old_class()->klass_part(), THREAD); ++ ++} ++ ++ ++void VM_RedefineClasses::check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p) { ++ TRACE_RC3("Checking matching methods for EMCP"); ++ *emcp_method_count_p = 0; ++ int obsolete_count = 0; ++ int old_index = 0; ++ for (int j = 0; j < _matching_methods_length; ++j, ++old_index) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_matching_old_methods[j]); ++ methodOop new_method = (methodOop)_new_methods->obj_at(_matching_new_methods[j]); ++ methodOop old_array_method; ++ ++ // Maintain an old_index into the _old_methods array by skipping ++ // deleted methods ++ while ((old_array_method = (methodOop) _old_methods->obj_at(old_index)) ++ != old_method) { ++ ++old_index; ++ } + +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00000001, THREAD, +- ("redefined name=%s, count=%d (avail_mem=" UINT64_FORMAT "K)", +- the_class->external_name(), +- java_lang_Class::classRedefinedCount(the_class_mirror), +- os::available_memory() >> 10)); ++ if (MethodComparator::methods_EMCP(old_method, new_method)) { ++ // The EMCP definition from JSR-163 requires the bytecodes to be ++ // the same with the exception of constant pool indices which may ++ // differ. However, the constants referred to by those indices ++ // must be the same. ++ // ++ // We use methods_EMCP() for comparison since constant pool ++ // merging can remove duplicate constant pool entries that were ++ // present in the old method and removed from the rewritten new ++ // method. A faster binary comparison function would consider the ++ // old and new methods to be different when they are actually ++ // EMCP. ++ ++ // track which methods are EMCP for add_previous_version() call ++ emcp_methods->set_bit(old_index); ++ (*emcp_method_count_p)++; ++ ++ // An EMCP method is _not_ obsolete. An obsolete method has a ++ // different jmethodID than the current method. An EMCP method ++ // has the same jmethodID as the current method. 
Having the ++ // same jmethodID for all EMCP versions of a method allows for ++ // a consistent view of the EMCP methods regardless of which ++ // EMCP method you happen to have in hand. For example, a ++ // breakpoint set in one EMCP method will work for all EMCP ++ // versions of the method including the current one. ++ ++ old_method->set_new_version(new_method); ++ new_method->set_old_version(old_method); ++ ++ TRACE_RC3("Found EMCP method %s", old_method->name_and_sig_as_C_string()); ++ ++ // Transfer breakpoints ++ instanceKlass *ik = instanceKlass::cast(old_method->method_holder()); ++ for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = bp->next()) { ++ TRACE_RC2("Checking breakpoint"); ++ TRACE_RC2("%d / %d", bp->match(old_method), bp->match(new_method)); ++ if (bp->match(old_method)) { ++ assert(bp->match(new_method), "if old method is method, then new method must match too"); ++ TRACE_RC2("Found a breakpoint in an old EMCP method"); ++ new_method->set_breakpoint(bp->bci()); ++ } ++ } ++ } else { ++ // mark obsolete methods as such ++ old_method->set_is_obsolete(); ++ obsolete_count++; ++ ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ TRACE_RC3("mark %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string()); ++ } ++ old_method->set_is_old(); ++ } ++ for (int i = 0; i < _deleted_methods_length; ++i) { ++ methodOop old_method = (methodOop)_old_methods->obj_at(_deleted_methods[i]); + +- RC_TIMER_STOP(_timer_rsc_phase2); +-} // end redefine_single_class() ++ //assert(old_method->vtable_index() < 0, ++ // "cannot delete methods with vtable entries");; + ++ // Mark all deleted methods as old and obsolete ++ old_method->set_is_old(); ++ old_method->set_is_obsolete(); ++ ++obsolete_count; ++ // With tracing we try not to "yack" too much. The position of ++ // this trace assumes there are fewer obsolete methods than ++ // EMCP methods. ++ TRACE_RC3("mark deleted %s(%s) as obsolete", ++ old_method->name()->as_C_string(), ++ old_method->signature()->as_C_string()); ++ } ++ //assert((*emcp_method_count_p + obsolete_count) == _old_methods->length(), "sanity check"); ++ TRACE_RC3("EMCP_cnt=%d, obsolete_cnt=%d !", *emcp_method_count_p, obsolete_count); ++} + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
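The hunk below adds do_topological_class_sorting(), which orders the affected classes so that every superclass and superinterface is redefined before its subtypes, returning JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION when no such order exists. It is Kahn's algorithm done in place with quadratic scans; a compact standalone sketch:

#include <cstdio>
#include <utility>
#include <vector>

// Kahn-style in-place topological sort mirroring the shape of
// do_topological_class_sorting(): `links` holds (super, sub) edges, and a
// node with no incoming edge is repeatedly swapped to the front.
static bool topo_sort(std::vector<int>& nodes,
                      std::vector<std::pair<int,int>>& links) {
  for (size_t i = 0; i < nodes.size(); i++) {
    size_t j = i;
    for (; j < nodes.size(); j++) {                  // find node w/o incoming edge
      bool has_incoming = false;
      for (auto& e : links)
        if (e.second == nodes[j]) { has_incoming = true; break; }
      if (!has_incoming) break;
    }
    if (j == nodes.size()) return false;             // cycle: circular definition
    for (size_t k = 0; k < links.size(); )           // drop this node's out-edges
      if (links[k].first == nodes[j]) links.erase(links.begin() + k); else k++;
    std::swap(nodes[i], nodes[j]);
  }
  return true;
}

int main() {
  std::vector<int> classes = {2, 1, 0};              // 0 super of 1 super of 2
  std::vector<std::pair<int,int>> links = {{0, 1}, {1, 2}};
  if (topo_sort(classes, links))
    for (int c : classes) std::printf("redefine class %d\n", c);
  return 0;
}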
+@@ -3338,134 +2102,267 @@ void VM_RedefineClasses::increment_class_counter(instanceKlass *ik, TRAPS) { + klassOop class_oop = java_lang_Class::as_klassOop(class_mirror); + int new_count = java_lang_Class::classRedefinedCount(class_mirror) + 1; + java_lang_Class::set_classRedefinedCount(class_mirror, new_count); +- +- if (class_oop != _the_class_oop) { +- // _the_class_oop count is printed at end of redefine_single_class() +- RC_TRACE_WITH_THREAD(0x00000008, THREAD, +- ("updated count in subclass=%s to %d", ik->external_name(), new_count)); +- } +- +- for (Klass *subk = ik->subklass(); subk != NULL; +- subk = subk->next_sibling()) { +- if (subk->oop_is_instance()) { +- // Only update instanceKlasses +- instanceKlass *subik = (instanceKlass*)subk; +- // recursively do subclasses of the current subclass +- increment_class_counter(subik, THREAD); +- } +- } ++ TRACE_RC3("updated count for class=%s to %d", ik->external_name(), new_count); + } + +-void VM_RedefineClasses::check_class(klassOop k_oop, +- oop initiating_loader, TRAPS) { ++#ifndef PRODUCT ++void VM_RedefineClasses::check_class(klassOop k_oop, TRAPS) { + Klass *k = k_oop->klass_part(); + if (k->oop_is_instance()) { + HandleMark hm(THREAD); + instanceKlass *ik = (instanceKlass *) k; +- bool no_old_methods = true; // be optimistic +- ResourceMark rm(THREAD); ++ assert(ik->is_newest_version(), "must be latest version in system dictionary"); ++ ++ if (ik->vtable_length() > 0) { ++ ResourceMark rm(THREAD); ++ if (!ik->vtable()->check_no_old_or_obsolete_entries()) { ++ TRACE_RC1("size of class: %d\n", k_oop->size()); ++ TRACE_RC1("klassVtable::check_no_old_entries failure -- OLD method found -- class: %s", ik->signature_name()); ++ assert(false, "OLD method found"); ++ } ++ ++ ik->vtable()->verify(tty, true); ++ } ++ } ++} ++ ++#endif + +- // a vtable should never contain old or obsolete methods +- if (ik->vtable_length() > 0 && +- !ik->vtable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassVtable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->vtable()->dump_vtable(); ++static bool match_right(void* value, Pair<klassOop, klassOop> elem) { ++ return elem.right() == value; ++} ++ ++jvmtiError VM_RedefineClasses::do_topological_class_sorting( const jvmtiClassDefinition *class_defs, int class_count, TRAPS) ++{ ++ GrowableArray< Pair<klassOop, klassOop> > links; ++ ++ for (int i=0; i<class_count; i++) { ++ ++ oop mirror = JNIHandles::resolve_non_null(class_defs[i].klass); ++ instanceKlassHandle the_class(THREAD, java_lang_Class::as_klassOop(mirror)); ++ Handle the_class_loader(THREAD, the_class->class_loader()); ++ Handle protection_domain(THREAD, the_class->protection_domain()); ++ ++ ClassFileStream st((u1*) class_defs[i].class_bytes, ++ class_defs[i].class_byte_count, (char *)"__VM_RedefineClasses__"); ++ ClassFileParser cfp(&st); ++ ++ GrowableArray<Symbol*> symbolArr; ++ TempNewSymbol parsed_name; ++ TRACE_RC2("Before find super symbols of class %s", the_class->name()->as_C_string()); ++ cfp.parseClassFile(the_class->name(), the_class_loader, protection_domain, the_class, KlassHandle(), NULL, &symbolArr, parsed_name, false, THREAD); ++ ++ for (int j=0; j<symbolArr.length(); j++) { ++ Symbol* sym = symbolArr.at(j); ++ TRACE_RC3("Before adding link to super class %s", sym->as_C_string()); ++ klassOop super_klass = SystemDictionary::resolve_or_null(sym, the_class_loader, 
protection_domain, THREAD); ++ if (super_klass != NULL) { ++ instanceKlassHandle the_super_class(THREAD, super_klass); ++ if (_affected_klasses->contains(the_super_class)) { ++ TRACE_RC2("Found class to link"); ++ links.append(Pair<klassOop, klassOop>(super_klass, the_class())); ++ } + } +- no_old_methods = false; +- } +- +- // an itable should never contain old or obsolete methods +- if (ik->itable_length() > 0 && +- !ik->itable()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("klassItable::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->itable()->dump_itable(); ++ } ++ ++ assert(the_class->check_redefinition_flag(Klass::MarkedAsAffected), ""); ++ the_class->clear_redefinition_flag(Klass::MarkedAsAffected); ++ } ++ ++ ++ TRACE_RC1("Identified links between classes! "); ++ ++ for (int i=0; i < _affected_klasses->length(); i++) { ++ instanceKlassHandle klass = _affected_klasses->at(i); ++ ++ if (klass->check_redefinition_flag(Klass::MarkedAsAffected)) { ++ klass->clear_redefinition_flag(Klass::MarkedAsAffected); ++ klassOop superKlass = klass->super(); ++ if (_affected_klasses->contains(superKlass)) { ++ links.append(Pair<klassOop, klassOop>(superKlass, klass())); + } +- no_old_methods = false; +- } +- +- // the constant pool cache should never contain old or obsolete methods +- if (ik->constants() != NULL && +- ik->constants()->cache() != NULL && +- !ik->constants()->cache()->check_no_old_or_obsolete_entries()) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- RC_TRACE_WITH_THREAD(0x00004000, THREAD, +- ("cp-cache::check_no_old_or_obsolete_entries failure" +- " -- OLD or OBSOLETE method found -- class: %s", +- ik->signature_name())); +- ik->constants()->cache()->dump_cache(); ++ ++ objArrayOop superInterfaces = klass->local_interfaces(); ++ for (int j=0; j<superInterfaces->length(); j++) { ++ klassOop interfaceKlass = (klassOop)superInterfaces->obj_at(j); ++ if (_affected_klasses->contains(interfaceKlass)) { ++ links.append(Pair<klassOop, klassOop>(interfaceKlass, klass())); ++ } + } +- no_old_methods = false; ++ } ++ } ++ ++ IF_TRACE_RC2 { ++ TRACE_RC2("Identified links: "); ++ for (int i=0; i<links.length(); i++) { ++ TRACE_RC2("%s to %s", links.at(i).left()->klass_part()->name()->as_C_string(), ++ links.at(i).right()->klass_part()->name()->as_C_string()); ++ } ++ } ++ ++ for (int i = 0; i < _affected_klasses->length(); i++) { ++ int j; ++ for (j = i; j < _affected_klasses->length(); j++) { ++ // Search for node with no incoming edges ++ klassOop oop = _affected_klasses->at(j)(); ++ int k = links.find(oop, match_right); ++ if (k == -1) break; ++ } ++ if (j == _affected_klasses->length()) { ++ return JVMTI_ERROR_CIRCULAR_CLASS_DEFINITION; + } + +- if (!no_old_methods) { +- if (RC_TRACE_ENABLED(0x00004000)) { +- dump_methods(); ++ // Remove all links from this node ++ klassOop oop = _affected_klasses->at(j)(); ++ int k = 0; ++ while (k < links.length()) { ++ if (links.adr_at(k)->left() == oop) { ++ links.delete_at(k); + } else { +- tty->print_cr("INFO: use the '-XX:TraceRedefineClasses=16384' option " +- "to see more info about the following guarantee() failure."); ++ k++; + } +- guarantee(false, "OLD and/or OBSOLETE method(s) found"); + } ++ ++ // Swap node ++ instanceKlassHandle tmp = _affected_klasses->at(j); ++ _affected_klasses->at_put(j, _affected_klasses->at(i)); ++ _affected_klasses->at_put(i, tmp); + } ++ ++ return 
JVMTI_ERROR_NONE; + } + +-void VM_RedefineClasses::dump_methods() { +- int j; +- RC_TRACE(0x00004000, ("_old_methods --")); +- for (j = 0; j < _old_methods->length(); ++j) { +- methodOop m = (methodOop) _old_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_new_methods --")); +- for (j = 0; j < _new_methods->length(); ++j) { +- methodOop m = (methodOop) _new_methods->obj_at(j); +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_matching_(old/new)_methods --")); +- for (j = 0; j < _matching_methods_length; ++j) { +- methodOop m = _matching_old_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- m = _matching_new_methods[j]; +- RC_TRACE_NO_CR(0x00004000, (" (%5d) ", m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_deleted_methods --")); +- for (j = 0; j < _deleted_methods_length; ++j) { +- methodOop m = _deleted_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); +- } +- RC_TRACE(0x00004000, ("_added_methods --")); +- for (j = 0; j < _added_methods_length; ++j) { +- methodOop m = _added_methods[j]; +- RC_TRACE_NO_CR(0x00004000, ("%4d (%5d) ", j, m->vtable_index())); +- m->access_flags().print_on(tty); +- tty->print(" -- "); +- m->print_name(tty); +- tty->cr(); ++// This internal class transfers the native function registration from old methods ++// to new methods. It is designed to handle both the simple case of unchanged ++// native methods and the complex cases of native method prefixes being added and/or ++// removed. ++// It expects only to be used during the VM_RedefineClasses op (a safepoint). ++// ++// This class is used after the new methods have been installed in "the_class". ++// ++// So, for example, the following must be handled. Where 'm' is a method and ++// a number followed by an underscore is a prefix. ++// ++// Old Name New Name ++// Simple transfer to new method m -> m ++// Add prefix m -> 1_m ++// Remove prefix 1_m -> m ++// Simultaneous add of prefixes m -> 3_2_1_m ++// Simultaneous removal of prefixes 3_2_1_m -> m ++// Simultaneous add and remove 1_m -> 2_m ++// Same, caused by prefix removal only 3_2_1_m -> 3_2_m ++// ++class TransferNativeFunctionRegistration { ++private: ++ instanceKlassHandle the_class; ++ int prefix_count; ++ char** prefixes; ++ ++ // Recursively search the binary tree of possibly prefixed method names. ++ // Iteration could be used if all agents were well behaved. Full tree walk is ++ // more resilient to agents not cleaning up intermediate methods. ++ // Branch at each depth in the binary tree is: ++ // (1) without the prefix. ++ // (2) with the prefix. ++ // where 'prefix' is the prefix at that 'depth' (first prefix, second prefix,...) 
++ methodOop search_prefix_name_space(int depth, char* name_str, size_t name_len, ++ Symbol* signature) { ++ Symbol* name_symbol = SymbolTable::probe(name_str, (int)name_len); ++ if (name_symbol != NULL) { ++ methodOop method = Klass::cast(the_class()->klass_part()->new_version())->lookup_method(name_symbol, signature); ++ if (method != NULL) { ++ // Even if prefixed, intermediate methods must exist. ++ if (method->is_native()) { ++ // Wahoo, we found a (possibly prefixed) version of the method, return it. ++ return method; ++ } ++ if (depth < prefix_count) { ++ // Try applying further prefixes (other than this one). ++ method = search_prefix_name_space(depth+1, name_str, name_len, signature); ++ if (method != NULL) { ++ return method; // found ++ } ++ ++ // Try adding this prefix to the method name and see if it matches ++ // another method name. ++ char* prefix = prefixes[depth]; ++ size_t prefix_len = strlen(prefix); ++ size_t trial_len = name_len + prefix_len; ++ char* trial_name_str = NEW_RESOURCE_ARRAY(char, trial_len + 1); ++ strcpy(trial_name_str, prefix); ++ strcat(trial_name_str, name_str); ++ method = search_prefix_name_space(depth+1, trial_name_str, trial_len, ++ signature); ++ if (method != NULL) { ++ // If found along this branch, it was prefixed, mark as such ++ method->set_is_prefixed_native(); ++ return method; // found ++ } ++ } ++ } ++ } ++ return NULL; // This whole branch bore nothing ++ } ++ ++ // Return the method name with old prefixes stripped away. ++ char* method_name_without_prefixes(methodOop method) { ++ Symbol* name = method->name(); ++ char* name_str = name->as_utf8(); ++ ++ // Old prefixing may be defunct, strip prefixes, if any. ++ for (int i = prefix_count-1; i >= 0; i--) { ++ char* prefix = prefixes[i]; ++ size_t prefix_len = strlen(prefix); ++ if (strncmp(prefix, name_str, prefix_len) == 0) { ++ name_str += prefix_len; ++ } ++ } ++ return name_str; ++ } ++ ++ // Strip any prefixes off the old native method, then try to find a ++ // (possibly prefixed) new native that matches it. ++ methodOop strip_and_search_for_new_native(methodOop method) { ++ ResourceMark rm; ++ char* name_str = method_name_without_prefixes(method); ++ return search_prefix_name_space(0, name_str, strlen(name_str), ++ method->signature()); ++ } ++ ++public: ++ ++ // Construct a native method transfer processor for this class. ++ TransferNativeFunctionRegistration(instanceKlassHandle _the_class) { ++ assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); ++ ++ the_class = _the_class; ++ prefixes = JvmtiExport::get_all_native_method_prefixes(&prefix_count); ++ } ++ ++ // Attempt to transfer any of the old or deleted methods that are native ++ void transfer_registrations(instanceKlassHandle old_klass, int* old_methods, int methods_length) { ++ for (int j = 0; j < methods_length; j++) { ++ methodOop old_method = (methodOop)old_klass->methods()->obj_at(old_methods[j]); ++ ++ if (old_method->is_native() && old_method->has_native_function()) { ++ methodOop new_method = strip_and_search_for_new_native(old_method); ++ if (new_method != NULL) { ++ // Actually set the native function in the new method. ++ // Redefine does not send events (except CFLH), certainly not this ++ // behind the scenes re-registration. 
++ new_method->set_native_function(old_method->native_function(), ++ !methodOopDesc::native_bind_event_is_interesting); ++ ++ TRACE_RC3("Transferring native function for method %s", old_method->name()->as_C_string()); ++ } ++ } ++ } + } ++}; ++ ++// Don't lose the association between a native method and its JNI function. ++void VM_RedefineClasses::transfer_old_native_function_registrations(instanceKlassHandle old_klass) { ++ TransferNativeFunctionRegistration transfer(old_klass); ++ transfer.transfer_registrations(old_klass, _deleted_methods, _deleted_methods_length); ++ transfer.transfer_registrations(old_klass, _matching_old_methods, _matching_methods_length); + } +diff --git a/src/share/vm/prims/jvmtiRedefineClasses.hpp b/src/share/vm/prims/jvmtiRedefineClasses.hpp +index 671f2ae..8333cee 100644 +--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp ++++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -30,332 +30,29 @@ + #include "memory/resourceArea.hpp" + #include "oops/objArrayKlass.hpp" + #include "oops/objArrayOop.hpp" ++#include "oops/fieldStreams.hpp" + #include "prims/jvmtiRedefineClassesTrace.hpp" +-#include "runtime/vm_operations.hpp" +- +-// Introduction: +-// +-// The RedefineClasses() API is used to change the definition of one or +-// more classes. While the API supports redefining more than one class +-// in a single call, in general, the API is discussed in the context of +-// changing the definition of a single current class to a single new +-// class. For clarity, the current class is will always be called +-// "the_class" and the new class will always be called "scratch_class". +-// +-// The name "the_class" is used because there is only one structure +-// that represents a specific class; redefinition does not replace the +-// structure, but instead replaces parts of the structure. The name +-// "scratch_class" is used because the structure that represents the +-// new definition of a specific class is simply used to carry around +-// the parts of the new definition until they are used to replace the +-// appropriate parts in the_class. Once redefinition of a class is +-// complete, scratch_class is thrown away. +-// +-// +-// Implementation Overview: +-// +-// The RedefineClasses() API is mostly a wrapper around the VM op that +-// does the real work. The work is split in varying degrees between +-// doit_prologue(), doit() and doit_epilogue(). +-// +-// 1) doit_prologue() is called by the JavaThread on the way to a +-// safepoint. 
It does parameter verification and loads scratch_class +-// which involves: +-// - parsing the incoming class definition using the_class' class +-// loader and security context +-// - linking scratch_class +-// - merging constant pools and rewriting bytecodes as needed +-// for the merged constant pool +-// - verifying the bytecodes in scratch_class +-// - setting up the constant pool cache and rewriting bytecodes +-// as needed to use the cache +-// - finally, scratch_class is compared to the_class to verify +-// that it is a valid replacement class +-// - if everything is good, then scratch_class is saved in an +-// instance field in the VM operation for the doit() call +-// +-// Note: A JavaThread must do the above work. +-// +-// 2) doit() is called by the VMThread during a safepoint. It installs +-// the new class definition(s) which involves: +-// - retrieving the scratch_class from the instance field in the +-// VM operation +-// - house keeping (flushing breakpoints and caches, deoptimizing +-// dependent compiled code) +-// - replacing parts in the_class with parts from scratch_class +-// - adding weak reference(s) to track the obsolete but interesting +-// parts of the_class +-// - adjusting constant pool caches and vtables in other classes +-// that refer to methods in the_class. These adjustments use the +-// SystemDictionary::classes_do() facility which only allows +-// a helper method to be specified. The interesting parameters +-// that we would like to pass to the helper method are saved in +-// static global fields in the VM operation. +-// - telling the SystemDictionary to notice our changes +-// +-// Note: the above work must be done by the VMThread to be safe. +-// +-// 3) doit_epilogue() is called by the JavaThread after the VM op +-// is finished and the safepoint is done. It simply cleans up +-// memory allocated in doit_prologue() and used in doit(). +-// +-// +-// Constant Pool Details: +-// +-// When the_class is redefined, we cannot just replace the constant +-// pool in the_class with the constant pool from scratch_class because +-// that could confuse obsolete methods that may still be running. +-// Instead, the constant pool from the_class, old_cp, is merged with +-// the constant pool from scratch_class, scratch_cp. The resulting +-// constant pool, merge_cp, replaces old_cp in the_class. +-// +-// The key part of any merging algorithm is the entry comparison +-// function so we have to know the types of entries in a constant pool +-// in order to merge two of them together. Constant pools can contain +-// up to 12 different kinds of entries; the JVM_CONSTANT_Unicode entry +-// is not presently used so we only have to worry about the other 11 +-// entry types. For the purposes of constant pool merging, it is +-// helpful to know that the 11 entry types fall into 3 different +-// subtypes: "direct", "indirect" and "double-indirect". +-// +-// Direct CP entries contain data and do not contain references to +-// other CP entries. The following are direct CP entries: +-// JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} +-// +-// Indirect CP entries contain 1 or 2 references to a direct CP entry +-// and no other data. The following are indirect CP entries: +-// JVM_CONSTANT_{Class,NameAndType,String} +-// +-// Double-indirect CP entries contain two references to indirect CP +-// entries and no other data. 
The following are double-indirect CP +-// entries: +-// JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} +-// +-// When comparing entries between two constant pools, the entry types +-// are compared first and if they match, then further comparisons are +-// made depending on the entry subtype. Comparing direct CP entries is +-// simply a matter of comparing the data associated with each entry. +-// Comparing both indirect and double-indirect CP entries requires +-// recursion. +-// +-// Fortunately, the recursive combinations are limited because indirect +-// CP entries can only refer to direct CP entries and double-indirect +-// CP entries can only refer to indirect CP entries. The following is +-// an example illustration of the deepest set of indirections needed to +-// access the data associated with a JVM_CONSTANT_Fieldref entry: +-// +-// JVM_CONSTANT_Fieldref { +-// class_index => JVM_CONSTANT_Class { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-1> +-// } +-// } +-// name_and_type_index => JVM_CONSTANT_NameAndType { +-// name_index => JVM_CONSTANT_Utf8 { +-// <data-2> +-// } +-// descriptor_index => JVM_CONSTANT_Utf8 { +-// <data-3> +-// } +-// } +-// } +-// +-// The above illustration is not a data structure definition for any +-// computer language. The curly braces ('{' and '}') are meant to +-// delimit the context of the "fields" in the CP entry types shown. +-// Each indirection from the JVM_CONSTANT_Fieldref entry is shown via +-// "=>", e.g., the class_index is used to indirectly reference a +-// JVM_CONSTANT_Class entry where the name_index is used to indirectly +-// reference a JVM_CONSTANT_Utf8 entry which contains the interesting +-// <data-1>. In order to understand a JVM_CONSTANT_Fieldref entry, we +-// have to do a total of 5 indirections just to get to the CP entries +-// that contain the interesting pieces of data and then we have to +-// fetch the three pieces of data. This means we have to do a total of +-// (5 + 3) * 2 == 16 dereferences to compare two JVM_CONSTANT_Fieldref +-// entries. +-// +-// Here is the indirection, data and dereference count for each entry +-// type: +-// +-// JVM_CONSTANT_Class 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Double 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Fieldref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Float 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Integer 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_InterfaceMethodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_Long 0 indir, 1 data, 1 deref +-// JVM_CONSTANT_Methodref 2 indir, 3 data, 8 derefs +-// JVM_CONSTANT_NameAndType 1 indir, 2 data, 4 derefs +-// JVM_CONSTANT_String 1 indir, 1 data, 2 derefs +-// JVM_CONSTANT_Utf8 0 indir, 1 data, 1 deref +-// +-// So different subtypes of CP entries require different amounts of +-// work for a proper comparison. +-// +-// Now that we've talked about the different entry types and how to +-// compare them we need to get back to merging. This is not a merge in +-// the "sort -u" sense or even in the "sort" sense. When we merge two +-// constant pools, we copy all the entries from old_cp to merge_cp, +-// preserving entry order. Next we append all the unique entries from +-// scratch_cp to merge_cp and we track the index changes from the +-// location in scratch_cp to the possibly new location in merge_cp. +-// When we are done, any obsolete code that is still running that +-// uses old_cp should not be able to observe any difference if it +-// were to use merge_cp. 
As for the new code in scratch_class, it is +-// modified to use the appropriate index values in merge_cp before it +-// is used to replace the code in the_class. +-// +-// There is one small complication in copying the entries from old_cp +-// to merge_cp. Two of the CP entry types are special in that they are +-// lazily resolved. Before explaining the copying complication, we need +-// to digress into CP entry resolution. +-// +-// JVM_CONSTANT_Class and JVM_CONSTANT_String entries are present in +-// the class file, but are not stored in memory as such until they are +-// resolved. The entries are not resolved unless they are used because +-// resolution is expensive. During class file parsing the entries are +-// initially stored in memory as JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries. These special CP entry types +-// indicate that the JVM_CONSTANT_Class and JVM_CONSTANT_String entries +-// have been parsed, but the index values in the entries have not been +-// validated. After the entire constant pool has been parsed, the index +-// values can be validated and then the entries are converted into +-// JVM_CONSTANT_UnresolvedClass and JVM_CONSTANT_UnresolvedString +-// entries. During this conversion process, the UTF8 values that are +-// indirectly referenced by the JVM_CONSTANT_ClassIndex and +-// JVM_CONSTANT_StringIndex entries are changed into Symbol*s and the +-// entries are modified to refer to the Symbol*s. This optimization +-// eliminates one level of indirection for those two CP entry types and +-// gets the entries ready for verification. During class file parsing +-// it is also possible for JVM_CONSTANT_UnresolvedString entries to be +-// resolved into JVM_CONSTANT_String entries. Verification expects to +-// find JVM_CONSTANT_UnresolvedClass and either JVM_CONSTANT_String or +-// JVM_CONSTANT_UnresolvedString entries and not JVM_CONSTANT_Class +-// entries. +-// +-// Now we can get back to the copying complication. When we copy +-// entries from old_cp to merge_cp, we have to revert any +-// JVM_CONSTANT_Class entries to JVM_CONSTANT_UnresolvedClass entries +-// or verification will fail. +-// +-// It is important to explicitly state that the merging algorithm +-// effectively unresolves JVM_CONSTANT_Class entries that were in the +-// old_cp when they are changed into JVM_CONSTANT_UnresolvedClass +-// entries in the merge_cp. This is done both to make verification +-// happy and to avoid adding more brittleness between RedefineClasses +-// and the constant pool cache. By allowing the constant pool cache +-// implementation to (re)resolve JVM_CONSTANT_UnresolvedClass entries +-// into JVM_CONSTANT_Class entries, we avoid having to embed knowledge +-// about those algorithms in RedefineClasses. +-// +-// Appending unique entries from scratch_cp to merge_cp is straight +-// forward for direct CP entries and most indirect CP entries. For the +-// indirect CP entry type JVM_CONSTANT_NameAndType and for the double- +-// indirect CP entry types, the presence of more than one piece of +-// interesting data makes appending the entries more complicated. +-// +-// For the JVM_CONSTANT_{Double,Float,Integer,Long,Utf8} entry types, +-// the entry is simply copied from scratch_cp to the end of merge_cp. +-// If the index in scratch_cp is different than the destination index +-// in merge_cp, then the change in index value is tracked. 
+-// +-// Note: the above discussion for the direct CP entries also applies +-// to the JVM_CONSTANT_Unresolved{Class,String} entry types. +-// +-// For the JVM_CONSTANT_{Class,String} entry types, since there is only +-// one data element at the end of the recursion, we know that we have +-// either one or two unique entries. If the JVM_CONSTANT_Utf8 entry is +-// unique then it is appended to merge_cp before the current entry. +-// If the JVM_CONSTANT_Utf8 entry is not unique, then the current entry +-// is updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// Note: the above discussion for JVM_CONSTANT_{Class,String} entry +-// types is theoretical. Since those entry types have already been +-// optimized into JVM_CONSTANT_Unresolved{Class,String} entry types, +-// they are handled as direct CP entries. +-// +-// For the JVM_CONSTANT_NameAndType entry type, since there are two +-// data elements at the end of the recursions, we know that we have +-// between one and three unique entries. Any unique JVM_CONSTANT_Utf8 +-// entries are appended to merge_cp before the current entry. For any +-// JVM_CONSTANT_Utf8 entries that are not unique, the current entry is +-// updated to refer to the duplicate entry in merge_cp before it is +-// appended to merge_cp. Again, any changes in index values are tracked +-// as needed. +-// +-// For the JVM_CONSTANT_{Fieldref,InterfaceMethodref,Methodref} entry +-// types, since there are two indirect CP entries and three data +-// elements at the end of the recursions, we know that we have between +-// one and six unique entries. See the JVM_CONSTANT_Fieldref diagram +-// above for an example of all six entries. The uniqueness algorithm +-// for the JVM_CONSTANT_Class and JVM_CONSTANT_NameAndType entries is +-// covered above. Any unique entries are appended to merge_cp before +-// the current entry. For any entries that are not unique, the current +-// entry is updated to refer to the duplicate entry in merge_cp before +-// it is appended to merge_cp. Again, any changes in index values are +-// tracked as needed. +-// +-// +-// Other Details: +-// +-// Details for other parts of RedefineClasses need to be written. +-// This is a placeholder section. +-// +-// +-// Open Issues (in no particular order): +-// +-// - How do we serialize the RedefineClasses() API without deadlocking? +-// +-// - SystemDictionary::parse_stream() was called with a NULL protection +-// domain since the initial version. This has been changed to pass +-// the_class->protection_domain(). This change has been tested with +-// all NSK tests and nothing broke, but what will adding it now break +-// in ways that we don't test? +-// +-// - GenerateOopMap::rewrite_load_or_store() has a comment in its +-// (indirect) use of the Relocator class that the max instruction +-// size is 4 bytes. goto_w and jsr_w are 5 bytes and wide/iinc is +-// 6 bytes. Perhaps Relocator only needs a 4 byte buffer to do +-// what it does to the bytecodes. More investigation is needed. +-// +-// - java.lang.Object methods can be called on arrays. This is +-// implemented via the arrayKlassOop vtable which we don't +-// update. For example, if we redefine java.lang.Object.toString(), +-// then the new version of the method will not be called for array +-// objects. +-// +-// - How do we know if redefine_single_class() and the guts of +-// instanceKlass are out of sync? 
I don't think this can be +-// automated, but we should probably order the work in +-// redefine_single_class() to match the order of field +-// definitions in instanceKlass. We also need to add some +-// comments about keeping things in sync. +-// +-// - set_new_constant_pool() is huge and we should consider refactoring +-// it into smaller chunks of work. +-// +-// - The exception table update code in set_new_constant_pool() defines +-// const values that are also defined in a local context elsewhere. +-// The same literal values are also used in elsewhere. We need to +-// coordinate a cleanup of these constants with Runtime. +-// +- +-class VM_RedefineClasses: public VM_Operation { ++#include "gc_implementation/shared/vmGCOperations.hpp" ++ ++// New version that allows arbitrary changes to already loaded classes. ++class VM_RedefineClasses: public VM_GC_Operation { + private: ++ + // These static fields are needed by SystemDictionary::classes_do() + // facility and the adjust_cpool_cache_and_vtable() helper: + static objArrayOop _old_methods; + static objArrayOop _new_methods; +- static methodOop* _matching_old_methods; +- static methodOop* _matching_new_methods; +- static methodOop* _deleted_methods; +- static methodOop* _added_methods; ++ static int* _matching_old_methods; ++ static int* _matching_new_methods; ++ static int* _deleted_methods; ++ static int* _added_methods; + static int _matching_methods_length; + static int _deleted_methods_length; + static int _added_methods_length; +- static klassOop _the_class_oop; ++ ++ static int _revision_number; ++ ++ static GrowableArray<instanceKlassHandle>* _affected_klasses; + + // The instance fields are used to pass information from + // doit_prologue() to doit() and doit_epilogue(). +@@ -366,40 +63,28 @@ class VM_RedefineClasses: public VM_Operation { + // RetransformClasses. Indicate which. + JvmtiClassLoadKind _class_load_kind; + +- // _index_map_count is just an optimization for knowing if +- // _index_map_p contains any entries. +- int _index_map_count; +- intArray * _index_map_p; +- // ptr to _class_count scratch_classes +- instanceKlassHandle * _scratch_classes; +- jvmtiError _res; ++ GrowableArray<instanceKlassHandle>* _new_classes; ++ jvmtiError _result; ++ int _max_redefinition_flags; + + // Performance measurement support. These timers do not cover all + // the work done for JVM/TI RedefineClasses() but they do cover + // the heavy lifting. +- elapsedTimer _timer_rsc_phase1; +- elapsedTimer _timer_rsc_phase2; +- elapsedTimer _timer_vm_op_prologue; +- +- // These routines are roughly in call order unless otherwise noted. +- +- // Load the caller's new class definition(s) into _scratch_classes. +- // Constant pool merging work is done here as needed. Also calls +- // compare_and_normalize_class_versions() to verify the class +- // definition(s). 
++ elapsedTimer _timer_total; ++ elapsedTimer _timer_prologue; ++ elapsedTimer _timer_class_linking; ++ elapsedTimer _timer_class_loading; ++ elapsedTimer _timer_prepare_redefinition; ++ elapsedTimer _timer_wait_for_locks; ++ elapsedTimer _timer_heap_iteration; ++ elapsedTimer _timer_redefinition; ++ elapsedTimer _timer_vm_op_epilogue; ++ ++ jvmtiError check_redefinition_allowed(instanceKlassHandle new_class); ++ jvmtiError find_sorted_affected_classes( ); ++ jvmtiError find_class_bytes(instanceKlassHandle the_class, const unsigned char **class_bytes, jint *class_byte_count, jboolean *not_changed); + jvmtiError load_new_class_versions(TRAPS); + +- // Verify that the caller provided class definition(s) that meet +- // the restrictions of RedefineClasses. Normalize the order of +- // overloaded methods as needed. +- jvmtiError compare_and_normalize_class_versions( +- instanceKlassHandle the_class, instanceKlassHandle scratch_class); +- +- // Swap annotations[i] with annotations[j] +- // Used by compare_and_normalize_class_versions() when normalizing +- // overloaded methods or changing idnum as when adding or deleting methods. +- void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); +- + // Figure out which new methods match old methods in name and signature, + // which methods have been added, and which are no longer present + void compute_added_deleted_matching_methods(); +@@ -407,95 +92,71 @@ class VM_RedefineClasses: public VM_Operation { + // Change jmethodIDs to point to the new methods + void update_jmethod_ids(); + +- // In addition to marking methods as obsolete, this routine +- // records which methods are EMCP (Equivalent Module Constant +- // Pool) in the emcp_methods BitMap and returns the number of +- // EMCP methods via emcp_method_count_p. This information is +- // used when information about the previous version of the_class +- // is squirreled away. +- void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, +- int * emcp_method_count_p); +- void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ void swap_all_method_annotations(int i, int j, instanceKlassHandle scratch_class); ++ ++ static void add_affected_klasses( klassOop obj ); + +- // Unevolving classes may point to methods of the_class directly +- // from their constant pool caches, itables, and/or vtables. We +- // use the SystemDictionary::classes_do() facility and this helper +- // to fix up these pointers. +- static void adjust_cpool_cache_and_vtable(klassOop k_oop, oop loader, TRAPS); ++ static jvmtiError do_topological_class_sorting(const jvmtiClassDefinition *class_definitions, int class_count, TRAPS); + + // Install the redefinition of a class +- void redefine_single_class(jclass the_jclass, +- instanceKlassHandle scratch_class, TRAPS); ++ void redefine_single_class(instanceKlassHandle the_new_class, TRAPS); + + // Increment the classRedefinedCount field in the specific instanceKlass + // and in all direct and indirect subclasses. 
+ void increment_class_counter(instanceKlass *ik, TRAPS); + +- // Support for constant pool merging (these routines are in alpha +- // order): +- void append_entry(constantPoolHandle scratch_cp, int scratch_i, +- constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS); +- int find_new_index(int old_index); +- bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- bool is_unresolved_string_mismatch(constantPoolHandle cp1, int index1, +- constantPoolHandle cp2, int index2); +- void map_index(constantPoolHandle scratch_cp, int old_index, int new_index); +- bool merge_constant_pools(constantPoolHandle old_cp, +- constantPoolHandle scratch_cp, constantPoolHandle *merge_cp_p, +- int *merge_cp_length_p, TRAPS); +- jvmtiError merge_cp_and_rewrite(instanceKlassHandle the_class, +- instanceKlassHandle scratch_class, TRAPS); +- u2 rewrite_cp_ref_in_annotation_data( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, +- const char * trace_mesg, TRAPS); +- bool rewrite_cp_refs(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_annotation_struct( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_annotations_typeArray( +- typeArrayHandle annotations_typeArray, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_class_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_element_value( +- typeArrayHandle class_annotations, int &byte_i_ref, TRAPS); +- bool rewrite_cp_refs_in_fields_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_method(methodHandle method, +- methodHandle * new_method_p, TRAPS); +- bool rewrite_cp_refs_in_methods(instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_default_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- bool rewrite_cp_refs_in_methods_parameter_annotations( +- instanceKlassHandle scratch_class, TRAPS); +- void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS); +- void rewrite_cp_refs_in_verification_type_info( +- address& stackmap_addr_ref, address stackmap_end, u2 frame_i, +- u1 frame_size, TRAPS); +- void set_new_constant_pool(instanceKlassHandle scratch_class, +- constantPoolHandle scratch_cp, int scratch_cp_length, bool shrink, TRAPS); + + void flush_dependent_code(instanceKlassHandle k_h, TRAPS); + +- static void check_class(klassOop k_oop, oop initiating_loader, TRAPS); +- static void dump_methods(); ++ static void check_class(klassOop k_oop,/* oop initiating_loader,*/ TRAPS) PRODUCT_RETURN; ++ ++ static void adjust_cpool_cache(klassOop k_oop, oop initiating_loader, TRAPS); ++ ++ static void unpatch_bytecode(methodOop method); ++ ++#ifdef ASSERT ++ static void verify_classes(klassOop k_oop, oop initiating_loader, TRAPS); ++#endif ++ ++ int calculate_redefinition_flags(instanceKlassHandle new_version); ++ void calculate_instance_update_information(klassOop new_version); ++ void check_methods_and_mark_as_obsolete(BitMap *emcp_methods, int * emcp_method_count_p); ++ static void mark_as_scavengable(nmethod* nm); ++ ++ bool check_arguments(); ++ jvmtiError check_arguments_error(); + + public: +- VM_RedefineClasses(jint class_count, +- const jvmtiClassDefinition *class_defs, +- JvmtiClassLoadKind class_load_kind); +- VMOp_Type type() const { return VMOp_RedefineClasses; } ++ VM_RedefineClasses(jint class_count, const 
jvmtiClassDefinition *class_defs, JvmtiClassLoadKind class_load_kind); ++ virtual ~VM_RedefineClasses(); ++ + bool doit_prologue(); + void doit(); + void doit_epilogue(); ++ void rollback(); + +- bool allow_nested_vm_operations() const { return true; } +- jvmtiError check_error() { return _res; } ++ jvmtiError check_exception() const; ++ VMOp_Type type() const { return VMOp_RedefineClasses; } ++ bool skip_operation() const { return false; } ++ bool allow_nested_vm_operations() const { return true; } ++ jvmtiError check_error() { return _result; } + + // Modifiable test must be shared between IsModifiableClass query + // and redefine implementation + static bool is_modifiable_class(oop klass_mirror); ++ ++ // Utility methods for transferring field access flags ++ ++ static void transfer_special_access_flags(JavaFieldStream *from, JavaFieldStream *to); ++ static void transfer_special_access_flags(fieldDescriptor *from, fieldDescriptor *to); ++ ++ void transfer_old_native_function_registrations(instanceKlassHandle the_class); ++ ++ void lock_threads(); ++ void unlock_threads(); ++ ++ static void swap_marks(oop first, oop second); ++ + }; + + #endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSES_HPP +diff --git a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp +index 878d300..9dbe748 100644 +--- a/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp ++++ b/src/share/vm/prims/jvmtiRedefineClassesTrace.hpp +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it +@@ -22,114 +22,26 @@ + * + */ + +-#ifndef SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP +-#define SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP ++#define IF_TRACE_RC1 if (TraceRedefineClasses >= 1) ++#define IF_TRACE_RC2 if (TraceRedefineClasses >= 2) ++#define IF_TRACE_RC3 if (TraceRedefineClasses >= 3) ++#define IF_TRACE_RC4 if (TraceRedefineClasses >= 4) ++#define IF_TRACE_RC5 if (TraceRedefineClasses >= 5) + +-// RedefineClasses tracing support via the TraceRedefineClasses +-// option. A bit is assigned to each group of trace messages. +-// Groups of messages are individually selectable. We have to use +-// decimal values on the command line since the command option +-// parsing logic doesn't like non-decimal numerics. The HEX values +-// are used in the actual RC_TRACE() calls for sanity. To achieve +-// the old cumulative behavior, pick the level after the one in +-// which you are interested and subtract one, e.g., 33554431 will +-// print every tracing message. 
+-// +-// 0x00000000 | 0 - default; no tracing messages +-// 0x00000001 | 1 - name each target class before loading, after +-// loading and after redefinition is completed +-// 0x00000002 | 2 - print info if parsing, linking or +-// verification throws an exception +-// 0x00000004 | 4 - print timer info for the VM operation +-// 0x00000008 | 8 - print subclass counter updates +-// 0x00000010 | 16 - unused +-// 0x00000020 | 32 - unused +-// 0x00000040 | 64 - unused +-// 0x00000080 | 128 - unused +-// 0x00000100 | 256 - previous class weak reference addition +-// 0x00000200 | 512 - previous class weak reference mgmt during +-// class unloading checks (GC) +-// 0x00000400 | 1024 - previous class weak reference mgmt during +-// add previous ops (GC) +-// 0x00000800 | 2048 - previous class breakpoint mgmt +-// 0x00001000 | 4096 - detect calls to obsolete methods +-// 0x00002000 | 8192 - fail a guarantee() in addition to detection +-// 0x00004000 | 16384 - detect old/obsolete methods in metadata +-// 0x00008000 | 32768 - old/new method matching/add/delete +-// 0x00010000 | 65536 - impl details: CP size info +-// 0x00020000 | 131072 - impl details: CP merge pass info +-// 0x00040000 | 262144 - impl details: CP index maps +-// 0x00080000 | 524288 - impl details: modified CP index values +-// 0x00100000 | 1048576 - impl details: vtable updates +-// 0x00200000 | 2097152 - impl details: itable updates +-// 0x00400000 | 4194304 - impl details: constant pool cache updates +-// 0x00800000 | 8388608 - impl details: methodComparator info +-// 0x01000000 | 16777216 - impl details: nmethod evolution info +-// 0x02000000 | 33554432 - impl details: annotation updates +-// 0x04000000 | 67108864 - impl details: StackMapTable updates +-// 0x08000000 | 134217728 - impl details: OopMapCache updates +-// 0x10000000 | 268435456 - unused +-// 0x20000000 | 536870912 - unused +-// 0x40000000 | 1073741824 - unused +-// 0x80000000 | 2147483648 - unused +-// +-// Note: The ResourceMark is to cleanup resource allocated args. +-// The "while (0)" is so we can use semi-colon at end of RC_TRACE(). +-#define RC_TRACE(level, args) \ +- if ((TraceRedefineClasses & level) != 0) { \ +- ResourceMark rm; \ +- tty->print("RedefineClasses-0x%x: ", level); \ +- tty->print_cr args; \ +- } while (0) +- +-#define RC_TRACE_NO_CR(level, args) \ +- if ((TraceRedefineClasses & level) != 0) { \ +- ResourceMark rm; \ +- tty->print("RedefineClasses-0x%x: ", level); \ +- tty->print args; \ +- } while (0) +- +-#define RC_TRACE_WITH_THREAD(level, thread, args) \ +- if ((TraceRedefineClasses & level) != 0) { \ +- ResourceMark rm(thread); \ +- tty->print("RedefineClasses-0x%x: ", level); \ +- tty->print_cr args; \ +- } while (0) +- +-#define RC_TRACE_MESG(args) \ +- { \ +- ResourceMark rm; \ +- tty->print("RedefineClasses: "); \ +- tty->print_cr args; \ +- } while (0) +- +-// Macro for checking if TraceRedefineClasses has a specific bit +-// enabled. Returns true if the bit specified by level is set. +-#define RC_TRACE_ENABLED(level) ((TraceRedefineClasses & level) != 0) +- +-// Macro for checking if TraceRedefineClasses has one or more bits +-// set in a range of bit values. Returns true if one or more bits +-// is set in the range from low..high inclusive. Assumes that low +-// and high are single bit values. +-// +-// ((high << 1) - 1) +-// Yields a mask that removes bits greater than the high bit value. +-// This algorithm doesn't work with highest bit. +-// ~(low - 1) +-// Yields a mask that removes bits lower than the low bit value. 
+-#define RC_TRACE_IN_RANGE(low, high) \ +-(((TraceRedefineClasses & ((high << 1) - 1)) & ~(low - 1)) != 0) ++#define TRACE_RC1 if (TraceRedefineClasses >= 1) tty->print("TraceRedefineClasses-1: "); if (TraceRedefineClasses >= 1) tty->print_cr ++#define TRACE_RC2 if (TraceRedefineClasses >= 2) tty->print(" TraceRedefineClasses-2: "); if (TraceRedefineClasses >= 2) tty->print_cr ++#define TRACE_RC3 if (TraceRedefineClasses >= 3) tty->print(" TraceRedefineClasses-3: "); if (TraceRedefineClasses >= 3) tty->print_cr ++#define TRACE_RC4 if (TraceRedefineClasses >= 4) tty->print(" TraceRedefineClasses-4: "); if (TraceRedefineClasses >= 4) tty->print_cr ++#define TRACE_RC5 if (TraceRedefineClasses >= 5) tty->print(" TraceRedefineClasses-5: "); if (TraceRedefineClasses >= 5) tty->print_cr + + // Timer support macros. Only do timer operations if timer tracing + // is enabled. The "while (0)" is so we can use semi-colon at end of + // the macro. + #define RC_TIMER_START(t) \ +- if (RC_TRACE_ENABLED(0x00000004)) { \ ++ if (TimeRedefineClasses) { \ + t.start(); \ + } while (0) + #define RC_TIMER_STOP(t) \ +- if (RC_TRACE_ENABLED(0x00000004)) { \ ++ if (TimeRedefineClasses) { \ + t.stop(); \ + } while (0) +- +-#endif // SHARE_VM_PRIMS_JVMTIREDEFINECLASSESTRACE_HPP +diff --git a/src/share/vm/prims/methodComparator.cpp b/src/share/vm/prims/methodComparator.cpp +index 60eaf97..785dc24 100644 +--- a/src/share/vm/prims/methodComparator.cpp ++++ b/src/share/vm/prims/methodComparator.cpp +@@ -42,10 +42,9 @@ bool MethodComparator::methods_EMCP(methodOop old_method, methodOop new_method) + if (old_method->code_size() != new_method->code_size()) + return false; + if (check_stack_and_locals_size(old_method, new_method) != 0) { +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE(0x00800000, ("Methods %s non-comparable with diagnosis %d", ++ TRACE_RC4("Methods %s non-comparable with diagnosis %d", + old_method->name()->as_C_string(), +- check_stack_and_locals_size(old_method, new_method))); ++ check_stack_and_locals_size(old_method, new_method)); + return false; + } + +@@ -114,10 +113,9 @@ bool MethodComparator::methods_switchable(methodOop old_method, methodOop new_me + // Now we can test all forward jumps + for (int i = 0; i < fwd_jmps.length() / 2; i++) { + if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) { +- RC_TRACE(0x00800000, +- ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d", ++ TRACE_RC4("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d", + fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)), +- fwd_jmps.at(i*2+1))); ++ fwd_jmps.at(i*2+1)); + return false; + } + } +diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp +index 22d450b..bac1ae6 100644 +--- a/src/share/vm/runtime/arguments.cpp ++++ b/src/share/vm/runtime/arguments.cpp +@@ -54,8 +54,8 @@ + #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" + #endif + +-// Note: This is a special bug reporting site for the JVM +-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp" ++// (tw) The DCE VM has its own JIRA bug tracking system. ++#define DEFAULT_VENDOR_URL_BUG "http://ssw.jku.at/dcevm/bugreport/" + #define DEFAULT_JAVA_LAUNCHER "generic" + + char** Arguments::_jvm_flags_array = NULL; +@@ -1792,6 +1792,16 @@ bool Arguments::check_gc_consistency() { + status = false; + } + ++ // (tw) Must use serial GC. 
This limitation applies because the instance size changing GC modifications ++ // are only built into the mark and compact algorithm. ++ if (!UseSerialGC && i >= 1) { ++ //jio_fprintf(defaultStream::error_stream(), ++ // "Must use the serial GC in the Dynamic Code Evolution VM\n"); ++ //status = false; ++ } else { ++ UseSerialGC = true; ++ } ++ + return status; + } + +@@ -3208,7 +3218,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) { + + // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled. + set_aggressive_opts_flags(); +- ++#ifndef COMPILER2 + // Turn off biased locking for locking debug mode flags, + // which are subtlely different from each other but neither works with + // biased locking. +@@ -3225,6 +3235,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) { + } + UseBiasedLocking = false; + } ++#endif + + #ifdef CC_INTERP + // Clear flags not supported by the C++ interpreter +diff --git a/src/share/vm/runtime/fieldDescriptor.cpp b/src/share/vm/runtime/fieldDescriptor.cpp +index 3d5213f..9cc701b 100644 +--- a/src/share/vm/runtime/fieldDescriptor.cpp ++++ b/src/share/vm/runtime/fieldDescriptor.cpp +@@ -92,7 +92,8 @@ void fieldDescriptor::initialize(klassOop k, int index) { + instanceKlass* ik = instanceKlass::cast(k); + _cp = ik->constants(); + FieldInfo* f = ik->field(index); +- assert(!f->is_internal(), "regular Java fields only"); ++ // (tw) do we need this? ++// assert(!f->is_internal(), "regular Java fields only"); + + _access_flags = accessFlags_from(f->access_flags()); + guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor"); +diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp +index 8df7220..341b399 100644 +--- a/src/share/vm/runtime/globals.hpp ++++ b/src/share/vm/runtime/globals.hpp +@@ -1230,6 +1230,11 @@ class CommandLineFlags { + product(intx, TraceRedefineClasses, 0, \ + "Trace level for JVMTI RedefineClasses") \ + \ ++ product(bool, TimeRedefineClasses, false, \ ++ "Measure timing for JVMTI RedefineClasses") \ ++ \ ++ product(bool, AllowAdvancedClassRedefinition, true, \ ++ "Allow advanced class redefinition beyond swapping method bodies")\ + develop(bool, StressMethodComparator, false, \ + "run the MethodComparator on all loaded methods") \ + \ +diff --git a/src/share/vm/runtime/interfaceSupport.hpp b/src/share/vm/runtime/interfaceSupport.hpp +index 2875ee0..61fd8fe 100644 +--- a/src/share/vm/runtime/interfaceSupport.hpp ++++ b/src/share/vm/runtime/interfaceSupport.hpp +@@ -296,7 +296,7 @@ class ThreadToNativeFromVM : public ThreadStateTransition { + ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) { + // We are leaving the VM at this point and going directly to native code. + // Block, if we are in the middle of a safepoint synchronization. +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + thread->frame_anchor()->make_walkable(thread); + trans_and_fence(_thread_in_vm, _thread_in_native); + // Check for pending. async. exceptions or suspends. 
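
The check_gc_consistency() change earlier in this chunk is easy to misread: when a non-serial collector has been selected explicitly, the original error stays commented out and nothing happens; only when no such collector was chosen is UseSerialGC quietly forced on. The sketch below models just that decision. The flags are plain bools rather than real -XX flags, and the counting that produces i is an assumption for illustration -- only the i >= 1 test and the forced assignment appear in the hunk.

    #include <cstdio>

    // Plain bools stand in for the real -XX flags.
    struct GcFlags {
        bool UseSerialGC;
        bool UseConcMarkSweepGC;
        bool UseParNewGC;
        bool UseParallelGC;
        bool UseParallelOldGC;
        bool UseG1GC;
    };

    static bool check_gc_consistency(GcFlags& f) {
        bool status = true;
        // Assumed: i counts the explicitly selected non-serial collector
        // families, as the surrounding HotSpot code does.
        int i = 0;
        if (f.UseConcMarkSweepGC || f.UseParNewGC)    i++;
        if (f.UseParallelGC     || f.UseParallelOldGC) i++;
        if (f.UseG1GC)                                 i++;

        if (!f.UseSerialGC && i >= 1) {
            // The patch leaves the original error commented out here, so an
            // explicitly requested non-serial collector is left alone.
        } else {
            f.UseSerialGC = true;  // otherwise serial GC is quietly forced on
        }
        return status;
    }

    int main() {
        GcFlags f = {false, false, false, false, false, false};
        check_gc_consistency(f);
        std::printf("UseSerialGC=%d\n", (int)f.UseSerialGC);  // prints 1
        return 0;
    }

A user who passes -XX:+UseG1GC therefore keeps G1 and skips the forced assignment entirely; the enhanced-redefinition GC support is only guaranteed on the path where serial GC gets forced.
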
+diff --git a/src/share/vm/runtime/javaCalls.cpp b/src/share/vm/runtime/javaCalls.cpp +index edbba98..4a27925 100644 +--- a/src/share/vm/runtime/javaCalls.cpp ++++ b/src/share/vm/runtime/javaCalls.cpp +@@ -60,7 +60,7 @@ JavaCallWrapper::JavaCallWrapper(methodHandle callee_method, Handle receiver, Ja + bool clear_pending_exception = true; + + guarantee(thread->is_Java_thread(), "crucial check - the VM thread cannot and must not escape to Java code"); +- assert(!thread->owns_locks(), "must release all locks when leaving VM"); ++ assert(!thread->owns_locks_but_redefine_classes_lock(), "must release all locks when leaving VM"); + guarantee(!thread->is_Compiler_thread(), "cannot make java calls from the compiler"); + _result = result; + +diff --git a/src/share/vm/runtime/jniHandles.cpp b/src/share/vm/runtime/jniHandles.cpp +index 3cbcaca..30839d7 100644 +--- a/src/share/vm/runtime/jniHandles.cpp ++++ b/src/share/vm/runtime/jniHandles.cpp +@@ -112,6 +112,10 @@ jobject JNIHandles::make_weak_global(Handle obj) { + } + + jmethodID JNIHandles::make_jmethod_id(methodHandle mh) { ++ if (mh->newest_version() != mh()) { ++ methodHandle mh_new(Thread::current(), mh()->newest_version()); ++ return (jmethodID) make_weak_global(mh_new); ++ } + return (jmethodID) make_weak_global(mh); + } + +diff --git a/src/share/vm/runtime/mutex.cpp b/src/share/vm/runtime/mutex.cpp +index 2095237..c541434 100644 +--- a/src/share/vm/runtime/mutex.cpp ++++ b/src/share/vm/runtime/mutex.cpp +@@ -1227,7 +1227,7 @@ Monitor * Monitor::get_least_ranked_lock(Monitor * locks) { + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1247,7 +1247,7 @@ Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) { + // in increasing rank order (modulo any native ranks) + for (tmp = locks; tmp != NULL; tmp = tmp->next()) { + if (tmp->next() != NULL) { +- assert(tmp->rank() == Mutex::native || ++ assert(tmp->rank() == Mutex::native || tmp->rank() == Mutex::redefine_classes || + tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); + } + } +@@ -1310,6 +1310,7 @@ void Monitor::set_owner_implementation(Thread *new_owner) { + // already hold Terminator_lock - may happen because of periodic safepoints + if (this->rank() != Mutex::native && + this->rank() != Mutex::suspend_resume && ++ this->rank() != Mutex::redefine_classes && + locks != NULL && locks->rank() <= this->rank() && + !SafepointSynchronize::is_at_safepoint() && + this != Interrupt_lock && +diff --git a/src/share/vm/runtime/mutex.hpp b/src/share/vm/runtime/mutex.hpp +index 7d2cd82..11eb32e 100644 +--- a/src/share/vm/runtime/mutex.hpp ++++ b/src/share/vm/runtime/mutex.hpp +@@ -109,7 +109,8 @@ class Monitor : public CHeapObj<mtInternal> { + barrier = safepoint + 1, + nonleaf = barrier + 1, + max_nonleaf = nonleaf + 900, +- native = max_nonleaf + 1 ++ native = max_nonleaf + 1, ++ redefine_classes = native + 1 + }; + + // The WaitSet and EntryList linked lists are composed of ParkEvents. +diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp +index d9184c7..cfd1f3a 100644 +--- a/src/share/vm/runtime/mutexLocker.cpp ++++ b/src/share/vm/runtime/mutexLocker.cpp +@@ -49,6 +49,7 @@ + // Consider using GCC's __read_mostly. 
+ + Mutex* Patching_lock = NULL; ++Mutex* RedefineClasses_lock = NULL; + Monitor* SystemDictionary_lock = NULL; + Mutex* PackageTable_lock = NULL; + Mutex* CompiledIC_lock = NULL; +@@ -281,6 +282,7 @@ void mutex_init() { + def(Debug3_lock , Mutex , nonleaf+4, true ); + def(CompileThread_lock , Monitor, nonleaf+5, false); + def(PeriodicTask_lock , Monitor, nonleaf+5, true); ++ def(RedefineClasses_lock , Mutex , nonleaf+7, false ); // for ensuring that class redefinition is not done in parallel + + #ifdef INCLUDE_TRACE + def(JfrMsg_lock , Monitor, leaf, true); +diff --git a/src/share/vm/runtime/mutexLocker.hpp b/src/share/vm/runtime/mutexLocker.hpp +index 90011d8..d6b0c54 100644 +--- a/src/share/vm/runtime/mutexLocker.hpp ++++ b/src/share/vm/runtime/mutexLocker.hpp +@@ -43,6 +43,7 @@ + // Mutexes used in the VM. + + extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code ++extern Mutex* RedefineClasses_lock; // a lock on class redefinition + extern Monitor* SystemDictionary_lock; // a lock on the system dictonary + extern Mutex* PackageTable_lock; // a lock on the class loader package table + extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access +diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp +index 1665d93..6baabba 100644 +--- a/src/share/vm/runtime/reflection.cpp ++++ b/src/share/vm/runtime/reflection.cpp +@@ -468,7 +468,8 @@ bool Reflection::verify_class_access(klassOop current_class, klassOop new_class, + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. + if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +@@ -519,6 +520,12 @@ bool Reflection::verify_field_access(klassOop current_class, + AccessFlags access, + bool classloader_only, + bool protected_restriction) { ++ ++ // (tw) Decide accessibility based on active version ++ if (current_class != NULL) { ++ current_class = current_class->klass_part()->active_version(); ++ } ++ + // Verify that current_class can access a field of field_class, where that + // field's access bits are "access". We assume that we've already verified + // that current_class can access field_class. +@@ -560,7 +567,8 @@ bool Reflection::verify_field_access(klassOop current_class, + // sun/reflect/MagicAccessorImpl subclasses to succeed trivially. 
+ if ( JDK_Version::is_gte_jdk14x_version() + && UseNewReflection +- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) { ++ && (Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()) || ++ Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass()->klass_part()->newest_version()))) { + return true; + } + +diff --git a/src/share/vm/runtime/sharedRuntime.cpp b/src/share/vm/runtime/sharedRuntime.cpp +index 709d783..689b9a2 100644 +--- a/src/share/vm/runtime/sharedRuntime.cpp ++++ b/src/share/vm/runtime/sharedRuntime.cpp +@@ -603,21 +603,13 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Symbol* n + // + JRT_LEAF(int, SharedRuntime::rc_trace_method_entry( + JavaThread* thread, methodOopDesc* method)) +- assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call"); ++ assert(TraceRedefineClasses >= 4, "wrong call"); + + if (method->is_obsolete()) { + // We are calling an obsolete method, but this is not necessarily + // an error. Our method could have been redefined just after we + // fetched the methodOop from the constant pool. +- +- // RC_TRACE macro has an embedded ResourceMark +- RC_TRACE_WITH_THREAD(0x00001000, thread, +- ("calling obsolete method '%s'", +- method->name_and_sig_as_C_string())); +- if (RC_TRACE_ENABLED(0x00002000)) { +- // this option is provided to debug calls to obsolete methods +- guarantee(false, "faulting at call to an obsolete method."); +- } ++ TRACE_RC4("calling obsolete method '%s'", method->name_and_sig_as_C_string()); + } + return 0; + JRT_END +@@ -1137,7 +1129,20 @@ methodHandle SharedRuntime::resolve_helper(JavaThread *thread, + if (JvmtiExport::can_hotswap_or_post_breakpoint()) { + int retry_count = 0; + while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && +- callee_method->method_holder() != SystemDictionary::Object_klass()) { ++ callee_method->method_holder()->klass_part()->super() != NULL) { ++ ++ // (tw) If we are executing an old method, this is OK! ++ { ++ ResourceMark rm(thread); ++ RegisterMap cbl_map(thread, false); ++ frame caller_frame = thread->last_frame().sender(&cbl_map); ++ ++ CodeBlob* caller_cb = caller_frame.cb(); ++ guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod"); ++ nmethod* caller_nm = caller_cb->as_nmethod_or_null(); ++ if (caller_nm->method()->is_old()) break; ++ } ++ + // If has a pending exception then there is no need to re-try to + // resolve this method. + // If the method has been redefined, we need to try again. 
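
The resolve_helper() change just above is the interesting part of this chunk: resolution is normally retried while the callee turns out to be an old (redefined) method, and the patch adds an early exit when the calling nmethod itself belongs to an old method, since old code calling old code is acceptable while both versions are still live. Below is a condensed, self-contained C++ sketch of only that decision; SimpleMethod and reresolve are invented placeholders, and the concrete retry bound is an assumption rather than something visible in the hunk.

    // Simplified stand-ins; only the shape of the retry decision matters.
    struct SimpleMethod {
        bool is_old;           // true once the method has been redefined away
        bool holder_has_super; // proxy for method_holder()->klass_part()->super() != NULL
    };

    // Placeholder for re-running call-site resolution after a redefinition.
    static SimpleMethod* reresolve(SimpleMethod* callee) {
        return callee;
    }

    static SimpleMethod* resolve_with_retry(SimpleMethod* callee,
                                            const SimpleMethod* caller) {
        int retry_count = 0;
        while (callee->is_old && callee->holder_has_super) {
            // (tw) If the calling frame itself belongs to an old method,
            // calling the old callee is fine: stop retrying.
            if (caller->is_old) break;
            callee = reresolve(callee);      // redefined under us: resolve again
            if (++retry_count > 100) break;  // assumed safety bound on retries
        }
        return callee;
    }

    int main() {
        SimpleMethod old_callee = {true, true};
        SimpleMethod old_caller = {true, false};
        return resolve_with_retry(&old_callee, &old_caller)->is_old ? 0 : 1;
    }
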
+diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp +index ae28b65..bb0681e 100644 +--- a/src/share/vm/runtime/thread.cpp ++++ b/src/share/vm/runtime/thread.cpp +@@ -216,6 +216,8 @@ Thread::Thread() { + set_self_raw_id(0); + set_lgrp_id(-1); + ++ _redefine_classes_mutex = new Mutex(Mutex::redefine_classes, "redefine classes lock", false); ++ + // allocated data structures + set_osthread(NULL); + set_resource_area(new (mtThread)ResourceArea()); +@@ -249,6 +251,7 @@ Thread::Thread() { + omFreeProvision = 32 ; + omInUseList = NULL ; + omInUseCount = 0 ; ++ _pretend_new_universe = false; + + #ifdef ASSERT + _visited_for_critical_count = false; +@@ -884,6 +887,15 @@ bool Thread::owns_locks_but_compiled_lock() const { + return false; + } + ++bool Thread::owns_locks_but_redefine_classes_lock() const { ++ for(Monitor *cur = _owned_locks; cur; cur = cur->next()) { ++ if (cur != RedefineClasses_lock && cur->rank() != Mutex::redefine_classes) { ++ return true; ++ } ++ } ++ return false; ++} ++ + + #endif + +@@ -1637,7 +1649,7 @@ void JavaThread::run() { + ThreadStateTransition::transition_and_fence(this, _thread_new, _thread_in_vm); + + assert(JavaThread::current() == this, "sanity check"); +- assert(!Thread::current()->owns_locks(), "sanity check"); ++ assert(!Thread::current()->owns_locks_but_redefine_classes_lock(), "sanity check"); + + DTRACE_THREAD_PROBE(start, this); + +@@ -3193,7 +3205,7 @@ static void compiler_thread_entry(JavaThread* thread, TRAPS) { + + // Create a CompilerThread + CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) +-: JavaThread(&compiler_thread_entry) { ++: JavaThread(&compiler_thread_entry), _should_bailout(false) { + _env = NULL; + _log = NULL; + _task = NULL; +@@ -3201,6 +3213,7 @@ CompilerThread::CompilerThread(CompileQueue* queue, CompilerCounters* counters) + _counters = counters; + _buffer_blob = NULL; + _scanned_nmethod = NULL; ++ _compilation_mutex = new Mutex(Mutex::redefine_classes, "compilationMutex", false); + + #ifndef PRODUCT + _ideal_graph_printer = NULL; +diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp +index 774bd27..4620820 100644 +--- a/src/share/vm/runtime/thread.hpp ++++ b/src/share/vm/runtime/thread.hpp +@@ -202,12 +202,15 @@ class Thread: public ThreadShadow { + public: + void enter_signal_handler() { _num_nested_signal++; } + void leave_signal_handler() { _num_nested_signal--; } +- bool is_inside_signal_handler() const { return _num_nested_signal > 0; } ++ bool is_inside_signal_handler() const { return _num_nested_signal > 0; } ++ Mutex* redefine_classes_mutex() { return _redefine_classes_mutex; } + + private: + // Debug tracing + static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN; + ++ Mutex* _redefine_classes_mutex; ++ + // Active_handles points to a block of handles + JNIHandleBlock* _active_handles; + +@@ -530,10 +533,15 @@ public: + uintptr_t _self_raw_id; // used by get_thread (mutable) + int _lgrp_id; + ++ ++ bool _pretend_new_universe; ++ + public: + // Stack overflow support + address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } + ++ void set_pretend_new_universe(bool b) { if (_pretend_new_universe != b) { if (TraceRedefineClasses >= 5) tty->print_cr("Changing pretend universe to %d", (int)b); _pretend_new_universe = b; } } ++ bool pretend_new_universe() { return _pretend_new_universe; } + void set_stack_base(address base) { _stack_base = base; } + size_t stack_size() const { return 
_stack_size; }
+ void set_stack_size(size_t size) { _stack_size = size; }
+@@ -570,6 +578,7 @@
+ void print_owned_locks() const { print_owned_locks_on(tty); }
+ Monitor* owned_locks() const { return _owned_locks; }
+ bool owns_locks() const { return owned_locks() != NULL; }
++ bool owns_locks_but_redefine_classes_lock() const;
+ bool owns_locks_but_compiled_lock() const;
+
+ // Deadlock detection
+@@ -1793,6 +1802,8 @@ class CompilerThread : public JavaThread {
+ CompileTask* _task;
+ CompileQueue* _queue;
+ BufferBlob* _buffer_blob;
++ bool _should_bailout;
++ Mutex* _compilation_mutex;
+
+ nmethod* _scanned_nmethod; // nmethod being scanned by the sweeper
+
+@@ -1802,12 +1813,16 @@
+
+ CompilerThread(CompileQueue* queue, CompilerCounters* counters);
+
++ bool should_bailout() const { return _should_bailout; }
++ void set_should_bailout(bool b) { _should_bailout = b; }
++
+ bool is_Compiler_thread() const { return true; }
+ // Hide this compiler thread from external view.
+ bool is_hidden_from_external_view() const { return true; }
+
+ CompileQueue* queue() { return _queue; }
+ CompilerCounters* counters() { return _counters; }
++ Mutex *compilation_mutex() { return _compilation_mutex; }
+
+ // Get/set the thread's compilation environment.
+ ciEnv* env() { return _env; }
+diff --git a/src/share/vm/runtime/vmThread.cpp b/src/share/vm/runtime/vmThread.cpp
+index 7643670..0d3cd70 100644
+--- a/src/share/vm/runtime/vmThread.cpp
++++ b/src/share/vm/runtime/vmThread.cpp
+@@ -691,6 +691,9 @@ void VMThread::execute(VM_Operation* op) {
+ void VMThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+ Thread::oops_do(f, cf);
+ _vm_queue->oops_do(f);
++ if (_cur_vm_operation != NULL) {
++ _cur_vm_operation->oops_do(f);
++ }
+ }
+
+ //------------------------------------------------------------------------------------------------------------------
+diff --git a/src/share/vm/utilities/exceptions.cpp b/src/share/vm/utilities/exceptions.cpp
+index 03f254d..18e324b 100644
+--- a/src/share/vm/utilities/exceptions.cpp
++++ b/src/share/vm/utilities/exceptions.cpp
+@@ -254,6 +254,8 @@ Handle Exceptions::new_exception(Thread *thread, Symbol* name,
+ assert(thread->is_Java_thread(), "can only be called by a Java thread");
+ assert(!thread->has_pending_exception(), "already has exception");
+
++ bool old_pretend_value = Thread::current()->pretend_new_universe();
++ Thread::current()->set_pretend_new_universe(false);
+ Handle h_exception;
+
+ // Resolve exception klass
+@@ -285,6 +287,7 @@
+ h_exception = Handle(thread, thread->pending_exception());
+ thread->clear_pending_exception();
+ }
++ Thread::current()->set_pretend_new_universe(old_pretend_value);
+ return h_exception;
+ }
+
+diff --git a/src/share/vm/utilities/growableArray.hpp b/src/share/vm/utilities/growableArray.hpp
+index 2a6d6b8..4b6927f 100644
+--- a/src/share/vm/utilities/growableArray.hpp
++++ b/src/share/vm/utilities/growableArray.hpp
+@@ -145,6 +145,33 @@ class GenericGrowableArray : public ResourceObj {
+ assert(on_stack(), "fast ResourceObj path only");
+ return (void*)resource_allocate_bytes(thread, elementSize * _max);
+ }
++
++};
++
++template<class E, class F> class Pair : public StackObj
++{
++private:
++ E _left;
++ F _right;
++
++public:
++
++ Pair() {
++
++ }
++
++ Pair(E left, F right) {
++ this->_left = left;
++ this->_right = right;
++ }
++
++ E left() {
++ return _left;
++ }
++
++ F right() {
++ return _right;
++
} + }; + + template<class E> class GrowableArray : public GenericGrowableArray { diff --git a/hotspot/.hg/patches/light-jdk8u5-b13.patch b/hotspot/.hg/patches/light-jdk8u5-b13.patch index 32152bc1..3e7d9894 100644 --- a/hotspot/.hg/patches/light-jdk8u5-b13.patch +++ b/hotspot/.hg/patches/light-jdk8u5-b13.patch @@ -1,20 +1,6 @@ -# HG changeset patch -# Parent 8a67179106085689906732013a282efeeb9bd5f4 - -diff --git a/make/openjdk_distro b/make/openjdk_distro ---- a/make/openjdk_distro -+++ b/make/openjdk_distro -@@ -27,6 +27,6 @@ - # - - # Don't put quotes (fail windows build). --HOTSPOT_VM_DISTRO=OpenJDK -+HOTSPOT_VM_DISTRO=Dynamic Code Evolution - COMPANY_NAME= - PRODUCT_NAME=OpenJDK -diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFactory.cpp ---- a/src/share/vm/ci/ciObjectFactory.cpp -+++ b/src/share/vm/ci/ciObjectFactory.cpp +diff -r 8a6717910608 src/share/vm/ci/ciObjectFactory.cpp +--- a/src/share/vm/ci/ciObjectFactory.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/ci/ciObjectFactory.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -750,3 +750,27 @@ _unloaded_instances->length(), _unloaded_klasses->length()); @@ -43,9 +29,9 @@ diff --git a/src/share/vm/ci/ciObjectFactory.cpp b/src/share/vm/ci/ciObjectFacto +#endif // ASSERT +} + -diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFactory.hpp ---- a/src/share/vm/ci/ciObjectFactory.hpp -+++ b/src/share/vm/ci/ciObjectFactory.hpp +diff -r 8a6717910608 src/share/vm/ci/ciObjectFactory.hpp +--- a/src/share/vm/ci/ciObjectFactory.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/ci/ciObjectFactory.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -90,6 +90,7 @@ ciInstance* get_unloaded_instance(ciInstanceKlass* klass); @@ -63,9 +49,9 @@ diff --git a/src/share/vm/ci/ciObjectFactory.hpp b/src/share/vm/ci/ciObjectFacto }; #endif // SHARE_VM_CI_CIOBJECTFACTORY_HPP -diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp ---- a/src/share/vm/classfile/classFileParser.cpp -+++ b/src/share/vm/classfile/classFileParser.cpp +diff -r 8a6717910608 src/share/vm/classfile/classFileParser.cpp +--- a/src/share/vm/classfile/classFileParser.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/classFileParser.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -759,6 +759,7 @@ Array<Klass*>* ClassFileParser::parse_interfaces(int length, Handle protection_domain, @@ -237,9 +223,9 @@ diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile k->set_is_cloneable(); } } -diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp ---- a/src/share/vm/classfile/classFileParser.hpp -+++ b/src/share/vm/classfile/classFileParser.hpp +diff -r 8a6717910608 src/share/vm/classfile/classFileParser.hpp +--- a/src/share/vm/classfile/classFileParser.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/classFileParser.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -214,11 +214,12 @@ Array<Klass*>* parse_interfaces(int length, Handle protection_domain, @@ -285,9 +271,9 @@ diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile TempNewSymbol& parsed_name, bool verify, TRAPS); -diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/classLoader.cpp ---- a/src/share/vm/classfile/classLoader.cpp -+++ b/src/share/vm/classfile/classLoader.cpp +diff -r 8a6717910608 src/share/vm/classfile/classLoader.cpp +--- a/src/share/vm/classfile/classLoader.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ 
b/src/share/vm/classfile/classLoader.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -926,6 +926,7 @@ instanceKlassHandle result = parser.parseClassFile(h_name, loader_data, @@ -296,9 +282,9 @@ diff --git a/src/share/vm/classfile/classLoader.cpp b/src/share/vm/classfile/cla parsed_name, false, CHECK_(h)); -diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dictionary.cpp ---- a/src/share/vm/classfile/dictionary.cpp -+++ b/src/share/vm/classfile/dictionary.cpp +diff -r 8a6717910608 src/share/vm/classfile/dictionary.cpp +--- a/src/share/vm/classfile/dictionary.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/dictionary.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -145,7 +145,7 @@ InstanceKlass* ik = InstanceKlass::cast(e); @@ -368,9 +354,9 @@ diff --git a/src/share/vm/classfile/dictionary.cpp b/src/share/vm/classfile/dict } -diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dictionary.hpp ---- a/src/share/vm/classfile/dictionary.hpp -+++ b/src/share/vm/classfile/dictionary.hpp +diff -r 8a6717910608 src/share/vm/classfile/dictionary.hpp +--- a/src/share/vm/classfile/dictionary.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/dictionary.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -78,6 +78,10 @@ void add_klass(Symbol* class_name, ClassLoaderData* loader_data,KlassHandle obj); @@ -394,9 +380,9 @@ diff --git a/src/share/vm/classfile/dictionary.hpp b/src/share/vm/classfile/dict // Unload (that is, break root links to) all unmarked classes and // loaders. Returns "true" iff something was unloaded. bool do_unloading(); -diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp ---- a/src/share/vm/classfile/javaClasses.cpp -+++ b/src/share/vm/classfile/javaClasses.cpp +diff -r 8a6717910608 src/share/vm/classfile/javaClasses.cpp +--- a/src/share/vm/classfile/javaClasses.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/javaClasses.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -1629,6 +1629,8 @@ skip_throwableInit_check = true; } @@ -406,9 +392,9 @@ diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/jav if (method->is_hidden()) { if (skip_hidden) continue; } -diff --git a/src/share/vm/classfile/loaderConstraints.cpp b/src/share/vm/classfile/loaderConstraints.cpp ---- a/src/share/vm/classfile/loaderConstraints.cpp -+++ b/src/share/vm/classfile/loaderConstraints.cpp +diff -r 8a6717910608 src/share/vm/classfile/loaderConstraints.cpp +--- a/src/share/vm/classfile/loaderConstraints.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/loaderConstraints.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -446,7 +446,7 @@ if (k != NULL) { // We found the class in the system dictionary, so we should @@ -418,9 +404,9 @@ diff --git a/src/share/vm/classfile/loaderConstraints.cpp b/src/share/vm/classfi } else { // If we don't find the class in the system dictionary, it // has to be in the placeholders table. 
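Aside: the reflection.cpp, loaderConstraints.cpp, and verifier.cpp hunks in this patch all lean on a per-class version chain, comparing against newest_version() (the most recent redefinition of a class) and active_version() (the one running code should still see). That bookkeeping is not shown in this excerpt, so the sketch below is a hypothetical reconstruction in plain C++, not the VM's Klass type; the field and method names merely mirror the patch.

#include <cstdio>

// Hypothetical stand-in for DCEVM's per-class version chain (not HotSpot code).
struct VersionedKlass {
  VersionedKlass* _new_version;   // next redefinition of this class, or null
  VersionedKlass* _old_version;   // the definition this one replaced, or null
  bool            _is_redefining; // true while this version is being installed

  // What the subtype checks above compare against: the head of the chain.
  VersionedKlass* newest_version() {
    VersionedKlass* k = this;
    while (k->_new_version != nullptr) k = k->_new_version;
    return k;
  }

  // What executing code should regard as current: step back past versions
  // whose installation has not finished yet.
  VersionedKlass* active_version() {
    VersionedKlass* k = this;
    while (k->_is_redefining && k->_old_version != nullptr) k = k->_old_version;
    return k;
  }
};

int main() {
  VersionedKlass v1 = {nullptr, nullptr, false}; // original definition
  VersionedKlass v2 = {nullptr, &v1, true};      // redefinition in progress
  v1._new_version = &v2;
  std::printf("newest is v2: %d, active is v1: %d\n",
              v1.newest_version() == &v2, v2.active_version() == &v1);
  return 0;
}

Under a model like this, the widened MagicAccessorImpl subtype checks accept generated accessors that subclass either the originally loaded version or the newest one, which is what the patched conditions express.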
-diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp ---- a/src/share/vm/classfile/systemDictionary.cpp -+++ b/src/share/vm/classfile/systemDictionary.cpp +diff -r 8a6717910608 src/share/vm/classfile/systemDictionary.cpp +--- a/src/share/vm/classfile/systemDictionary.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/systemDictionary.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -174,6 +174,7 @@ // can return a null klass klass = handle_resolution_exception(class_name, class_loader, protection_domain, throw_error, k_h, THREAD); @@ -572,9 +558,9 @@ diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfil linkage_error = "loader (instance of %s): attempted duplicate class " "definition for name: \"%s\""; } else { -diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfile/systemDictionary.hpp ---- a/src/share/vm/classfile/systemDictionary.hpp -+++ b/src/share/vm/classfile/systemDictionary.hpp +diff -r 8a6717910608 src/share/vm/classfile/systemDictionary.hpp +--- a/src/share/vm/classfile/systemDictionary.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/systemDictionary.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -269,7 +269,7 @@ // Resolve from stream (called by jni_DefineClass and JVM_DefineClass) static Klass* resolve_from_stream(Symbol* class_name, Handle class_loader, @@ -612,9 +598,9 @@ diff --git a/src/share/vm/classfile/systemDictionary.hpp b/src/share/vm/classfil static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, Handle class_loader, instanceKlassHandle k, TRAPS); -diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifier.cpp ---- a/src/share/vm/classfile/verifier.cpp -+++ b/src/share/vm/classfile/verifier.cpp +diff -r 8a6717910608 src/share/vm/classfile/verifier.cpp +--- a/src/share/vm/classfile/verifier.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/verifier.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -189,7 +189,7 @@ Symbol* name = klass->name(); Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass(); @@ -642,9 +628,9 @@ diff --git a/src/share/vm/classfile/verifier.cpp b/src/share/vm/classfile/verifi int num_methods = methods->length(); for (int index = 0; index < num_methods; index++) { -diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifier.hpp ---- a/src/share/vm/classfile/verifier.hpp -+++ b/src/share/vm/classfile/verifier.hpp +diff -r 8a6717910608 src/share/vm/classfile/verifier.hpp +--- a/src/share/vm/classfile/verifier.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/classfile/verifier.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -331,6 +331,7 @@ VerificationType object_type() const; @@ -653,193 +639,9 @@ diff --git a/src/share/vm/classfile/verifier.hpp b/src/share/vm/classfile/verifi instanceKlassHandle _klass; // the class being verified methodHandle _method; // current method being verified VerificationType _this_type; // the verification type of the current class -diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp ---- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp -+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp -@@ -161,6 +161,12 @@ - } - } - -+HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size, -+ CompactPoint* cp, HeapWord* 
compact_top) { -+ ShouldNotReachHere(); -+ return NULL; -+} -+ - // Like CompactibleSpace forward() but always calls cross_threshold() to - // update the block offset table. Removed initialize_threshold call because - // CFLS does not use a block offset array for contiguous spaces. -@@ -2098,7 +2104,7 @@ - // Support for compaction - - void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { -- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size); -+ SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false); - // prepare_for_compaction() uses the space between live objects - // so that later phase can skip dead space quickly. So verification - // of the free lists doesn't work after. -@@ -2119,7 +2125,7 @@ - } - - void CompactibleFreeListSpace::compact() { -- SCAN_AND_COMPACT(obj_size); -+ SCAN_AND_COMPACT(obj_size, false); - } - - // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] -diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp ---- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp -+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp -@@ -150,6 +150,7 @@ - - // Support for compacting cms - HeapWord* cross_threshold(HeapWord* start, HeapWord* end); -+ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); - HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); - - // Initialization helpers. -diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp ---- a/src/share/vm/gc_implementation/shared/markSweep.cpp -+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp -@@ -32,6 +32,8 @@ - #include "oops/objArrayKlass.inline.hpp" - #include "oops/oop.inline.hpp" - -+GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL; -+ - uint MarkSweep::_total_invocations = 0; - - Stack<oop, mtGC> MarkSweep::_marking_stack; -@@ -171,3 +173,100 @@ - } - - #endif -+ -+// (DCEVM) Copy the rescued objects to their destination address after compaction. -+void MarkSweep::copy_rescued_objects_back() { -+ -+ if (_rescued_oops != NULL) { -+ -+ for (int i=0; i<_rescued_oops->length(); i++) { -+ HeapWord* rescued_ptr = _rescued_oops->at(i); -+ oop rescued_obj = (oop) rescued_ptr; -+ -+ int size = rescued_obj->size(); -+ oop new_obj = rescued_obj->forwardee(); -+ -+ assert(rescued_obj->klass()->new_version() != NULL, "just checking"); -+ -+ if (rescued_obj->klass()->new_version()->update_information() != NULL) { -+ MarkSweep::update_fields(rescued_obj, new_obj); -+ } else { -+ rescued_obj->set_klass(rescued_obj->klass()->new_version()); -+ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size); -+ } -+ -+ FREE_RESOURCE_ARRAY(HeapWord, rescued_ptr, size); -+ -+ new_obj->init_mark(); -+ assert(new_obj->is_oop(), "must be a valid oop"); -+ } -+ _rescued_oops->clear(); -+ _rescued_oops = NULL; -+ } -+} -+ -+// (DCEVM) Update instances of a class whose fields changed. 
-+void MarkSweep::update_fields(oop q, oop new_location) { -+ -+ assert(q->klass()->new_version() != NULL, "class of old object must have new version"); -+ -+ Klass* old_klass_oop = q->klass(); -+ Klass* new_klass_oop = q->klass()->new_version(); -+ -+ InstanceKlass *old_klass = InstanceKlass::cast(old_klass_oop); -+ InstanceKlass *new_klass = InstanceKlass::cast(new_klass_oop); -+ -+ int size = q->size_given_klass(old_klass); -+ int new_size = q->size_given_klass(new_klass); -+ -+ HeapWord* tmp = NULL; -+ oop tmp_obj = q; -+ -+ // Save object somewhere, there is an overlap in fields -+ if (new_klass_oop->is_copying_backwards()) { -+ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) || -+ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) { -+ tmp = NEW_RESOURCE_ARRAY(HeapWord, size); -+ q = (oop) tmp; -+ Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size); -+ } -+ } -+ -+ q->set_klass(new_klass_oop); -+ int *cur = new_klass_oop->update_information(); -+ assert(cur != NULL, "just checking"); -+ MarkSweep::update_fields(new_location, q, cur); -+ -+ if (tmp != NULL) { -+ FREE_RESOURCE_ARRAY(HeapWord, tmp, size); -+ } -+} -+ -+void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) { -+ assert(cur != NULL, "just checking"); -+ char* to = (char*)(HeapWord*)new_location; -+ while (*cur != 0) { -+ int size = *cur; -+ if (size > 0) { -+ cur++; -+ int offset = *cur; -+ HeapWord* from = (HeapWord*)(((char *)(HeapWord*)tmp_obj) + offset); -+ if (size == HeapWordSize) { -+ *((HeapWord*)to) = *from; -+ } else if (size == HeapWordSize * 2) { -+ *((HeapWord*)to) = *from; -+ *(((HeapWord*)to) + 1) = *(from + 1); -+ } else { -+ Copy::conjoint_jbytes(from, to, size); -+ } -+ to += size; -+ cur++; -+ } else { -+ assert(size < 0, ""); -+ int skip = -*cur; -+ Copy::fill_to_bytes(to, skip, 0); -+ to += skip; -+ cur++; -+ } -+ } -+} -diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp ---- a/src/share/vm/gc_implementation/shared/markSweep.hpp -+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp -@@ -107,8 +107,12 @@ - friend class AdjustPointerClosure; - friend class KeepAliveClosure; - friend class VM_MarkSweep; -+ friend class GenMarkSweep; - friend void marksweep_init(); - -+public: -+ static GrowableArray<HeapWord*>* _rescued_oops; -+ - // - // Vars - // -@@ -169,6 +173,9 @@ - - static inline void push_objarray(oop obj, size_t index); - -+ static void copy_rescued_objects_back(); -+ static void update_fields(oop q, oop new_location); -+ static void update_fields(oop new_location, oop tmp_obj, int *cur); - static void follow_stack(); // Empty marking stack. 
- - static void follow_klass(Klass* klass); -diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interpreter/linkResolver.cpp ---- a/src/share/vm/interpreter/linkResolver.cpp -+++ b/src/share/vm/interpreter/linkResolver.cpp +diff -r 8a6717910608 src/share/vm/interpreter/linkResolver.cpp +--- a/src/share/vm/interpreter/linkResolver.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/interpreter/linkResolver.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -215,8 +215,8 @@ // Klass resolution @@ -869,434 +671,18 @@ diff --git a/src/share/vm/interpreter/linkResolver.cpp b/src/share/vm/interprete THROW(vmSymbols::java_lang_IllegalAccessError()); } -@@ -1199,6 +1199,16 @@ +@@ -1199,6 +1199,8 @@ // recv_klass might be an arrayKlassOop but all vtables start at // the same place. The cast is to avoid virtual call and assertion. InstanceKlass* inst = (InstanceKlass*)recv_klass(); -+ + // (DCEVM) Check that the receiver is a subtype of the holder of the resolved method. -+ if (!inst->is_subtype_of(resolved_method->method_holder())) { -+ inst->print(); -+ tty->print_cr("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"); -+ resolved_method->method_holder()->print(); -+ tty->print_cr("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"); -+ resolved_method->print(); -+ } + assert(inst->is_subtype_of(resolved_method->method_holder()), "receiver and resolved method holder are inconsistent"); selected_method = methodHandle(THREAD, inst->method_at_vtable(vtable_index)); } } -diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp ---- a/src/share/vm/memory/genMarkSweep.cpp -+++ b/src/share/vm/memory/genMarkSweep.cpp -@@ -334,11 +334,16 @@ - // in the same order in phase2, phase3 and phase4. We don't quite do that - // here (perm_gen first rather than last), so we tell the validate code - // to use a higher index (saved from phase2) when verifying perm_gen. -+ assert(_rescued_oops == NULL, "must be empty before processing"); - GenCollectedHeap* gch = GenCollectedHeap::heap(); - - GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer); - trace("4"); - -+ MarkSweep::copy_rescued_objects_back(); -+ - GenCompactClosure blk; - gch->generation_iterate(&blk, true); -+ -+ MarkSweep::copy_rescued_objects_back(); - } -diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp ---- a/src/share/vm/memory/space.cpp -+++ b/src/share/vm/memory/space.cpp -@@ -379,9 +379,8 @@ - _compaction_top = bottom(); - } - --HeapWord* CompactibleSpace::forward(oop q, size_t size, -- CompactPoint* cp, HeapWord* compact_top) { -- // q is alive -+// (DCEVM) Calculates the compact_top that will be used for placing the next object with the giving size on the heap. 
-+HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) { - // First check if we should switch compaction space - assert(this == cp->space, "'this' should be current compaction space."); - size_t compaction_max_size = pointer_delta(end(), compact_top); -@@ -401,8 +400,15 @@ - compaction_max_size = pointer_delta(cp->space->end(), compact_top); - } - -+ return compact_top; -+} -+ -+HeapWord* CompactibleSpace::forward(oop q, size_t size, -+ CompactPoint* cp, HeapWord* compact_top) { -+ compact_top = forward_compact_top(size, cp, compact_top); -+ - // store the forwarding pointer into the mark word -- if ((HeapWord*)q != compact_top) { -+ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) { - q->forward_to(oop(compact_top)); - assert(q->is_gc_marked(), "encoding the pointer should preserve the mark"); - } else { -@@ -423,6 +429,58 @@ - return compact_top; - } - -+// Compute the forward sizes and leave out objects whose position could -+// possibly overlap other objects. -+HeapWord* CompactibleSpace::forward_with_rescue(HeapWord* q, size_t size, -+ CompactPoint* cp, HeapWord* compact_top) { -+ size_t forward_size = size; -+ -+ // (DCEVM) There is a new version of the class of q => different size -+ if (oop(q)->klass()->new_version() != NULL && oop(q)->klass()->new_version()->update_information() != NULL) { -+ -+ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version()); -+ assert(size != new_size, "instances without changed size have to be updated prior to GC run"); -+ forward_size = new_size; -+ } -+ -+ compact_top = forward_compact_top(forward_size, cp, compact_top); -+ -+ if (must_rescue(oop(q), oop(compact_top))) { -+ if (MarkSweep::_rescued_oops == NULL) { -+ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128); -+ } -+ MarkSweep::_rescued_oops->append(q); -+ return compact_top; -+ } -+ -+ return forward(oop(q), forward_size, cp, compact_top); -+} -+ -+// Compute the forwarding addresses for the objects that need to be rescued. -+HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) { -+ // TODO: empty the _rescued_oops after ALL spaces are compacted! -+ if (MarkSweep::_rescued_oops != NULL) { -+ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) { -+ HeapWord* q = MarkSweep::_rescued_oops->at(i); -+ -+ /* size_t size = oop(q)->size(); changing this for cms for perm gen */ -+ size_t size = block_size(q); -+ -+ // (DCEVM) There is a new version of the class of q => different size -+ if (oop(q)->klass()->new_version() != NULL) { -+ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version()); -+ assert(size != new_size, "instances without changed size have to be updated prior to GC run"); -+ size = new_size; -+ } -+ -+ compact_top = cp->space->forward(oop(q), size, cp, compact_top); -+ assert(compact_top <= end(), "must not write over end of space!"); -+ } -+ MarkSweep::_rescued_oops->clear(); -+ MarkSweep::_rescued_oops = NULL; -+ } -+ return compact_top; -+} - - bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words, - HeapWord* q, size_t deadlength) { -@@ -444,12 +502,17 @@ - #define adjust_obj_size(s) s - - void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { -- SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); -+ SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false); - } - - // Faster object search. 
- void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { -- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); -+ if (!Universe::is_redefining_gc_run()) { -+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false); -+ } else { -+ // Redefinition run -+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true); -+ } - } - - void Space::adjust_pointers() { -@@ -487,6 +550,111 @@ - assert(q == t, "just checking"); - } - -+ -+#ifdef ASSERT -+ -+int CompactibleSpace::space_index(oop obj) { -+ GenCollectedHeap* heap = GenCollectedHeap::heap(); -+ -+ //if (heap->is_in_permanent(obj)) { -+ // return -1; -+ //} -+ -+ int index = 0; -+ for (int i = heap->n_gens() - 1; i >= 0; i--) { -+ Generation* gen = heap->get_gen(i); -+ CompactibleSpace* space = gen->first_compaction_space(); -+ while (space != NULL) { -+ if (space->is_in_reserved(obj)) { -+ return index; -+ } -+ space = space->next_compaction_space(); -+ index++; -+ } -+ } -+ -+ tty->print_cr("could not compute space_index for %08xh", (HeapWord*)obj); -+ index = 0; -+ for (int i = heap->n_gens() - 1; i >= 0; i--) { -+ Generation* gen = heap->get_gen(i); -+ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end()); -+ -+ CompactibleSpace* space = gen->first_compaction_space(); -+ while (space != NULL) { -+ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end()); -+ space = space->next_compaction_space(); -+ index++; -+ } -+ } -+ -+ ShouldNotReachHere(); -+ return 0; -+} -+#endif -+ -+bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) { -+ // Only redefined objects can have the need to be rescued. -+ if (oop(old_obj)->klass()->new_version() == NULL) return false; -+ -+ //if (old_obj->is_perm()) { -+ // // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj. -+ // return true; -+ //} -+ -+ int new_size = old_obj->size_given_klass(oop(old_obj)->klass()->new_version()); -+ int original_size = old_obj->size(); -+ -+ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1); -+ bool old_in_tenured = tenured_gen->is_in_reserved(old_obj); -+ bool new_in_tenured = tenured_gen->is_in_reserved(new_obj); -+ if (old_in_tenured == new_in_tenured) { -+ // Rescue if object may overlap with a higher memory address. -+ bool overlap = ((HeapWord*)old_obj + original_size < (HeapWord*)new_obj + new_size); -+ if (old_in_tenured) { -+ // Old and new address are in same space, so just compare the address. -+ // Must rescue if object moves towards the top of the space. -+ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space"); -+ } else { -+ // In the new generation, eden is located before the from space, so a -+ // simple pointer comparison is sufficient. -+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); -+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); -+ assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result"); -+ } -+ return overlap; -+ -+ } else { -+ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces"); -+ if (tenured_gen->is_in_reserved(new_obj)) { -+ // Must never rescue when moving from the new into the old generation. 
-+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration"); -+ assert(space_index(old_obj) > space_index(new_obj), "must be"); -+ return false; -+ -+ } else /* if (tenured_gen->is_in_reserved(old_obj)) */ { -+ // Must always rescue when moving from the old into the new generation. -+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration"); -+ assert(space_index(old_obj) < space_index(new_obj), "must be"); -+ return true; -+ } -+ } -+} -+ -+HeapWord* CompactibleSpace::rescue(HeapWord* old_obj) { -+ assert(must_rescue(oop(old_obj), oop(old_obj)->forwardee()), "do not call otherwise"); -+ -+ int size = oop(old_obj)->size(); -+ HeapWord* rescued_obj = NEW_RESOURCE_ARRAY(HeapWord, size); -+ Copy::aligned_disjoint_words(old_obj, rescued_obj, size); -+ -+ if (MarkSweep::_rescued_oops == NULL) { -+ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128); -+ } -+ -+ MarkSweep::_rescued_oops->append(rescued_obj); -+ return rescued_obj; -+} -+ - void CompactibleSpace::adjust_pointers() { - // Check first is there is any work to do. - if (used() == 0) { -@@ -497,7 +665,12 @@ - } - - void CompactibleSpace::compact() { -- SCAN_AND_COMPACT(obj_size); -+ if(!Universe::is_redefining_gc_run()) { -+ SCAN_AND_COMPACT(obj_size, false); -+ } else { -+ // Redefinition run -+ SCAN_AND_COMPACT(obj_size, true) -+ } - } - - void Space::print_short() const { print_short_on(tty); } -diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp ---- a/src/share/vm/memory/space.hpp -+++ b/src/share/vm/memory/space.hpp -@@ -450,6 +450,9 @@ - // indicates when the next such action should be taken. - virtual void prepare_for_compaction(CompactPoint* cp); - // MarkSweep support phase3 -+ DEBUG_ONLY(int space_index(oop obj)); -+ bool must_rescue(oop old_obj, oop new_obj); -+ HeapWord* rescue(HeapWord* old_obj); - virtual void adjust_pointers(); - // MarkSweep support phase4 - virtual void compact(); -@@ -479,6 +482,15 @@ - // accordingly". - virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp, - HeapWord* compact_top); -+ // (DCEVM) same as forwad, but can rescue objects. Invoked only during -+ // redefinition runs -+ HeapWord* forward_with_rescue(HeapWord* q, size_t size, CompactPoint* cp, -+ HeapWord* compact_top); -+ -+ HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top); -+ -+ // (tw) Compute new compact top without actually forwarding the object. -+ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top); - - // Return a size with adjusments as required of the space. 
- virtual size_t adjust_object_size_v(size_t size) const { return size; } -@@ -509,7 +521,7 @@ - size_t word_len); - }; - --#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ -+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \ - /* Compute the new addresses for the live objects and store it in the mark \ - * Used by universe::mark_sweep_phase2() \ - */ \ -@@ -567,7 +579,17 @@ - /* prefetch beyond q */ \ - Prefetch::write(q, interval); \ - size_t size = block_size(q); \ -+ if (redefinition_run) { \ -+ compact_top = cp->space->forward_with_rescue(q, size, \ -+ cp, compact_top); \ -+ if (q < first_dead && oop(q)->is_gc_marked()) { \ -+ /* Was moved (otherwise, forward would reset mark), \ -+ set first_dead to here */ \ -+ first_dead = q; \ -+ } \ -+ } else { \ - compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ -+ } \ - q += size; \ - end_of_live = q; \ - } else { \ -@@ -616,6 +638,8 @@ - } \ - } \ - \ -+ if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \ -+ \ - assert(q == t, "just checking"); \ - if (liveRange != NULL) { \ - liveRange->set_end(q); \ -@@ -662,13 +686,8 @@ - q += size; \ - } \ - \ -- if (_first_dead == t) { \ -- q = t; \ -- } else { \ -- /* $$$ This is funky. Using this to read the previously written \ -- * LiveRange. See also use below. */ \ -- q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ -- } \ -+ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ -+ q = _first_dead; \ - } \ - \ - const intx interval = PrefetchScanIntervalInBytes; \ -@@ -696,7 +715,7 @@ - assert(q == t, "just checking"); \ - } - --#define SCAN_AND_COMPACT(obj_size) { \ -+#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \ - /* Copy all live objects to their new location \ - * Used by MarkSweep::mark_sweep_phase4() */ \ - \ -@@ -721,13 +740,9 @@ - } \ - ) /* debug_only */ \ - \ -- if (_first_dead == t) { \ -- q = t; \ -- } else { \ -- /* $$$ Funky */ \ -- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ -+ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \ -+ q = _first_dead; \ - } \ -- } \ - \ - const intx scan_interval = PrefetchScanIntervalInBytes; \ - const intx copy_interval = PrefetchCopyIntervalInBytes; \ -@@ -745,11 +760,34 @@ - size_t size = obj_size(q); \ - HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ - \ -+ if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \ -+ rescue(q); \ -+ debug_only(Copy::fill_to_words(q, size, 0)); \ -+ q += size; \ -+ continue; \ -+ } \ -+ \ - /* prefetch beyond compaction_top */ \ - Prefetch::write(compaction_top, copy_interval); \ - \ - /* copy object and reinit its mark */ \ -- assert(q != compaction_top, "everything in this pass should be moving"); \ -+ assert(q != compaction_top || oop(q)->klass()->new_version() != NULL, \ -+ "everything in this pass should be moving"); \ -+ if (redefinition_run && oop(q)->klass()->new_version() != NULL) { \ -+ Klass* new_version = oop(q)->klass()->new_version(); \ -+ if (new_version->update_information() == NULL) { \ -+ Copy::aligned_conjoint_words(q, compaction_top, size); \ -+ oop(compaction_top)->set_klass(new_version); \ -+ } else { \ -+ MarkSweep::update_fields(oop(q), oop(compaction_top)); \ -+ } \ -+ oop(compaction_top)->init_mark(); \ -+ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ -+ \ -+ debug_only(prev_q = q); \ -+ q += size; \ -+ continue; \ -+ } \ - Copy::aligned_conjoint_words(q, 
compaction_top, size); \ - oop(compaction_top)->init_mark(); \ - assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ -diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp ---- a/src/share/vm/memory/universe.cpp -+++ b/src/share/vm/memory/universe.cpp -@@ -78,6 +78,8 @@ - #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" - #endif // INCLUDE_ALL_GCS - -+bool Universe::_is_redefining_gc_run = false; -+ - // Known objects - Klass* Universe::_boolArrayKlassObj = NULL; - Klass* Universe::_byteArrayKlassObj = NULL; +diff -r 8a6717910608 src/share/vm/memory/universe.cpp +--- a/src/share/vm/memory/universe.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/memory/universe.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -157,6 +159,42 @@ f(doubleArrayKlassObj()); } @@ -1340,23 +726,9 @@ diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp void Universe::oops_do(OopClosure* f, bool do_all) { f->do_oop((oop*) &_int_mirror); -diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp ---- a/src/share/vm/memory/universe.hpp -+++ b/src/share/vm/memory/universe.hpp -@@ -248,7 +248,13 @@ - - static void compute_verify_oop_data(); - -+ static bool _is_redefining_gc_run; -+ - public: -+ -+ static bool is_redefining_gc_run() { return _is_redefining_gc_run; } -+ static void set_redefining_gc_run(bool b) { _is_redefining_gc_run = b; } -+ - // Known classes in the VM - static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; } - static Klass* byteArrayKlassObj() { return _byteArrayKlassObj; } +diff -r 8a6717910608 src/share/vm/memory/universe.hpp +--- a/src/share/vm/memory/universe.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/memory/universe.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -401,6 +407,7 @@ static void run_finalizers_on_exit(); @@ -1365,9 +737,9 @@ diff --git a/src/share/vm/memory/universe.hpp b/src/share/vm/memory/universe.hpp // Apply "f" to the addresses of all the direct heap pointers maintained // as static fields of "Universe". 
-diff --git a/src/share/vm/oops/cpCache.cpp b/src/share/vm/oops/cpCache.cpp ---- a/src/share/vm/oops/cpCache.cpp -+++ b/src/share/vm/oops/cpCache.cpp +diff -r 8a6717910608 src/share/vm/oops/cpCache.cpp +--- a/src/share/vm/oops/cpCache.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/cpCache.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -336,7 +336,8 @@ if (has_appendix) { const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset; @@ -1430,9 +802,9 @@ diff --git a/src/share/vm/oops/cpCache.cpp b/src/share/vm/oops/cpCache.cpp #endif // INCLUDE_JVMTI -diff --git a/src/share/vm/oops/cpCache.hpp b/src/share/vm/oops/cpCache.hpp ---- a/src/share/vm/oops/cpCache.hpp -+++ b/src/share/vm/oops/cpCache.hpp +diff -r 8a6717910608 src/share/vm/oops/cpCache.hpp +--- a/src/share/vm/oops/cpCache.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/cpCache.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -373,6 +373,10 @@ bool * trace_name_printed); bool check_no_old_or_obsolete_entries(); @@ -1455,9 +827,9 @@ diff --git a/src/share/vm/oops/cpCache.hpp b/src/share/vm/oops/cpCache.hpp #endif // INCLUDE_JVMTI // Deallocate - no fields to deallocate -diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp ---- a/src/share/vm/oops/instanceKlass.cpp -+++ b/src/share/vm/oops/instanceKlass.cpp +diff -r 8a6717910608 src/share/vm/oops/instanceKlass.cpp +--- a/src/share/vm/oops/instanceKlass.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/instanceKlass.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -718,7 +718,8 @@ } #endif @@ -1601,9 +973,9 @@ diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKla } // end has_previous_version() -diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp ---- a/src/share/vm/oops/instanceKlass.hpp -+++ b/src/share/vm/oops/instanceKlass.hpp +diff -r 8a6717910608 src/share/vm/oops/instanceKlass.hpp +--- a/src/share/vm/oops/instanceKlass.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/instanceKlass.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -139,6 +139,7 @@ friend class VMStructs; friend class ClassFileParser; @@ -1648,9 +1020,9 @@ diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKla void methods_do(void f(Method* method)); void array_klasses_do(void f(Klass* k)); void array_klasses_do(void f(Klass* k, TRAPS), TRAPS); -diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp ---- a/src/share/vm/oops/klass.cpp -+++ b/src/share/vm/oops/klass.cpp +diff -r 8a6717910608 src/share/vm/oops/klass.cpp +--- a/src/share/vm/oops/klass.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/klass.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -170,6 +170,13 @@ set_next_link(NULL); TRACE_INIT_ID(this); @@ -1690,9 +1062,9 @@ diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp bool Klass::is_loader_alive(BoolObjectClosure* is_alive) { assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace"); -diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp ---- a/src/share/vm/oops/klass.hpp -+++ b/src/share/vm/oops/klass.hpp +diff -r 8a6717910608 src/share/vm/oops/klass.hpp +--- a/src/share/vm/oops/klass.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/klass.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -149,6 +149,10 @@ oop _java_mirror; // Superclass @@ -1800,9 +1172,9 @@ diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp // Compiler support static ByteSize super_offset() { 
return in_ByteSize(offset_of(Klass, _super)); } -diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp ---- a/src/share/vm/oops/klassVtable.cpp -+++ b/src/share/vm/oops/klassVtable.cpp +diff -r 8a6717910608 src/share/vm/oops/klassVtable.cpp +--- a/src/share/vm/oops/klassVtable.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/klassVtable.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -1409,6 +1409,8 @@ void klassVtable::verify_against(outputStream* st, klassVtable* vt, int index) { @@ -1832,9 +1204,9 @@ diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.c } } -diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp ---- a/src/share/vm/oops/method.cpp -+++ b/src/share/vm/oops/method.cpp +diff -r 8a6717910608 src/share/vm/oops/method.cpp +--- a/src/share/vm/oops/method.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/method.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -1185,6 +1185,8 @@ // Reset correct method/const method, method size, and parameter info @@ -1855,9 +1227,9 @@ diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp ClassLoaderData* cld = loader_data; if (!SafepointSynchronize::is_at_safepoint()) { -diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp ---- a/src/share/vm/oops/method.hpp -+++ b/src/share/vm/oops/method.hpp +diff -r 8a6717910608 src/share/vm/oops/method.hpp +--- a/src/share/vm/oops/method.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/oops/method.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -105,6 +105,10 @@ AccessFlags _access_flags; // Access flags int _vtable_index; // vtable index of this method (see VtableIndexFlag) @@ -1893,9 +1265,9 @@ diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp // signature Symbol* signature() const { return constants()->symbol_at(signature_index()); } int signature_index() const { return constMethod()->signature_index(); } -diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp ---- a/src/share/vm/prims/jni.cpp -+++ b/src/share/vm/prims/jni.cpp +diff -r 8a6717910608 src/share/vm/prims/jni.cpp +--- a/src/share/vm/prims/jni.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/prims/jni.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -406,6 +406,7 @@ } Klass* k = SystemDictionary::resolve_from_stream(class_name, class_loader, @@ -1904,9 +1276,9 @@ diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp CHECK_NULL); if (TraceClassResolution && k != NULL) { -diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp ---- a/src/share/vm/prims/jvm.cpp -+++ b/src/share/vm/prims/jvm.cpp +diff -r 8a6717910608 src/share/vm/prims/jvm.cpp +--- a/src/share/vm/prims/jvm.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/prims/jvm.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -904,6 +904,7 @@ Klass* k = SystemDictionary::resolve_from_stream(class_name, class_loader, protection_domain, &st, @@ -1915,9 +1287,9 @@ diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp CHECK_NULL); if (TraceClassResolution && k != NULL) { -diff --git a/src/share/vm/prims/jvmtiEnv.cpp b/src/share/vm/prims/jvmtiEnv.cpp ---- a/src/share/vm/prims/jvmtiEnv.cpp -+++ b/src/share/vm/prims/jvmtiEnv.cpp +diff -r 8a6717910608 src/share/vm/prims/jvmtiEnv.cpp +--- a/src/share/vm/prims/jvmtiEnv.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/prims/jvmtiEnv.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -42,6 +42,7 @@ #include "prims/jvmtiManageCapabilities.hpp" #include "prims/jvmtiRawMonitor.hpp" @@ 
-1963,9 +1335,9 @@ diff --git a/src/share/vm/prims/jvmtiEnv.cpp b/src/share/vm/prims/jvmtiEnv.cpp VM_RedefineClasses op(class_count, class_definitions, jvmti_class_load_kind_redefine); VMThread::execute(&op); return (op.check_error()); -diff --git a/src/share/vm/prims/jvmtiExport.hpp b/src/share/vm/prims/jvmtiExport.hpp ---- a/src/share/vm/prims/jvmtiExport.hpp -+++ b/src/share/vm/prims/jvmtiExport.hpp +diff -r 8a6717910608 src/share/vm/prims/jvmtiExport.hpp +--- a/src/share/vm/prims/jvmtiExport.hpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/prims/jvmtiExport.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -188,6 +188,7 @@ // systems as needed to relax invariant checks. static bool _has_redefined_a_class; @@ -1974,9 +1346,9 @@ diff --git a/src/share/vm/prims/jvmtiExport.hpp b/src/share/vm/prims/jvmtiExport inline static void set_has_redefined_a_class() { JVMTI_ONLY(_has_redefined_a_class = true;) } -diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp ---- a/src/share/vm/prims/jvmtiImpl.cpp -+++ b/src/share/vm/prims/jvmtiImpl.cpp +diff -r 8a6717910608 src/share/vm/prims/jvmtiImpl.cpp +--- a/src/share/vm/prims/jvmtiImpl.cpp Tue Mar 11 13:02:13 2014 -0700 ++++ b/src/share/vm/prims/jvmtiImpl.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -289,6 +289,11 @@ Symbol* m_name = _method->name(); Symbol* m_signature = _method->signature(); @@ -1989,10 +1361,9 @@ diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp // search previous versions if they exist PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh()); for (PreviousVersionNode * pv_node = pvw.next_previous_version(); -diff --git a/src/share/vm/prims/jvmtiRedefineClasses2.cpp b/src/share/vm/prims/jvmtiRedefineClasses2.cpp -new file mode 100644 ---- /dev/null -+++ b/src/share/vm/prims/jvmtiRedefineClasses2.cpp +diff -r 8a6717910608 src/share/vm/prims/jvmtiRedefineClasses2.cpp +--- /dev/null Thu Jan 01 00:00:00 1970 +0000 ++++ b/src/share/vm/prims/jvmtiRedefineClasses2.cpp Wed Apr 30 11:27:18 2014 -0700 @@ -0,0 +1,2034 @@ +/* + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. @@ -4028,10 +3399,9 @@ new file mode 100644 + transfer.transfer_registrations(old_klass, _deleted_methods, _deleted_methods_length); + transfer.transfer_registrations(old_klass, _matching_old_methods, _matching_methods_length); +} -diff --git a/src/share/vm/prims/jvmtiRedefineClasses2.hpp b/src/share/vm/prims/jvmtiRedefineClasses2.hpp -new file mode 100644 ---- /dev/null -+++ b/src/share/vm/prims/jvmtiRedefineClasses2.hpp +diff -r 8a6717910608 src/share/vm/prims/jvmtiRedefineClasses2.hpp +--- /dev/null Thu Jan 01 00:00:00 1970 +0000 ++++ b/src/share/vm/prims/jvmtiRedefineClasses2.hpp Wed Apr 30 11:27:18 2014 -0700 @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. @@ -4189,65 +3559,9 @@ new file mode 100644 +}; + +#endif // SHARE_VM_PRIMS_JVMTIENHANCEDREDEFINECLASSES_HPP -diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp ---- a/src/share/vm/runtime/arguments.cpp -+++ b/src/share/vm/runtime/arguments.cpp -@@ -59,8 +59,8 @@ - #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" - #endif // INCLUDE_ALL_GCS - --// Note: This is a special bug reporting site for the JVM --#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp" -+// (DCEVM) The DCE VM has its own JIRA bug tracking system. 
-+#define DEFAULT_VENDOR_URL_BUG "https://github.com/Guidewire/DCEVM/issues"
- #define DEFAULT_JAVA_LAUNCHER "generic"
- 
- // Disable options not supported in this release, with a warning if they
-@@ -1507,6 +1507,10 @@
- 
- void Arguments::set_ergonomics_flags() {
- 
-+ if (AllowEnhancedClassRedefinition) {
-+ // (DCEVM) enforces serial GC
-+ FLAG_SET_ERGO(bool, UseSerialGC, true);
-+ }
- if (os::is_server_class_machine()) {
- // If no other collector is requested explicitly,
- // let the VM select the collector based on
-@@ -1948,6 +1952,17 @@
- if (UseConcMarkSweepGC || UseParNewGC) i++;
- if (UseParallelGC || UseParallelOldGC) i++;
- if (UseG1GC) i++;
-+
-+ if (AllowEnhancedClassRedefinition) {
-+ // (DCEVM) Must use serial GC. This limitation applies because the instance size changing GC modifications
-+ // are only built into the mark and compact algorithm.
-+ if (!UseSerialGC && i >= 1) {
-+ jio_fprintf(defaultStream::error_stream(),
-+ "Must use the serial GC in the DCEVM\n");
-+ status = false;
-+ }
-+ }
-+
- if (i > 1) {
- jio_fprintf(defaultStream::error_stream(),
- "Conflicting collector combinations in option list; "
-diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
---- a/src/share/vm/runtime/globals.hpp
-+++ b/src/share/vm/runtime/globals.hpp
-@@ -1273,6 +1273,9 @@
- product(intx, TraceRedefineClasses, 0, \
- "Trace level for JVMTI RedefineClasses") \
- \
-+ product(bool, AllowEnhancedClassRedefinition, true, \
-+ "Allow enhanced class redefinition beyond swapping method bodies")\
-+ \
- develop(bool, StressMethodComparator, false, \
- "Run the MethodComparator on all loaded methods") \
- \
-diff --git a/src/share/vm/runtime/reflection.cpp b/src/share/vm/runtime/reflection.cpp
---- a/src/share/vm/runtime/reflection.cpp
-+++ b/src/share/vm/runtime/reflection.cpp
+diff -r 8a6717910608 src/share/vm/runtime/reflection.cpp
+--- a/src/share/vm/runtime/reflection.cpp	Tue Mar 11 13:02:13 2014 -0700
++++ b/src/share/vm/runtime/reflection.cpp	Wed Apr 30 11:27:18 2014 -0700
 @@ -519,6 +519,12 @@
  AccessFlags access,
  bool classloader_only,
diff --git a/hotspot/.hg/patches/series b/hotspot/.hg/patches/series
index b17eafbf..e59ea4e6 100644
--- a/hotspot/.hg/patches/series
+++ b/hotspot/.hg/patches/series
@@ -1,3 +1,17 @@
+# Change distribution name
+distro-name.patch
+
+# Add AllowEnhancedClassRedefinition argument
+arguments-java8.patch
+
+# GC changes to allow modifying instances during redefinition run
+gc-java8.patch #+jdk8
 # Rest of the changes
-light-jdk8u5-b13.patch #+light #+jdk8 #+u5-b13
+full-jdk7u11-b21.patch #+full-jdk7u11-b21
+full-jdk7u45-b08.patch #+full-jdk7u45-b08
+full-jdk7u51-b13.patch #+full-jdk7u51-b13
+light-jdk7u40-b43.patch #+light-jdk7u40-b43
+light-jdk7u51-b13.patch #+light-jdk7u51-b13
+light-jdk8u5-b13.patch #+light-jdk8u5-b13
+
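For orientation beyond the raw hunks: the GC pieces split out into gc-java8.patch revolve around the update-information table consumed by MarkSweep::update_fields, visible in the removed markSweep.cpp hunk earlier in this diff. Its encoding is an int stream: a positive entry means "copy that many bytes from the given offset of the old instance", a negative entry means "zero-fill that many bytes for a field added by the redefinition", and 0 terminates the stream. The following is a self-contained sketch of that interpretation outside the VM; the names and the example layout are illustrative only.

#include <cstdio>
#include <cstring>

// Interpret one update-information stream: positive entry = copy run
// (followed by the source offset), negative entry = zero-filled gap, 0 = end.
static void apply_update_information(const int* cur, const char* old_obj, char* to) {
  while (*cur != 0) {
    int size = *cur++;
    if (size > 0) {                 // copy `size` bytes of preserved fields
      int offset = *cur++;          // ...taken from this offset in the old instance
      std::memcpy(to, old_obj + offset, size);
      to += size;
    } else {                        // a field added by the redefinition:
      std::memset(to, 0, -size);    // zero-fill the gap
      to += -size;
    }
  }
}

int main() {
  // Old layout: int a = 1, int b = 2. New layout: b, a new zeroed int, a.
  int old_obj[2] = {1, 2};
  int new_obj[3];
  const int info[] = {4, 4,   // copy 4 bytes from old offset 4 (field b)
                      -4,     // zero-fill 4 bytes (field added in the new version)
                      4, 0,   // copy 4 bytes from old offset 0 (field a)
                      0};     // terminator
  apply_update_information(info, (const char*)old_obj, (char*)new_obj);
  std::printf("%d %d %d\n", new_obj[0], new_obj[1], new_obj[2]);  // prints: 2 0 1
  return 0;
}

The VM loop in the hunk additionally special-cases one- and two-word runs before falling back to a byte copy, but the stream format is the same.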