author | Ivan Dubrov <idubrov@guidewire.com> | 2015-03-31 18:14:18 -0700
committer | Ivan Dubrov <idubrov@guidewire.com> | 2015-03-31 18:16:53 -0700
commit | 53c3f53814ae54b4450fc68f01f1972631eaf9e8 (patch)
tree | 4474eab37dec71cbdc2b368ee48d0376c7b9ab2a /hotspot/.hg/patches/gc-java8u40.patch
parent | b620f801f5a0382da772f73af23da315667357a4 (diff)
jdk8u40-b25 updates
Diffstat (limited to 'hotspot/.hg/patches/gc-java8u40.patch')
-rw-r--r-- | hotspot/.hg/patches/gc-java8u40.patch | 619
1 file changed, 619 insertions(+), 0 deletions(-)
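The patch below teaches the serial mark-and-sweep collector to tolerate instances whose size changed during a class redefinition: phase 2 (SCAN_AND_FORWARD) diverts any object whose destination could overlap a not-yet-moved neighbor onto MarkSweep::_rescued_oops, phase 4 (SCAN_AND_COMPACT) copies rescued objects into a side buffer, and copy_rescued_objects_back() replays them once compaction is done. For orientation, here is a minimal, self-contained C++ sketch of the overlap test that CompactibleSpace::must_rescue performs further down — plain pointers and a stand-in Range type rather than HotSpot's Generation/oop machinery, and it assumes the caller has already filtered out objects whose class was not redefined:

#include <cstddef>

// Stand-in for a generation's reserved address range (not a HotSpot type).
struct Range {
  const char* lo;
  const char* hi;
  bool contains(const char* p) const { return p >= lo && p < hi; }
};

// old_obj/new_obj are the current and destination addresses of a redefined
// object; old_size/new_size are its instance sizes in bytes before and after
// the class change. Mirrors the decision in CompactibleSpace::must_rescue.
static bool must_rescue(const Range& tenured,
                        const char* old_obj, size_t old_size,
                        const char* new_obj, size_t new_size) {
  bool old_in_tenured = tenured.contains(old_obj);
  bool new_in_tenured = tenured.contains(new_obj);
  if (old_in_tenured == new_in_tenured) {
    // Same generation: rescue only when the destination range extends past
    // the end of the source range, i.e. the object moves toward the top of
    // the space and the copy could run over not-yet-moved data.
    return old_obj + old_size < new_obj + new_size;
  }
  // Cross-generation moves: promotion (young -> tenured) never needs a
  // rescue copy; moving out of tenured into young always does.
  return !new_in_tenured;
}

The same-generation branch mirrors the patch's overlap expression; the two cross-generation branches encode the asserts in the patch: moving into the tenured generation returns false, moving out of it returns true.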
diff --git a/hotspot/.hg/patches/gc-java8u40.patch b/hotspot/.hg/patches/gc-java8u40.patch
new file mode 100644
index 00000000..f4c471a3
--- /dev/null
+++ b/hotspot/.hg/patches/gc-java8u40.patch
@@ -0,0 +1,619 @@
+# HG changeset patch
+# Parent 8f44f8a7e50563e6c9a82fb0ed6c7bce4925bd3b
+Change MarkAndSweep garbage collector to allow changing instances during redefinition.
+
+diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:05:19 2015 -0700
+@@ -163,6 +163,12 @@
+   }
+ }
+ 
++HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size,
++                                    CompactPoint* cp, HeapWord* compact_top) {
++  ShouldNotReachHere();
++  return NULL;
++}
++
+ // Like CompactibleSpace forward() but always calls cross_threshold() to
+ // update the block offset table.  Removed initialize_threshold call because
+ // CFLS does not use a block offset array for contiguous spaces.
+@@ -2097,7 +2103,7 @@
+ // Support for compaction
+ 
+ void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
+-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
++  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false);
+   // prepare_for_compaction() uses the space between live objects
+   // so that later phase can skip dead space quickly.  So verification
+   // of the free lists doesn't work after.
+@@ -2118,7 +2124,7 @@
+ }
+ 
+ void CompactibleFreeListSpace::compact() {
+-  SCAN_AND_COMPACT(obj_size);
++  SCAN_AND_COMPACT(obj_size, false);
+ }
+ 
+ // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
+diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:05:19 2015 -0700
+@@ -150,6 +150,7 @@
+ 
+   // Support for compacting cms
+   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
++  HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
+   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
+ 
+   // Initialization helpers.
+diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.cpp
+--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:05:19 2015 -0700
+@@ -48,6 +48,8 @@
+ STWGCTimer* MarkSweep::_gc_timer = NULL;
+ SerialOldTracer* MarkSweep::_gc_tracer = NULL;
+ 
++GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL;
++
+ MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
+ 
+ void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
+@@ -165,3 +167,100 @@
+ }
+ 
+ #endif
++
++// (DCEVM) Copy the rescued objects to their destination address after compaction.
++void MarkSweep::copy_rescued_objects_back() {
++
++  if (_rescued_oops != NULL) {
++
++    for (int i=0; i<_rescued_oops->length(); i++) {
++      HeapWord* rescued_ptr = _rescued_oops->at(i);
++      oop rescued_obj = (oop) rescued_ptr;
++
++      int size = rescued_obj->size();
++      oop new_obj = rescued_obj->forwardee();
++
++      assert(rescued_obj->klass()->new_version() != NULL, "just checking");
++
++      if (rescued_obj->klass()->new_version()->update_information() != NULL) {
++        MarkSweep::update_fields(rescued_obj, new_obj);
++      } else {
++        rescued_obj->set_klass(rescued_obj->klass()->new_version());
++        Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size);
++      }
++
++      FREE_RESOURCE_ARRAY(HeapWord, rescued_ptr, size);
++
++      new_obj->init_mark();
++      assert(new_obj->is_oop(), "must be a valid oop");
++    }
++    _rescued_oops->clear();
++    _rescued_oops = NULL;
++  }
++}
++
++// (DCEVM) Update instances of a class whose fields changed.
++void MarkSweep::update_fields(oop q, oop new_location) {
++
++  assert(q->klass()->new_version() != NULL, "class of old object must have new version");
++
++  Klass* old_klass_oop = q->klass();
++  Klass* new_klass_oop = q->klass()->new_version();
++
++  InstanceKlass *old_klass = InstanceKlass::cast(old_klass_oop);
++  InstanceKlass *new_klass = InstanceKlass::cast(new_klass_oop);
++
++  int size = q->size_given_klass(old_klass);
++  int new_size = q->size_given_klass(new_klass);
++
++  HeapWord* tmp = NULL;
++  oop tmp_obj = q;
++
++  // Save the object somewhere else first, since the old and new fields overlap.
++  if (new_klass_oop->is_copying_backwards()) {
++    if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) ||
++        ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) {
++      tmp = NEW_RESOURCE_ARRAY(HeapWord, size);
++      q = (oop) tmp;
++      Copy::aligned_disjoint_words((HeapWord*)tmp_obj, (HeapWord*)q, size);
++    }
++  }
++
++  q->set_klass(new_klass_oop);
++  int *cur = new_klass_oop->update_information();
++  assert(cur != NULL, "just checking");
++  MarkSweep::update_fields(new_location, q, cur);
++
++  if (tmp != NULL) {
++    FREE_RESOURCE_ARRAY(HeapWord, tmp, size);
++  }
++}
++
++void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) {
++  assert(cur != NULL, "just checking");
++  char* to = (char*)(HeapWord*)new_location;
++  while (*cur != 0) {
++    int size = *cur;
++    if (size > 0) {
++      cur++;
++      int offset = *cur;
++      HeapWord* from = (HeapWord*)(((char *)(HeapWord*)tmp_obj) + offset);
++      if (size == HeapWordSize) {
++        *((HeapWord*)to) = *from;
++      } else if (size == HeapWordSize * 2) {
++        *((HeapWord*)to) = *from;
++        *(((HeapWord*)to) + 1) = *(from + 1);
++      } else {
++        Copy::conjoint_jbytes(from, to, size);
++      }
++      to += size;
++      cur++;
++    } else {
++      assert(size < 0, "");
++      int skip = -*cur;
++      Copy::fill_to_bytes(to, skip, 0);
++      to += skip;
++      cur++;
++    }
++  }
++}
+diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.hpp
+--- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:05:19 2015 -0700
+@@ -96,8 +96,12 @@
+   friend class AdjustPointerClosure;
+   friend class KeepAliveClosure;
+   friend class VM_MarkSweep;
++  friend class GenMarkSweep;
+   friend void marksweep_init();
+ 
++public:
++  static GrowableArray<HeapWord*>* _rescued_oops;
++
+   //
+   // Vars
+   //
+@@ -157,6 +161,9 @@
+ 
+   static inline void push_objarray(oop obj, size_t index);
+ 
++  static void copy_rescued_objects_back();
++  static void update_fields(oop q, oop new_location);
++  static void update_fields(oop new_location, oop tmp_obj, int *cur);
+   static void follow_stack();   // Empty marking stack.
+ 
+   static void follow_klass(Klass* klass);
+diff -r 8f44f8a7e505 src/share/vm/memory/genMarkSweep.cpp
+--- a/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:05:19 2015 -0700
+@@ -327,11 +327,16 @@
+   // in the same order in phase2, phase3 and phase4. We don't quite do that
+   // here (perm_gen first rather than last), so we tell the validate code
+   // to use a higher index (saved from phase2) when verifying perm_gen.
++  assert(_rescued_oops == NULL, "must be empty before processing");
+   GenCollectedHeap* gch = GenCollectedHeap::heap();
+ 
+   GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+   trace("4");
+ 
++  MarkSweep::copy_rescued_objects_back();
++
+   GenCompactClosure blk;
+   gch->generation_iterate(&blk, true);
++
++  MarkSweep::copy_rescued_objects_back();
+ }
+diff -r 8f44f8a7e505 src/share/vm/memory/space.cpp
+--- a/src/share/vm/memory/space.cpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/memory/space.cpp Tue Mar 31 18:05:19 2015 -0700
+@@ -377,9 +377,8 @@
+   _compaction_top = bottom();
+ }
+ 
+-HeapWord* CompactibleSpace::forward(oop q, size_t size,
+-                                    CompactPoint* cp, HeapWord* compact_top) {
+-  // q is alive
++// (DCEVM) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
++HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) {
+   // First check if we should switch compaction space
+   assert(this == cp->space, "'this' should be current compaction space.");
+   size_t compaction_max_size = pointer_delta(end(), compact_top);
+@@ -399,8 +398,15 @@
+     compaction_max_size = pointer_delta(cp->space->end(), compact_top);
+   }
+ 
++  return compact_top;
++}
++
++HeapWord* CompactibleSpace::forward(oop q, size_t size,
++                                    CompactPoint* cp, HeapWord* compact_top) {
++  compact_top = forward_compact_top(size, cp, compact_top);
++
+   // store the forwarding pointer into the mark word
+-  if ((HeapWord*)q != compact_top) {
++  if ((HeapWord*)q != compact_top || (size_t)q->size() != size) {
+     q->forward_to(oop(compact_top));
+     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
+   } else {
+@@ -421,6 +427,58 @@
+   return compact_top;
+ }
+ 
++// Compute the forward sizes and leave out objects whose position could
++// possibly overlap other objects.
++HeapWord* CompactibleSpace::forward_with_rescue(HeapWord* q, size_t size,
++                                                CompactPoint* cp, HeapWord* compact_top) {
++  size_t forward_size = size;
++
++  // (DCEVM) There is a new version of the class of q => different size
++  if (oop(q)->klass()->new_version() != NULL && oop(q)->klass()->new_version()->update_information() != NULL) {
++
++    size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
++    assert(size != new_size, "instances without changed size have to be updated prior to GC run");
++    forward_size = new_size;
++  }
++
++  compact_top = forward_compact_top(forward_size, cp, compact_top);
++
++  if (must_rescue(oop(q), oop(compact_top))) {
++    if (MarkSweep::_rescued_oops == NULL) {
++      MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
++    }
++    MarkSweep::_rescued_oops->append(q);
++    return compact_top;
++  }
++
++  return forward(oop(q), forward_size, cp, compact_top);
++}
++
++// Compute the forwarding addresses for the objects that need to be rescued.
++HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) {
++  // TODO: empty the _rescued_oops after ALL spaces are compacted!
++  if (MarkSweep::_rescued_oops != NULL) {
++    for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) {
++      HeapWord* q = MarkSweep::_rescued_oops->at(i);
++
++      /* size_t size = oop(q)->size();  changing this for cms for perm gen */
++      size_t size = block_size(q);
++
++      // (DCEVM) There is a new version of the class of q => different size
++      if (oop(q)->klass()->new_version() != NULL) {
++        size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
++        assert(size != new_size, "instances without changed size have to be updated prior to GC run");
++        size = new_size;
++      }
++
++      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
++      assert(compact_top <= end(), "must not write over end of space!");
++    }
++    MarkSweep::_rescued_oops->clear();
++    MarkSweep::_rescued_oops = NULL;
++  }
++  return compact_top;
++}
+ 
+ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
+                                         HeapWord* q, size_t deadlength) {
+@@ -442,12 +500,17 @@
+ #define adjust_obj_size(s) s
+ 
+ void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
+-  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
++  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false);
+ }
+ 
+ // Faster object search.
+ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
+-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
++  if (!Universe::is_redefining_gc_run()) {
++    SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false);
++  } else {
++    // Redefinition run
++    SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true);
++  }
+ }
+ 
+ void Space::adjust_pointers() {
+@@ -485,6 +548,111 @@
+   assert(q == t, "just checking");
+ }
+ 
++
++#ifdef ASSERT
++
++int CompactibleSpace::space_index(oop obj) {
++  GenCollectedHeap* heap = GenCollectedHeap::heap();
++
++  //if (heap->is_in_permanent(obj)) {
++  //  return -1;
++  //}
++
++  int index = 0;
++  for (int i = heap->n_gens() - 1; i >= 0; i--) {
++    Generation* gen = heap->get_gen(i);
++    CompactibleSpace* space = gen->first_compaction_space();
++    while (space != NULL) {
++      if (space->is_in_reserved(obj)) {
++        return index;
++      }
++      space = space->next_compaction_space();
++      index++;
++    }
++  }
++
++  tty->print_cr("could not compute space_index for %08xh", (HeapWord*)obj);
++  index = 0;
++  for (int i = heap->n_gens() - 1; i >= 0; i--) {
++    Generation* gen = heap->get_gen(i);
++    tty->print_cr("  generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end());
++
++    CompactibleSpace* space = gen->first_compaction_space();
++    while (space != NULL) {
++      tty->print_cr("    %2d space %08xh - %08xh", index, space->bottom(), space->end());
++      space = space->next_compaction_space();
++      index++;
++    }
++  }
++
++  ShouldNotReachHere();
++  return 0;
++}
++#endif
++
++bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) {
++  // Only redefined objects can have the need to be rescued.
++  if (oop(old_obj)->klass()->new_version() == NULL) return false;
++
++  //if (old_obj->is_perm()) {
++  //  // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj.
++  //  return true;
++  //}
++
++  int new_size = old_obj->size_given_klass(oop(old_obj)->klass()->new_version());
++  int original_size = old_obj->size();
++
++  Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1);
++  bool old_in_tenured = tenured_gen->is_in_reserved(old_obj);
++  bool new_in_tenured = tenured_gen->is_in_reserved(new_obj);
++  if (old_in_tenured == new_in_tenured) {
++    // Rescue if object may overlap with a higher memory address.
++    bool overlap = ((HeapWord*)old_obj + original_size < (HeapWord*)new_obj + new_size);
++    if (old_in_tenured) {
++      // Old and new address are in same space, so just compare the address.
++      // Must rescue if object moves towards the top of the space.
++      assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space");
++    } else {
++      // In the new generation, eden is located before the from space, so a
++      // simple pointer comparison is sufficient.
++      assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
++      assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
++      assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result");
++    }
++    return overlap;
++
++  } else {
++    assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces");
++    if (tenured_gen->is_in_reserved(new_obj)) {
++      // Must never rescue when moving from the new into the old generation.
++      assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
++      assert(space_index(old_obj) > space_index(new_obj), "must be");
++      return false;
++
++    } else /* if (tenured_gen->is_in_reserved(old_obj)) */ {
++      // Must always rescue when moving from the old into the new generation.
++      assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
++      assert(space_index(old_obj) < space_index(new_obj), "must be");
++      return true;
++    }
++  }
++}
++
++HeapWord* CompactibleSpace::rescue(HeapWord* old_obj) {
++  assert(must_rescue(oop(old_obj), oop(old_obj)->forwardee()), "do not call otherwise");
++
++  int size = oop(old_obj)->size();
++  HeapWord* rescued_obj = NEW_RESOURCE_ARRAY(HeapWord, size);
++  Copy::aligned_disjoint_words(old_obj, rescued_obj, size);
++
++  if (MarkSweep::_rescued_oops == NULL) {
++    MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
++  }
++
++  MarkSweep::_rescued_oops->append(rescued_obj);
++  return rescued_obj;
++}
++
+ void CompactibleSpace::adjust_pointers() {
+   // Check first is there is any work to do.
+   if (used() == 0) {
+@@ -495,7 +663,12 @@
+   }
+ }
+ 
+ void CompactibleSpace::compact() {
+-  SCAN_AND_COMPACT(obj_size);
++  if (!Universe::is_redefining_gc_run()) {
++    SCAN_AND_COMPACT(obj_size, false);
++  } else {
++    // Redefinition run
++    SCAN_AND_COMPACT(obj_size, true);
++  }
+ }
+ 
+ void Space::print_short() const { print_short_on(tty); }
+diff -r 8f44f8a7e505 src/share/vm/memory/space.hpp
+--- a/src/share/vm/memory/space.hpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/memory/space.hpp Tue Mar 31 18:05:19 2015 -0700
+@@ -392,6 +392,9 @@
+   // indicates when the next such action should be taken.
+   virtual void prepare_for_compaction(CompactPoint* cp);
+   // MarkSweep support phase3
++  DEBUG_ONLY(int space_index(oop obj));
++  bool must_rescue(oop old_obj, oop new_obj);
++  HeapWord* rescue(HeapWord* old_obj);
+   virtual void adjust_pointers();
+   // MarkSweep support phase4
+   virtual void compact();
+@@ -421,6 +424,15 @@
+   // accordingly".
+   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
+                             HeapWord* compact_top);
++  // (DCEVM) same as forward, but can rescue objects. Invoked only during
++  // redefinition runs.
++  HeapWord* forward_with_rescue(HeapWord* q, size_t size, CompactPoint* cp,
++                                HeapWord* compact_top);
++
++  HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
++
++  // (tw) Compute new compact top without actually forwarding the object.
++  virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
+ 
+   // Return a size with adjustments as required of the space.
+   virtual size_t adjust_object_size_v(size_t size) const { return size; }
+diff -r 8f44f8a7e505 src/share/vm/memory/space.inline.hpp
+--- a/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:05:19 2015 -0700
+@@ -35,7 +35,7 @@
+   return block_start_const(p);
+ }
+ 
+-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
++#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
+   /* Compute the new addresses for the live objects and store it in the mark \
+    * Used by universe::mark_sweep_phase2() \
+    */ \
+@@ -93,7 +93,17 @@
+       /* prefetch beyond q */ \
+       Prefetch::write(q, interval); \
+       size_t size = block_size(q); \
++      if (redefinition_run) { \
++        compact_top = cp->space->forward_with_rescue(q, size, \
++                                                     cp, compact_top); \
++        if (q < first_dead && oop(q)->is_gc_marked()) { \
++          /* Was moved (otherwise, forward would reset mark), \
++             set first_dead to here */ \
++          first_dead = q; \
++        } \
++      } else { \
+       compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
++      } \
+       q += size; \
+       end_of_live = q; \
+     } else { \
+@@ -142,6 +152,8 @@
+     } \
+   } \
+ \
++  if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
++ \
+   assert(q == t, "just checking"); \
+   if (liveRange != NULL) { \
+     liveRange->set_end(q); \
+@@ -188,13 +200,8 @@
+       q += size; \
+     } \
+ \
+-    if (_first_dead == t) { \
+-      q = t; \
+-    } else { \
+-      /* $$$ This is funky.  Using this to read the previously written \
+-       * LiveRange.  See also use below. */ \
+-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
+-    } \
++    /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \
++    q = _first_dead; \
+   } \
+ \
+   const intx interval = PrefetchScanIntervalInBytes; \
+@@ -222,7 +229,7 @@
+   assert(q == t, "just checking"); \
+ }
+ 
+-#define SCAN_AND_COMPACT(obj_size) { \
++#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \
+   /* Copy all live objects to their new location \
+    * Used by MarkSweep::mark_sweep_phase4() */ \
+ \
+@@ -247,13 +254,9 @@
+     } \
+   ) /* debug_only */ \
+ \
+-  if (_first_dead == t) { \
+-    q = t; \
+-  } else { \
+-    /* $$$ Funky */ \
+-    q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
++  /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \
++  q = _first_dead; \
+   } \
+-  } \
+ \
+   const intx scan_interval = PrefetchScanIntervalInBytes; \
+   const intx copy_interval = PrefetchCopyIntervalInBytes; \
+@@ -271,11 +274,34 @@
+     size_t size = obj_size(q); \
+     HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
+ \
++    if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \
++      rescue(q); \
++      debug_only(Copy::fill_to_words(q, size, 0)); \
++      q += size; \
++      continue; \
++    } \
++ \
+     /* prefetch beyond compaction_top */ \
+     Prefetch::write(compaction_top, copy_interval); \
+ \
+     /* copy object and reinit its mark */ \
+-    assert(q != compaction_top, "everything in this pass should be moving"); \
++    assert(q != compaction_top || oop(q)->klass()->new_version() != NULL, \
++           "everything in this pass should be moving"); \
++    if (redefinition_run && oop(q)->klass()->new_version() != NULL) { \
++      Klass* new_version = oop(q)->klass()->new_version(); \
++      if (new_version->update_information() == NULL) { \
++        Copy::aligned_conjoint_words(q, compaction_top, size); \
++        oop(compaction_top)->set_klass(new_version); \
++      } else { \
++        MarkSweep::update_fields(oop(q), oop(compaction_top)); \
++      } \
++      oop(compaction_top)->init_mark(); \
++      assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
++ \
++      debug_only(prev_q = q); \
++      q += size; \
++      continue; \
++    } \
+     Copy::aligned_conjoint_words(q, compaction_top, size); \
+     oop(compaction_top)->init_mark(); \
+     assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+diff -r 8f44f8a7e505 src/share/vm/memory/universe.cpp
+--- a/src/share/vm/memory/universe.cpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/memory/universe.cpp Tue Mar 31 18:05:19 2015 -0700
+@@ -84,6 +84,8 @@
+ 
+ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+ 
++bool Universe::_is_redefining_gc_run = false;
++
+ // Known objects
+ Klass* Universe::_boolArrayKlassObj = NULL;
+ Klass* Universe::_byteArrayKlassObj = NULL;
+diff -r 8f44f8a7e505 src/share/vm/memory/universe.hpp
+--- a/src/share/vm/memory/universe.hpp Tue Mar 31 18:01:20 2015 -0700
++++ b/src/share/vm/memory/universe.hpp Tue Mar 31 18:05:19 2015 -0700
+@@ -251,7 +251,13 @@
+ 
+   static void compute_verify_oop_data();
+ 
++  static bool _is_redefining_gc_run;
++
+  public:
++
++  static bool is_redefining_gc_run() { return _is_redefining_gc_run; }
++  static void set_redefining_gc_run(bool b) { _is_redefining_gc_run = b; }
++
+   // Known classes in the VM
+   static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; }
+   static Klass* byteArrayKlassObj() { return _byteArrayKlassObj; }
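For reference, the zero-terminated int array consumed by MarkSweep::update_fields(oop, oop, int*) in the markSweep.cpp hunk above can be modeled outside the VM. In the encoding used there, a positive entry is a byte count followed by the source offset to copy from, and a negative entry is a gap of that many bytes to zero-fill in the new field layout. Below is a hedged, standalone sketch: the helper name apply_update_information is hypothetical, memcpy/memset stand in for the patch's Copy::conjoint_jbytes and Copy::fill_to_bytes, and the HeapWord-sized fast paths are dropped.

#include <cstdio>
#include <cstring>

// Walks a zero-terminated update-information array: a positive entry is a
// byte count followed by the source offset to copy from; a negative entry
// is -N for N bytes of padding to zero-fill in the new layout.
static void apply_update_information(const char* old_obj, char* new_obj, const int* cur) {
  char* to = new_obj;
  while (*cur != 0) {
    int size = *cur;
    if (size > 0) {
      cur++;
      int offset = *cur;                // source offset within the old instance
      std::memcpy(to, old_obj + offset, (size_t)size);
      to += size;
      cur++;
    } else {
      int skip = -size;                 // gap introduced by the new field layout
      std::memset(to, 0, (size_t)skip);
      to += skip;
      cur++;
    }
  }
}

int main() {
  // Old layout: [int a][int b]; hypothetical new layout: [int b][4-byte gap][int a].
  int old_obj[2] = { 1, 2 };
  char new_obj[12];
  const int info[] = { 4, 4,   // copy 4 bytes from offset 4 (field b)
                       -4,     // zero-fill 4 bytes of padding
                       4, 0,   // copy 4 bytes from offset 0 (field a)
                       0 };    // terminator
  apply_update_information(reinterpret_cast<const char*>(old_obj), new_obj, info);
  int b, a;
  std::memcpy(&b, new_obj, 4);
  std::memcpy(&a, new_obj + 8, 4);
  std::printf("b=%d a=%d\n", b, a);  // prints: b=2 a=1
  return 0;
}

Running it prints b=2 a=1: the two int fields swap places and a zeroed 4-byte gap appears between them, which is exactly the kind of size-changing layout update that forces the rescue machinery in the patch above to exist.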