gc-java8u40.patch 28KB

# HG changeset patch
# Parent dc85548673e78358fc7a3dcbe2f51cad5a91201f
Change MarkAndSweep garbage collector to allow changing instances during redefinition.
diff -r dc85548673e7 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Apr 01 11:22:01 2015 -0700
@@ -163,6 +163,12 @@
}
}
+HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size,
+ CompactPoint* cp, HeapWord* compact_top) {
+ ShouldNotReachHere();
+ return NULL;
+}
+
// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table. Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
@@ -2097,7 +2103,7 @@
// Support for compaction
void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+ SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false);
// prepare_for_compaction() uses the space between live objects
// so that later phase can skip dead space quickly. So verification
// of the free lists doesn't work after.
@@ -2118,7 +2124,7 @@
}
void CompactibleFreeListSpace::compact() {
- SCAN_AND_COMPACT(obj_size);
+ SCAN_AND_COMPACT(obj_size, false);
}
// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
diff -r dc85548673e7 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Wed Apr 01 11:22:01 2015 -0700
@@ -150,6 +150,7 @@
// Support for compacting cms
HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
+ HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
// Initialization helpers.
diff -r dc85548673e7 src/share/vm/gc_implementation/g1/heapRegion.cpp
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Apr 01 11:22:01 2015 -0700
@@ -1063,7 +1063,7 @@
#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size, false);
}
#undef block_is_always_obj
diff -r dc85548673e7 src/share/vm/gc_implementation/shared/markSweep.cpp
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Wed Apr 01 11:22:01 2015 -0700
@@ -48,6 +48,8 @@
STWGCTimer* MarkSweep::_gc_timer = NULL;
SerialOldTracer* MarkSweep::_gc_tracer = NULL;
+GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL;
+
MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
@@ -165,3 +167,100 @@
}
#endif
+
+// (DCEVM) Copy the rescued objects to their destination address after compaction.
+void MarkSweep::copy_rescued_objects_back() {
+
+ if (_rescued_oops != NULL) {
+
+ for (int i=0; i<_rescued_oops->length(); i++) {
+ HeapWord* rescued_ptr = _rescued_oops->at(i);
+ oop rescued_obj = (oop) rescued_ptr;
+
+ int size = rescued_obj->size();
+ oop new_obj = rescued_obj->forwardee();
+
+ assert(rescued_obj->klass()->new_version() != NULL, "just checking");
+
+ if (rescued_obj->klass()->new_version()->update_information() != NULL) {
+ MarkSweep::update_fields(rescued_obj, new_obj);
+ } else {
+ rescued_obj->set_klass(rescued_obj->klass()->new_version());
+ Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size);
+ }
+
+ FREE_RESOURCE_ARRAY(HeapWord, rescued_ptr, size);
+
+ new_obj->init_mark();
+ assert(new_obj->is_oop(), "must be a valid oop");
+ }
+ _rescued_oops->clear();
+ _rescued_oops = NULL;
+ }
+}
+
+// (DCEVM) Update instances of a class whose fields changed.
+void MarkSweep::update_fields(oop q, oop new_location) {
+
+ assert(q->klass()->new_version() != NULL, "class of old object must have new version");
+
+ Klass* old_klass_oop = q->klass();
+ Klass* new_klass_oop = q->klass()->new_version();
+
+ InstanceKlass *old_klass = InstanceKlass::cast(old_klass_oop);
+ InstanceKlass *new_klass = InstanceKlass::cast(new_klass_oop);
+
+ int size = q->size_given_klass(old_klass);
+ int new_size = q->size_given_klass(new_klass);
+
+ HeapWord* tmp = NULL;
+ oop tmp_obj = q;
+
+ // Save object somewhere, there is an overlap in fields
+ if (new_klass_oop->is_copying_backwards()) {
+ if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) ||
+ ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) {
+ tmp = NEW_RESOURCE_ARRAY(HeapWord, size);
+ q = (oop) tmp;
+ Copy::aligned_disjoint_words((HeapWord*)tmp_obj, (HeapWord*)q, size);
+ }
+ }
+
+ q->set_klass(new_klass_oop);
+ int *cur = new_klass_oop->update_information();
+ assert(cur != NULL, "just checking");
+ MarkSweep::update_fields(new_location, q, cur);
+
+ if (tmp != NULL) {
+ FREE_RESOURCE_ARRAY(HeapWord, tmp, size);
+ }
+}
+
+void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) {
+ assert(cur != NULL, "just checking");
+ char* to = (char*)(HeapWord*)new_location;
+ while (*cur != 0) {
+ int size = *cur;
+ if (size > 0) {
+ cur++;
+ int offset = *cur;
+ HeapWord* from = (HeapWord*)(((char *)(HeapWord*)tmp_obj) + offset);
+ if (size == HeapWordSize) {
+ *((HeapWord*)to) = *from;
+ } else if (size == HeapWordSize * 2) {
+ *((HeapWord*)to) = *from;
+ *(((HeapWord*)to) + 1) = *(from + 1);
+ } else {
+ Copy::conjoint_jbytes(from, to, size);
+ }
+ to += size;
+ cur++;
+ } else {
+ assert(size < 0, "");
+ int skip = -*cur;
+ Copy::fill_to_bytes(to, skip, 0);
+ to += skip;
+ cur++;
+ }
+ }
+}
diff -r dc85548673e7 src/share/vm/gc_implementation/shared/markSweep.hpp
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Wed Apr 01 11:22:01 2015 -0700
@@ -96,8 +96,12 @@
friend class AdjustPointerClosure;
friend class KeepAliveClosure;
friend class VM_MarkSweep;
+ friend class GenMarkSweep;
friend void marksweep_init();
+public:
+ static GrowableArray<HeapWord*>* _rescued_oops;
+
//
// Vars
//
@@ -157,6 +161,9 @@
static inline void push_objarray(oop obj, size_t index);
+ static void copy_rescued_objects_back();
+ static void update_fields(oop q, oop new_location);
+ static void update_fields(oop new_location, oop tmp_obj, int *cur);
static void follow_stack(); // Empty marking stack.
static void follow_klass(Klass* klass);
diff -r dc85548673e7 src/share/vm/memory/genMarkSweep.cpp
--- a/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp Wed Apr 01 11:22:01 2015 -0700
@@ -327,11 +327,16 @@
// in the same order in phase2, phase3 and phase4. We don't quite do that
// here (perm_gen first rather than last), so we tell the validate code
// to use a higher index (saved from phase2) when verifying perm_gen.
+ assert(_rescued_oops == NULL, "must be empty before processing");
GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
trace("4");
+ MarkSweep::copy_rescued_objects_back();
+
GenCompactClosure blk;
gch->generation_iterate(&blk, true);
+
+ MarkSweep::copy_rescued_objects_back();
}
diff -r dc85548673e7 src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/memory/space.cpp Wed Apr 01 11:22:01 2015 -0700
@@ -377,9 +377,8 @@
_compaction_top = bottom();
}
-HeapWord* CompactibleSpace::forward(oop q, size_t size,
- CompactPoint* cp, HeapWord* compact_top) {
- // q is alive
+// (DCEVM) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
+HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) {
// First check if we should switch compaction space
assert(this == cp->space, "'this' should be current compaction space.");
size_t compaction_max_size = pointer_delta(end(), compact_top);
@@ -399,8 +398,15 @@
compaction_max_size = pointer_delta(cp->space->end(), compact_top);
}
+ return compact_top;
+}
+
+HeapWord* CompactibleSpace::forward(oop q, size_t size,
+ CompactPoint* cp, HeapWord* compact_top) {
+ compact_top = forward_compact_top(size, cp, compact_top);
+
// store the forwarding pointer into the mark word
- if ((HeapWord*)q != compact_top) {
+ if ((HeapWord*)q != compact_top || (size_t)q->size() != size) {
q->forward_to(oop(compact_top));
assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
} else {
@@ -421,6 +427,58 @@
return compact_top;
}
+// Compute the forward sizes and leave out objects whose position could
+// possibly overlap other objects.
+HeapWord* CompactibleSpace::forward_with_rescue(HeapWord* q, size_t size,
+ CompactPoint* cp, HeapWord* compact_top) {
+ size_t forward_size = size;
+
+ // (DCEVM) There is a new version of the class of q => different size
+ if (oop(q)->klass()->new_version() != NULL && oop(q)->klass()->new_version()->update_information() != NULL) {
+
+ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
+ assert(size != new_size, "instances without changed size have to be updated prior to GC run");
+ forward_size = new_size;
+ }
+
+ compact_top = forward_compact_top(forward_size, cp, compact_top);
+
+ if (must_rescue(oop(q), oop(compact_top))) {
+ if (MarkSweep::_rescued_oops == NULL) {
+ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
+ }
+ MarkSweep::_rescued_oops->append(q);
+ return compact_top;
+ }
+
+ return forward(oop(q), forward_size, cp, compact_top);
+}
+
+// Compute the forwarding addresses for the objects that need to be rescued.
+HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) {
+ // TODO: empty the _rescued_oops after ALL spaces are compacted!
+ if (MarkSweep::_rescued_oops != NULL) {
+ for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) {
+ HeapWord* q = MarkSweep::_rescued_oops->at(i);
+
+ /* size_t size = oop(q)->size(); changing this for cms for perm gen */
+ size_t size = block_size(q);
+
+ // (DCEVM) There is a new version of the class of q => different size
+ if (oop(q)->klass()->new_version() != NULL) {
+ size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
+ assert(size != new_size, "instances without changed size have to be updated prior to GC run");
+ size = new_size;
+ }
+
+ compact_top = cp->space->forward(oop(q), size, cp, compact_top);
+ assert(compact_top <= end(), "must not write over end of space!");
+ }
+ MarkSweep::_rescued_oops->clear();
+ MarkSweep::_rescued_oops = NULL;
+ }
+ return compact_top;
+}
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
HeapWord* q, size_t deadlength) {
@@ -442,12 +500,17 @@
#define adjust_obj_size(s) s
void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
+ SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false);
}
// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
- SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
+ if (!Universe::is_redefining_gc_run()) {
+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false);
+ } else {
+ // Redefinition run
+ SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true);
+ }
}
void Space::adjust_pointers() {
@@ -485,6 +548,111 @@
assert(q == t, "just checking");
}
+
+#ifdef ASSERT
+
+int CompactibleSpace::space_index(oop obj) {
+ GenCollectedHeap* heap = GenCollectedHeap::heap();
+
+ //if (heap->is_in_permanent(obj)) {
+ // return -1;
+ //}
+
+ int index = 0;
+ for (int i = heap->n_gens() - 1; i >= 0; i--) {
+ Generation* gen = heap->get_gen(i);
+ CompactibleSpace* space = gen->first_compaction_space();
+ while (space != NULL) {
+ if (space->is_in_reserved(obj)) {
+ return index;
+ }
+ space = space->next_compaction_space();
+ index++;
+ }
+ }
+
+ tty->print_cr("could not compute space_index for %08xh", (HeapWord*)obj);
+ index = 0;
+ for (int i = heap->n_gens() - 1; i >= 0; i--) {
+ Generation* gen = heap->get_gen(i);
+ tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end());
+
+ CompactibleSpace* space = gen->first_compaction_space();
+ while (space != NULL) {
+ tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end());
+ space = space->next_compaction_space();
+ index++;
+ }
+ }
+
+ ShouldNotReachHere();
+ return 0;
+}
+#endif
+
+bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) {
+ // Only redefined objects can have the need to be rescued.
+ if (oop(old_obj)->klass()->new_version() == NULL) return false;
+
+ //if (old_obj->is_perm()) {
+ // // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj.
+ // return true;
+ //}
+
+ int new_size = old_obj->size_given_klass(oop(old_obj)->klass()->new_version());
+ int original_size = old_obj->size();
+
+ Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1);
+ bool old_in_tenured = tenured_gen->is_in_reserved(old_obj);
+ bool new_in_tenured = tenured_gen->is_in_reserved(new_obj);
+ if (old_in_tenured == new_in_tenured) {
+ // Rescue if object may overlap with a higher memory address.
+ bool overlap = ((HeapWord*)old_obj + original_size < (HeapWord*)new_obj + new_size);
+ if (old_in_tenured) {
+ // Old and new address are in same space, so just compare the address.
+ // Must rescue if object moves towards the top of the space.
+ assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space");
+ } else {
+ // In the new generation, eden is located before the from space, so a
+ // simple pointer comparison is sufficient.
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
+ assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result");
+ }
+ return overlap;
+
+ } else {
+ assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces");
+ if (tenured_gen->is_in_reserved(new_obj)) {
+ // Must never rescue when moving from the new into the old generation.
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
+ assert(space_index(old_obj) > space_index(new_obj), "must be");
+ return false;
+
+ } else /* if (tenured_gen->is_in_reserved(old_obj)) */ {
+ // Must always rescue when moving from the old into the new generation.
+ assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
+ assert(space_index(old_obj) < space_index(new_obj), "must be");
+ return true;
+ }
+ }
+}
+
+HeapWord* CompactibleSpace::rescue(HeapWord* old_obj) {
+ assert(must_rescue(oop(old_obj), oop(old_obj)->forwardee()), "do not call otherwise");
+
+ int size = oop(old_obj)->size();
+ HeapWord* rescued_obj = NEW_RESOURCE_ARRAY(HeapWord, size);
+ Copy::aligned_disjoint_words(old_obj, rescued_obj, size);
+
+ if (MarkSweep::_rescued_oops == NULL) {
+ MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
+ }
+
+ MarkSweep::_rescued_oops->append(rescued_obj);
+ return rescued_obj;
+}
+
void CompactibleSpace::adjust_pointers() {
// Check first is there is any work to do.
if (used() == 0) {
@@ -495,7 +663,12 @@
}
void CompactibleSpace::compact() {
- SCAN_AND_COMPACT(obj_size);
+ if(!Universe::is_redefining_gc_run()) {
+ SCAN_AND_COMPACT(obj_size, false);
+ } else {
+ // Redefinition run
+ SCAN_AND_COMPACT(obj_size, true)
+ }
}
void Space::print_short() const { print_short_on(tty); }
diff -r dc85548673e7 src/share/vm/memory/space.hpp
--- a/src/share/vm/memory/space.hpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/memory/space.hpp Wed Apr 01 11:22:01 2015 -0700
@@ -392,6 +392,9 @@
// indicates when the next such action should be taken.
virtual void prepare_for_compaction(CompactPoint* cp);
// MarkSweep support phase3
+ DEBUG_ONLY(int space_index(oop obj));
+ bool must_rescue(oop old_obj, oop new_obj);
+ HeapWord* rescue(HeapWord* old_obj);
virtual void adjust_pointers();
// MarkSweep support phase4
virtual void compact();
@@ -421,6 +424,15 @@
// accordingly".
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
HeapWord* compact_top);
+ // (DCEVM) same as forward, but can rescue objects. Invoked only during
+ // redefinition runs
+ HeapWord* forward_with_rescue(HeapWord* q, size_t size, CompactPoint* cp,
+ HeapWord* compact_top);
+
+ HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
+
+ // (tw) Compute new compact top without actually forwarding the object.
+ virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
// Return a size with adjusments as required of the space.
virtual size_t adjust_object_size_v(size_t size) const { return size; }
diff -r dc85548673e7 src/share/vm/memory/space.inline.hpp
--- a/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/memory/space.inline.hpp Wed Apr 01 11:22:01 2015 -0700
@@ -35,7 +35,7 @@
return block_start_const(p);
}
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
/* Compute the new addresses for the live objects and store it in the mark \
* Used by universe::mark_sweep_phase2() \
*/ \
@@ -93,7 +93,17 @@
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
size_t size = block_size(q); \
+ if (redefinition_run) { \
+ compact_top = cp->space->forward_with_rescue(q, size, \
+ cp, compact_top); \
+ if (q < first_dead && oop(q)->is_gc_marked()) { \
+ /* Was moved (otherwise, forward would reset mark), \
+ set first_dead to here */ \
+ first_dead = q; \
+ } \
+ } else { \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
+ } \
q += size; \
end_of_live = q; \
} else { \
@@ -142,6 +152,8 @@
} \
} \
\
+ if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
+ \
assert(q == t, "just checking"); \
if (liveRange != NULL) { \
liveRange->set_end(q); \
@@ -188,13 +200,8 @@
q += size; \
} \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ This is funky. Using this to read the previously written \
- * LiveRange. See also use below. */ \
- q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
- } \
+ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \
+ q = _first_dead; \
} \
\
const intx interval = PrefetchScanIntervalInBytes; \
@@ -222,7 +229,7 @@
assert(q == t, "just checking"); \
}
-#define SCAN_AND_COMPACT(obj_size) { \
+#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \
/* Copy all live objects to their new location \
* Used by MarkSweep::mark_sweep_phase4() */ \
\
@@ -247,13 +254,9 @@
} \
) /* debug_only */ \
\
- if (_first_dead == t) { \
- q = t; \
- } else { \
- /* $$$ Funky */ \
- q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
+ /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \
+ q = _first_dead; \
} \
- } \
\
const intx scan_interval = PrefetchScanIntervalInBytes; \
const intx copy_interval = PrefetchCopyIntervalInBytes; \
@@ -271,11 +274,34 @@
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
+ if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \
+ rescue(q); \
+ debug_only(Copy::fill_to_words(q, size, 0)); \
+ q += size; \
+ continue; \
+ } \
+ \
/* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
/* copy object and reinit its mark */ \
- assert(q != compaction_top, "everything in this pass should be moving"); \
+ assert(q != compaction_top || oop(q)->klass()->new_version() != NULL, \
+ "everything in this pass should be moving"); \
+ if (redefinition_run && oop(q)->klass()->new_version() != NULL) { \
+ Klass* new_version = oop(q)->klass()->new_version(); \
+ if (new_version->update_information() == NULL) { \
+ Copy::aligned_conjoint_words(q, compaction_top, size); \
+ oop(compaction_top)->set_klass(new_version); \
+ } else { \
+ MarkSweep::update_fields(oop(q), oop(compaction_top)); \
+ } \
+ oop(compaction_top)->init_mark(); \
+ assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
+ \
+ debug_only(prev_q = q); \
+ q += size; \
+ continue; \
+ } \
Copy::aligned_conjoint_words(q, compaction_top, size); \
oop(compaction_top)->init_mark(); \
assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
diff -r dc85548673e7 src/share/vm/memory/universe.cpp
--- a/src/share/vm/memory/universe.cpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/memory/universe.cpp Wed Apr 01 11:22:01 2015 -0700
@@ -84,6 +84,8 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+bool Universe::_is_redefining_gc_run = false;
+
// Known objects
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
diff -r dc85548673e7 src/share/vm/memory/universe.hpp
--- a/src/share/vm/memory/universe.hpp Tue Mar 31 18:13:48 2015 -0700
+++ b/src/share/vm/memory/universe.hpp Wed Apr 01 11:22:01 2015 -0700
@@ -251,7 +251,13 @@
static void compute_verify_oop_data();
+ static bool _is_redefining_gc_run;
+
public:
+
+ static bool is_redefining_gc_run() { return _is_redefining_gc_run; }
+ static void set_redefining_gc_run(bool b) { _is_redefining_gc_run = b; }
+
// Known classes in the VM
static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; }
static Klass* byteArrayKlassObj() { return _byteArrayKlassObj; }
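
Note on the update_information encoding consumed by MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) in the patch above: a positive entry is a byte count to copy, immediately followed by the source offset within the old instance; a negative entry is a byte count to zero-fill in the new layout; a zero entry terminates the plan. The following standalone sketch is not part of the patch and only illustrates that interpretation on plain byte buffers (the buffer names and the sample plan are invented; the real code additionally fast-paths one- and two-HeapWord copies and uses Copy::conjoint_jbytes / Copy::fill_to_bytes instead of memcpy / memset).

#include <cstring>
#include <cstdio>

// Illustrative mirror of the update_fields loop:
// plan = { +size, src_offset, ... , -size, ... , 0 }
static void apply_update_plan(char* to, const char* from, const int* plan) {
  while (*plan != 0) {
    int size = *plan;
    if (size > 0) {              // copy 'size' bytes from 'from + offset'
      int offset = *(++plan);
      std::memcpy(to, from + offset, size);
      to += size;
      ++plan;
    } else {                     // negative entry: zero-fill '-size' bytes
      int skip = -size;
      std::memset(to, 0, skip);
      to += skip;
      ++plan;
    }
  }
}

int main() {
  // Hypothetical old instance: two 8-byte fields at offsets 0 and 8.
  char old_obj[16];
  for (int i = 0; i < 16; i++) old_obj[i] = (char)(i + 1);

  // Hypothetical plan: place the old field at offset 8 first,
  // zero-fill 8 new bytes, then place the old field at offset 0.
  const int plan[] = { 8, 8, -8, 8, 0, 0 };
  char new_obj[24] = {0};
  apply_update_plan(new_obj, old_obj, plan);
  std::printf("first byte of new layout: %d\n", new_obj[0]);  // prints 9
  return 0;
}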