You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

gc-java8.patch 27KB

  1. # HG changeset patch
  2. # Parent 8f44f8a7e50563e6c9a82fb0ed6c7bce4925bd3b
  3. Change MarkAndSweep garbage collector to allow changing instances during redefinition.
  4. diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
  5. --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:01:20 2015 -0700
  6. +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Tue Mar 31 18:05:19 2015 -0700
  7. @@ -163,6 +163,12 @@
  8. }
  9. }
  10. +HeapWord* CompactibleFreeListSpace::forward_compact_top(size_t size,
  11. + CompactPoint* cp, HeapWord* compact_top) {
  12. + ShouldNotReachHere();
  13. + return NULL;
  14. +}
  15. +
  16. // Like CompactibleSpace forward() but always calls cross_threshold() to
  17. // update the block offset table. Removed initialize_threshold call because
  18. // CFLS does not use a block offset array for contiguous spaces.
  19. @@ -2097,7 +2103,7 @@
  20. // Support for compaction
  21. void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  22. - SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  23. + SCAN_AND_FORWARD(cp,end,block_is_obj,block_size,false);
  24. // prepare_for_compaction() uses the space between live objects
  25. // so that later phase can skip dead space quickly. So verification
  26. // of the free lists doesn't work after.
  27. @@ -2118,7 +2124,7 @@
  28. }
  29. void CompactibleFreeListSpace::compact() {
  30. - SCAN_AND_COMPACT(obj_size);
  31. + SCAN_AND_COMPACT(obj_size, false);
  32. }
  33. // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  34. diff -r 8f44f8a7e505 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
  35. --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:01:20 2015 -0700
  36. +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Tue Mar 31 18:05:19 2015 -0700
  37. @@ -150,6 +150,7 @@
  38. // Support for compacting cms
  39. HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  40. + HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
  41. HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
  42. // Initialization helpers.
  43. diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.cpp
  44. --- a/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:01:20 2015 -0700
  45. +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Tue Mar 31 18:05:19 2015 -0700
  46. @@ -48,6 +48,8 @@
  47. STWGCTimer* MarkSweep::_gc_timer = NULL;
  48. SerialOldTracer* MarkSweep::_gc_tracer = NULL;
  49. +GrowableArray<HeapWord*>* MarkSweep::_rescued_oops = NULL;
  50. +
  51. MarkSweep::FollowRootClosure MarkSweep::follow_root_closure;
  52. void MarkSweep::FollowRootClosure::do_oop(oop* p) { follow_root(p); }
  53. @@ -165,3 +167,100 @@
  54. }
  55. #endif
  56. +
  57. +// (DCEVM) Copy the rescued objects to their destination address after compaction.
  58. +void MarkSweep::copy_rescued_objects_back() {
  59. +
  60. + if (_rescued_oops != NULL) {
  61. +
  62. + for (int i=0; i<_rescued_oops->length(); i++) {
  63. + HeapWord* rescued_ptr = _rescued_oops->at(i);
  64. + oop rescued_obj = (oop) rescued_ptr;
  65. +
  66. + int size = rescued_obj->size();
  67. + oop new_obj = rescued_obj->forwardee();
  68. +
  69. + assert(rescued_obj->klass()->new_version() != NULL, "just checking");
  70. +
  71. + if (rescued_obj->klass()->new_version()->update_information() != NULL) {
  72. + MarkSweep::update_fields(rescued_obj, new_obj);
  73. + } else {
  74. + rescued_obj->set_klass(rescued_obj->klass()->new_version());
  75. + Copy::aligned_disjoint_words((HeapWord*)rescued_obj, (HeapWord*)new_obj, size);
  76. + }
  77. +
  78. + FREE_RESOURCE_ARRAY(HeapWord, rescued_ptr, size);
  79. +
  80. + new_obj->init_mark();
  81. + assert(new_obj->is_oop(), "must be a valid oop");
  82. + }
  83. + _rescued_oops->clear();
  84. + _rescued_oops = NULL;
  85. + }
  86. +}
  87. +
  88. +// (DCEVM) Update instances of a class whose fields changed.
  89. +void MarkSweep::update_fields(oop q, oop new_location) {
  90. +
  91. + assert(q->klass()->new_version() != NULL, "class of old object must have new version");
  92. +
  93. + Klass* old_klass_oop = q->klass();
  94. + Klass* new_klass_oop = q->klass()->new_version();
  95. +
  96. + InstanceKlass *old_klass = InstanceKlass::cast(old_klass_oop);
  97. + InstanceKlass *new_klass = InstanceKlass::cast(new_klass_oop);
  98. +
  99. + int size = q->size_given_klass(old_klass);
  100. + int new_size = q->size_given_klass(new_klass);
  101. +
  102. + HeapWord* tmp = NULL;
  103. + oop tmp_obj = q;
  104. +
  105. + // Save object somewhere, there is an overlap in fields
  106. + if (new_klass_oop->is_copying_backwards()) {
  107. + if (((HeapWord *)q >= (HeapWord *)new_location && (HeapWord *)q < (HeapWord *)new_location + new_size) ||
  108. + ((HeapWord *)new_location >= (HeapWord *)q && (HeapWord *)new_location < (HeapWord *)q + size)) {
  109. + tmp = NEW_RESOURCE_ARRAY(HeapWord, size);
  110. + q = (oop) tmp;
  111. + Copy::aligned_disjoint_words((HeapWord*)q, (HeapWord*)tmp_obj, size);
  112. + }
  113. + }
  114. +
  115. + q->set_klass(new_klass_oop);
  116. + int *cur = new_klass_oop->update_information();
  117. + assert(cur != NULL, "just checking");
  118. + MarkSweep::update_fields(new_location, q, cur);
  119. +
  120. + if (tmp != NULL) {
  121. + FREE_RESOURCE_ARRAY(HeapWord, tmp, size);
  122. + }
  123. +}
  124. +
  125. +void MarkSweep::update_fields(oop new_location, oop tmp_obj, int *cur) {
  126. + assert(cur != NULL, "just checking");
  127. + char* to = (char*)(HeapWord*)new_location;
  128. + while (*cur != 0) {
  129. + int size = *cur;
  130. + if (size > 0) {
  131. + cur++;
  132. + int offset = *cur;
  133. + HeapWord* from = (HeapWord*)(((char *)(HeapWord*)tmp_obj) + offset);
  134. + if (size == HeapWordSize) {
  135. + *((HeapWord*)to) = *from;
  136. + } else if (size == HeapWordSize * 2) {
  137. + *((HeapWord*)to) = *from;
  138. + *(((HeapWord*)to) + 1) = *(from + 1);
  139. + } else {
  140. + Copy::conjoint_jbytes(from, to, size);
  141. + }
  142. + to += size;
  143. + cur++;
  144. + } else {
  145. + assert(size < 0, "");
  146. + int skip = -*cur;
  147. + Copy::fill_to_bytes(to, skip, 0);
  148. + to += skip;
  149. + cur++;
  150. + }
  151. + }
  152. +}
  153. diff -r 8f44f8a7e505 src/share/vm/gc_implementation/shared/markSweep.hpp
  154. --- a/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:01:20 2015 -0700
  155. +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Tue Mar 31 18:05:19 2015 -0700
  156. @@ -96,8 +96,12 @@
  157. friend class AdjustPointerClosure;
  158. friend class KeepAliveClosure;
  159. friend class VM_MarkSweep;
  160. + friend class GenMarkSweep;
  161. friend void marksweep_init();
  162. +public:
  163. + static GrowableArray<HeapWord*>* _rescued_oops;
  164. +
  165. //
  166. // Vars
  167. //
  168. @@ -157,6 +161,9 @@
  169. static inline void push_objarray(oop obj, size_t index);
  170. + static void copy_rescued_objects_back();
  171. + static void update_fields(oop q, oop new_location);
  172. + static void update_fields(oop new_location, oop tmp_obj, int *cur);
  173. static void follow_stack(); // Empty marking stack.
  174. static void follow_klass(Klass* klass);
  175. diff -r 8f44f8a7e505 src/share/vm/memory/genMarkSweep.cpp
  176. --- a/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:01:20 2015 -0700
  177. +++ b/src/share/vm/memory/genMarkSweep.cpp Tue Mar 31 18:05:19 2015 -0700
  178. @@ -327,11 +327,16 @@
  179. // in the same order in phase2, phase3 and phase4. We don't quite do that
  180. // here (perm_gen first rather than last), so we tell the validate code
  181. // to use a higher index (saved from phase2) when verifying perm_gen.
  182. + assert(_rescued_oops == NULL, "must be empty before processing");
  183. GenCollectedHeap* gch = GenCollectedHeap::heap();
  184. GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  185. trace("4");
  186. + MarkSweep::copy_rescued_objects_back();
  187. +
  188. GenCompactClosure blk;
  189. gch->generation_iterate(&blk, true);
  190. +
  191. + MarkSweep::copy_rescued_objects_back();
  192. }
  193. diff -r 8f44f8a7e505 src/share/vm/memory/space.cpp
  194. --- a/src/share/vm/memory/space.cpp Tue Mar 31 18:01:20 2015 -0700
  195. +++ b/src/share/vm/memory/space.cpp Tue Mar 31 18:05:19 2015 -0700
  196. @@ -377,9 +377,8 @@
  197. _compaction_top = bottom();
  198. }
  199. -HeapWord* CompactibleSpace::forward(oop q, size_t size,
  200. - CompactPoint* cp, HeapWord* compact_top) {
  201. - // q is alive
  202. +// (DCEVM) Calculates the compact_top that will be used for placing the next object with the given size on the heap.
  203. +HeapWord* CompactibleSpace::forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top) {
  204. // First check if we should switch compaction space
  205. assert(this == cp->space, "'this' should be current compaction space.");
  206. size_t compaction_max_size = pointer_delta(end(), compact_top);
  207. @@ -399,8 +398,15 @@
  208. compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  209. }
  210. + return compact_top;
  211. +}
  212. +
  213. +HeapWord* CompactibleSpace::forward(oop q, size_t size,
  214. + CompactPoint* cp, HeapWord* compact_top) {
  215. + compact_top = forward_compact_top(size, cp, compact_top);
  216. +
  217. // store the forwarding pointer into the mark word
  218. - if ((HeapWord*)q != compact_top) {
  219. + if ((HeapWord*)q != compact_top || (size_t)q->size() != size) {
  220. q->forward_to(oop(compact_top));
  221. assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  222. } else {
  223. @@ -421,6 +427,58 @@
  224. return compact_top;
  225. }
  226. +// Compute the forward sizes and leave out objects whose position could
  227. +// possibly overlap other objects.
  228. +HeapWord* CompactibleSpace::forward_with_rescue(HeapWord* q, size_t size,
  229. + CompactPoint* cp, HeapWord* compact_top) {
  230. + size_t forward_size = size;
  231. +
  232. + // (DCEVM) There is a new version of the class of q => different size
  233. + if (oop(q)->klass()->new_version() != NULL && oop(q)->klass()->new_version()->update_information() != NULL) {
  234. +
  235. + size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
  236. + assert(size != new_size, "instances without changed size have to be updated prior to GC run");
  237. + forward_size = new_size;
  238. + }
  239. +
  240. + compact_top = forward_compact_top(forward_size, cp, compact_top);
  241. +
  242. + if (must_rescue(oop(q), oop(compact_top))) {
  243. + if (MarkSweep::_rescued_oops == NULL) {
  244. + MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
  245. + }
  246. + MarkSweep::_rescued_oops->append(q);
  247. + return compact_top;
  248. + }
  249. +
  250. + return forward(oop(q), forward_size, cp, compact_top);
  251. +}
  252. +
  253. +// Compute the forwarding addresses for the objects that need to be rescued.
  254. +HeapWord* CompactibleSpace::forward_rescued(CompactPoint* cp, HeapWord* compact_top) {
  255. + // TODO: empty the _rescued_oops after ALL spaces are compacted!
  256. + if (MarkSweep::_rescued_oops != NULL) {
  257. + for (int i=0; i<MarkSweep::_rescued_oops->length(); i++) {
  258. + HeapWord* q = MarkSweep::_rescued_oops->at(i);
  259. +
  260. + /* size_t size = oop(q)->size(); changing this for cms for perm gen */
  261. + size_t size = block_size(q);
  262. +
  263. + // (DCEVM) There is a new version of the class of q => different size
  264. + if (oop(q)->klass()->new_version() != NULL) {
  265. + size_t new_size = oop(q)->size_given_klass(oop(q)->klass()->new_version());
  266. + assert(size != new_size, "instances without changed size have to be updated prior to GC run");
  267. + size = new_size;
  268. + }
  269. +
  270. + compact_top = cp->space->forward(oop(q), size, cp, compact_top);
  271. + assert(compact_top <= end(), "must not write over end of space!");
  272. + }
  273. + MarkSweep::_rescued_oops->clear();
  274. + MarkSweep::_rescued_oops = NULL;
  275. + }
  276. + return compact_top;
  277. +}
  278. bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
  279. HeapWord* q, size_t deadlength) {
  280. @@ -442,12 +500,17 @@
  281. #define adjust_obj_size(s) s
  282. void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  283. - SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
  284. + SCAN_AND_FORWARD(cp, end, block_is_obj, block_size, false);
  285. }
  286. // Faster object search.
  287. void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  288. - SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
  289. + if (!Universe::is_redefining_gc_run()) {
  290. + SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, false);
  291. + } else {
  292. + // Redefinition run
  293. + SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size, true);
  294. + }
  295. }
  296. void Space::adjust_pointers() {
  297. @@ -485,6 +548,111 @@
  298. assert(q == t, "just checking");
  299. }
  300. +
  301. +#ifdef ASSERT
  302. +
  303. +int CompactibleSpace::space_index(oop obj) {
  304. + GenCollectedHeap* heap = GenCollectedHeap::heap();
  305. +
  306. + //if (heap->is_in_permanent(obj)) {
  307. + // return -1;
  308. + //}
  309. +
  310. + int index = 0;
  311. + for (int i = heap->n_gens() - 1; i >= 0; i--) {
  312. + Generation* gen = heap->get_gen(i);
  313. + CompactibleSpace* space = gen->first_compaction_space();
  314. + while (space != NULL) {
  315. + if (space->is_in_reserved(obj)) {
  316. + return index;
  317. + }
  318. + space = space->next_compaction_space();
  319. + index++;
  320. + }
  321. + }
  322. +
  323. + tty->print_cr("could not compute space_index for %08xh", (HeapWord*)obj);
  324. + index = 0;
  325. + for (int i = heap->n_gens() - 1; i >= 0; i--) {
  326. + Generation* gen = heap->get_gen(i);
  327. + tty->print_cr(" generation %s: %08xh - %08xh", gen->name(), gen->reserved().start(), gen->reserved().end());
  328. +
  329. + CompactibleSpace* space = gen->first_compaction_space();
  330. + while (space != NULL) {
  331. + tty->print_cr(" %2d space %08xh - %08xh", index, space->bottom(), space->end());
  332. + space = space->next_compaction_space();
  333. + index++;
  334. + }
  335. + }
  336. +
  337. + ShouldNotReachHere();
  338. + return 0;
  339. +}
  340. +#endif
  341. +
  342. +bool CompactibleSpace::must_rescue(oop old_obj, oop new_obj) {
  343. + // Only redefined objects can have the need to be rescued.
  344. + if (oop(old_obj)->klass()->new_version() == NULL) return false;
  345. +
  346. + //if (old_obj->is_perm()) {
  347. + // // This object is in perm gen: Always rescue to satisfy invariant obj->klass() <= obj.
  348. + // return true;
  349. + //}
  350. +
  351. + int new_size = old_obj->size_given_klass(oop(old_obj)->klass()->new_version());
  352. + int original_size = old_obj->size();
  353. +
  354. + Generation* tenured_gen = GenCollectedHeap::heap()->get_gen(1);
  355. + bool old_in_tenured = tenured_gen->is_in_reserved(old_obj);
  356. + bool new_in_tenured = tenured_gen->is_in_reserved(new_obj);
  357. + if (old_in_tenured == new_in_tenured) {
  358. + // Rescue if object may overlap with a higher memory address.
  359. + bool overlap = ((HeapWord*)old_obj + original_size < (HeapWord*)new_obj + new_size);
  360. + if (old_in_tenured) {
  361. + // Old and new address are in same space, so just compare the address.
  362. + // Must rescue if object moves towards the top of the space.
  363. + assert(space_index(old_obj) == space_index(new_obj), "old_obj and new_obj must be in same space");
  364. + } else {
  365. + // In the new generation, eden is located before the from space, so a
  366. + // simple pointer comparison is sufficient.
  367. + assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
  368. + assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
  369. + assert(overlap == (space_index(old_obj) < space_index(new_obj)), "slow and fast computation must yield same result");
  370. + }
  371. + return overlap;
  372. +
  373. + } else {
  374. + assert(space_index(old_obj) != space_index(new_obj), "old_obj and new_obj must be in different spaces");
  375. + if (tenured_gen->is_in_reserved(new_obj)) {
  376. + // Must never rescue when moving from the new into the old generation.
  377. + assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(old_obj), "old_obj must be in DefNewGeneration");
  378. + assert(space_index(old_obj) > space_index(new_obj), "must be");
  379. + return false;
  380. +
  381. + } else /* if (tenured_gen->is_in_reserved(old_obj)) */ {
  382. + // Must always rescue when moving from the old into the new generation.
  383. + assert(GenCollectedHeap::heap()->get_gen(0)->is_in_reserved(new_obj), "new_obj must be in DefNewGeneration");
  384. + assert(space_index(old_obj) < space_index(new_obj), "must be");
  385. + return true;
  386. + }
  387. + }
  388. +}
  389. +
  390. +HeapWord* CompactibleSpace::rescue(HeapWord* old_obj) {
  391. + assert(must_rescue(oop(old_obj), oop(old_obj)->forwardee()), "do not call otherwise");
  392. +
  393. + int size = oop(old_obj)->size();
  394. + HeapWord* rescued_obj = NEW_RESOURCE_ARRAY(HeapWord, size);
  395. + Copy::aligned_disjoint_words(old_obj, rescued_obj, size);
  396. +
  397. + if (MarkSweep::_rescued_oops == NULL) {
  398. + MarkSweep::_rescued_oops = new GrowableArray<HeapWord*>(128);
  399. + }
  400. +
  401. + MarkSweep::_rescued_oops->append(rescued_obj);
  402. + return rescued_obj;
  403. +}
  404. +
  405. void CompactibleSpace::adjust_pointers() {
  406. // Check first is there is any work to do.
  407. if (used() == 0) {
  408. @@ -495,7 +663,12 @@
  409. }
  410. void CompactibleSpace::compact() {
  411. - SCAN_AND_COMPACT(obj_size);
  412. + if(!Universe::is_redefining_gc_run()) {
  413. + SCAN_AND_COMPACT(obj_size, false);
  414. + } else {
  415. + // Redefinition run
  416. + SCAN_AND_COMPACT(obj_size, true)
  417. + }
  418. }
  419. void Space::print_short() const { print_short_on(tty); }
  420. diff -r 8f44f8a7e505 src/share/vm/memory/space.hpp
  421. --- a/src/share/vm/memory/space.hpp Tue Mar 31 18:01:20 2015 -0700
  422. +++ b/src/share/vm/memory/space.hpp Tue Mar 31 18:05:19 2015 -0700
  423. @@ -392,6 +392,9 @@
  424. // indicates when the next such action should be taken.
  425. virtual void prepare_for_compaction(CompactPoint* cp);
  426. // MarkSweep support phase3
  427. + DEBUG_ONLY(int space_index(oop obj));
  428. + bool must_rescue(oop old_obj, oop new_obj);
  429. + HeapWord* rescue(HeapWord* old_obj);
  430. virtual void adjust_pointers();
  431. // MarkSweep support phase4
  432. virtual void compact();
  433. @@ -421,6 +424,15 @@
  434. // accordingly".
  435. virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
  436. HeapWord* compact_top);
  437. + // (DCEVM) same as forward, but can rescue objects. Invoked only during
  438. + // redefinition runs
  439. + HeapWord* forward_with_rescue(HeapWord* q, size_t size, CompactPoint* cp,
  440. + HeapWord* compact_top);
  441. +
  442. + HeapWord* forward_rescued(CompactPoint* cp, HeapWord* compact_top);
  443. +
  444. + // (tw) Compute new compact top without actually forwarding the object.
  445. + virtual HeapWord* forward_compact_top(size_t size, CompactPoint* cp, HeapWord* compact_top);
  446. // Return a size with adjusments as required of the space.
  447. virtual size_t adjust_object_size_v(size_t size) const { return size; }
  448. diff -r 8f44f8a7e505 src/share/vm/memory/space.inline.hpp
  449. --- a/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:01:20 2015 -0700
  450. +++ b/src/share/vm/memory/space.inline.hpp Tue Mar 31 18:05:19 2015 -0700
  451. @@ -35,7 +35,7 @@
  452. return block_start_const(p);
  453. }
  454. -#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
  455. +#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size,redefinition_run) { \
  456. /* Compute the new addresses for the live objects and store it in the mark \
  457. * Used by universe::mark_sweep_phase2() \
  458. */ \
  459. @@ -93,7 +93,17 @@
  460. /* prefetch beyond q */ \
  461. Prefetch::write(q, interval); \
  462. size_t size = block_size(q); \
  463. + if (redefinition_run) { \
  464. + compact_top = cp->space->forward_with_rescue(q, size, \
  465. + cp, compact_top); \
  466. + if (q < first_dead && oop(q)->is_gc_marked()) { \
  467. + /* Was moved (otherwise, forward would reset mark), \
  468. + set first_dead to here */ \
  469. + first_dead = q; \
  470. + } \
  471. + } else { \
  472. compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
  473. + } \
  474. q += size; \
  475. end_of_live = q; \
  476. } else { \
  477. @@ -142,6 +152,8 @@
  478. } \
  479. } \
  480. \
  481. + if (redefinition_run) { compact_top = forward_rescued(cp, compact_top); } \
  482. + \
  483. assert(q == t, "just checking"); \
  484. if (liveRange != NULL) { \
  485. liveRange->set_end(q); \
  486. @@ -188,13 +200,8 @@
  487. q += size; \
  488. } \
  489. \
  490. - if (_first_dead == t) { \
  491. - q = t; \
  492. - } else { \
  493. - /* $$$ This is funky. Using this to read the previously written \
  494. - * LiveRange. See also use below. */ \
  495. - q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
  496. - } \
  497. + /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \
  498. + q = _first_dead; \
  499. } \
  500. \
  501. const intx interval = PrefetchScanIntervalInBytes; \
  502. @@ -222,7 +229,7 @@
  503. assert(q == t, "just checking"); \
  504. }
  505. -#define SCAN_AND_COMPACT(obj_size) { \
  506. +#define SCAN_AND_COMPACT(obj_size, redefinition_run) { \
  507. /* Copy all live objects to their new location \
  508. * Used by MarkSweep::mark_sweep_phase4() */ \
  509. \
  510. @@ -247,13 +254,9 @@
  511. } \
  512. ) /* debug_only */ \
  513. \
  514. - if (_first_dead == t) { \
  515. - q = t; \
  516. - } else { \
  517. - /* $$$ Funky */ \
  518. - q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
  519. + /* (DCEVM) first_dead can be live object if we move/rescue resized objects */ \
  520. + q = _first_dead; \
  521. } \
  522. - } \
  523. \
  524. const intx scan_interval = PrefetchScanIntervalInBytes; \
  525. const intx copy_interval = PrefetchCopyIntervalInBytes; \
  526. @@ -271,11 +274,34 @@
  527. size_t size = obj_size(q); \
  528. HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
  529. \
  530. + if (redefinition_run && must_rescue(oop(q), oop(q)->forwardee())) { \
  531. + rescue(q); \
  532. + debug_only(Copy::fill_to_words(q, size, 0)); \
  533. + q += size; \
  534. + continue; \
  535. + } \
  536. + \
  537. /* prefetch beyond compaction_top */ \
  538. Prefetch::write(compaction_top, copy_interval); \
  539. \
  540. /* copy object and reinit its mark */ \
  541. - assert(q != compaction_top, "everything in this pass should be moving"); \
  542. + assert(q != compaction_top || oop(q)->klass()->new_version() != NULL, \
  543. + "everything in this pass should be moving"); \
  544. + if (redefinition_run && oop(q)->klass()->new_version() != NULL) { \
  545. + Klass* new_version = oop(q)->klass()->new_version(); \
  546. + if (new_version->update_information() == NULL) { \
  547. + Copy::aligned_conjoint_words(q, compaction_top, size); \
  548. + oop(compaction_top)->set_klass(new_version); \
  549. + } else { \
  550. + MarkSweep::update_fields(oop(q), oop(compaction_top)); \
  551. + } \
  552. + oop(compaction_top)->init_mark(); \
  553. + assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
  554. + \
  555. + debug_only(prev_q = q); \
  556. + q += size; \
  557. + continue; \
  558. + } \
  559. Copy::aligned_conjoint_words(q, compaction_top, size); \
  560. oop(compaction_top)->init_mark(); \
  561. assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
  562. diff -r 8f44f8a7e505 src/share/vm/memory/universe.cpp
  563. --- a/src/share/vm/memory/universe.cpp Tue Mar 31 18:01:20 2015 -0700
  564. +++ b/src/share/vm/memory/universe.cpp Tue Mar 31 18:05:19 2015 -0700
  565. @@ -84,6 +84,8 @@
  566. PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  567. +bool Universe::_is_redefining_gc_run = false;
  568. +
  569. // Known objects
  570. Klass* Universe::_boolArrayKlassObj = NULL;
  571. Klass* Universe::_byteArrayKlassObj = NULL;
  572. diff -r 8f44f8a7e505 src/share/vm/memory/universe.hpp
  573. --- a/src/share/vm/memory/universe.hpp Tue Mar 31 18:01:20 2015 -0700
  574. +++ b/src/share/vm/memory/universe.hpp Tue Mar 31 18:05:19 2015 -0700
  575. @@ -251,7 +251,13 @@
  576. static void compute_verify_oop_data();
  577. + static bool _is_redefining_gc_run;
  578. +
  579. public:
  580. +
  581. + static bool is_redefining_gc_run() { return _is_redefining_gc_run; }
  582. + static void set_redefining_gc_run(bool b) { _is_redefining_gc_run = b; }
  583. +
  584. // Known classes in the VM
  585. static Klass* boolArrayKlassObj() { return _boolArrayKlassObj; }
  586. static Klass* byteArrayKlassObj() { return _byteArrayKlassObj; }