changeset 9434:9ec23e56f818

Several smallish fixes in ShenandoahMarkCompact and fewer Shenandoah-specific hooks in shared mark-compact code.
author rkennke
date Wed, 29 Jul 2015 07:53:38 +0200
parents 40b528fa3e2a
children daae0f9a9a43
files src/share/vm/gc/shared/space.cpp src/share/vm/gc/shared/space.hpp src/share/vm/gc/shared/space.inline.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp
diffstat 6 files changed, 150 insertions(+), 102 deletions(-)
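The shape of the change, as a hedged sketch (stand-in types and simplified signatures, not the actual HotSpot sources): the shared mark-compact code drops its UseShenandoahGC special cases and instead goes through per-space helpers (make_oop(), compact_oop(), adjust_obj_size()), which ShenandoahHeapRegion overrides to step over the one-word Brooks forwarding pointer stored in front of every object.

```cpp
// Schematic, self-contained sketch of the hook pattern in this changeset.
// WordT/HeapWordT/OopT are stand-ins for HotSpot's HeapWord*/oop types;
// only the one-word offsets matter here.
#include <cstddef>
#include <cstdint>
#include <iostream>

typedef std::uintptr_t WordT;
typedef WordT*         HeapWordT;   // stand-in for HeapWord*
typedef WordT*         OopT;        // stand-in for oop

struct CompactibleSpaceSketch {
  // Shared defaults: an object starts directly at its block address and a
  // block is exactly as large as the object it holds.
  virtual OopT compact_oop(HeapWordT addr) const { return addr; }
  OopT   make_oop(HeapWordT addr) const          { return addr; }
  size_t adjust_obj_size(size_t size) const      { return size; }
  virtual ~CompactibleSpaceSketch() {}
};

struct ShenandoahRegionSketch : CompactibleSpaceSketch {
  // Shenandoah keeps a one-word Brooks forwarding pointer in front of each
  // object, so the object begins one word after the block start and every
  // block is one word larger than the object it carries.
  OopT compact_oop(HeapWordT addr) const    { return addr + 1; }
  OopT   make_oop(HeapWordT addr) const     { return addr + 1; }
  size_t adjust_obj_size(size_t size) const { return size + 1; }
};

int main() {
  WordT block[4] = {0, 0, 0, 0};
  ShenandoahRegionSketch r;
  // The object "starts" one word into the block and its size grows by one word.
  std::cout << (r.make_oop(block) - block) << " "   // prints 1
            << r.adjust_obj_size(3) << std::endl;   // prints 4
  return 0;
}
```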
--- a/src/share/vm/gc/shared/space.cpp	Tue Jul 28 16:10:57 2015 +0200
+++ b/src/share/vm/gc/shared/space.cpp	Wed Jul 29 07:53:38 2015 +0200
@@ -385,10 +385,6 @@
       assert(cp->space != NULL, "generation must have a first compaction space");
     }
     compact_top = cp->space->bottom();
-    if (UseShenandoahGC) {
-      // TODO: Make better.
-      compact_top += 1;
-    }
     cp->space->set_compaction_top(compact_top);
     cp->threshold = cp->space->initialize_threshold();
     compaction_max_size = pointer_delta(cp->space->end(), compact_top);
@@ -396,7 +392,7 @@
 
   // store the forwarding pointer into the mark word
   if ((HeapWord*)q != compact_top) {
-    q->forward_to(oop(compact_top));
+    q->forward_to(compact_oop(compact_top));
     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   } else {
     // if the object isn't moving we can just set the mark to the default
--- a/src/share/vm/gc/shared/space.hpp	Tue Jul 28 16:10:57 2015 +0200
+++ b/src/share/vm/gc/shared/space.hpp	Wed Jul 29 07:53:38 2015 +0200
@@ -360,8 +360,6 @@
 
   // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
   inline size_t adjust_obj_size(size_t size) const {
-    if (UseShenandoahGC)  // Is there a better way to figure out BP size?  fixme
-      return size + 1;
     return size;
   }
 
@@ -369,6 +367,10 @@
     return oop(addr)->size();
   }
 
+  inline oop make_oop(HeapWord* addr) const {
+    return oop(addr);
+  }
+
 public:
   CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}
@@ -444,6 +446,10 @@
   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                     HeapWord* compact_top);
 
+  virtual oop compact_oop(HeapWord* addr) const {
+    return oop(addr);
+  }
+
   // Return a size with adjustments as required of the space.
   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 
@@ -514,10 +520,7 @@
   }
 
   inline size_t scanned_block_size(const HeapWord* addr) const {
-    size_t size = oop(addr)->size();
-    if (UseShenandoahGC)  // Is there a better way to figure out BP size?  fixme
-      return size + 1;
-    return size;
+    return oop(addr)->size();
   }
 
  protected:
--- a/src/share/vm/gc/shared/space.inline.hpp	Tue Jul 28 16:10:57 2015 +0200
+++ b/src/share/vm/gc/shared/space.inline.hpp	Wed Jul 29 07:53:38 2015 +0200
@@ -35,9 +35,6 @@
 #include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
 
-// Figure out a better place for this.
-const uint BROOKS_POINTER_OBJ_SIZE = 1;
-
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
 }
@@ -85,9 +82,6 @@
   // space, so this is a good time to initialize this:
   space->set_compaction_top(space->bottom());
   
-  if (UseShenandoahGC) 
-    space->set_compaction_top(space->bottom() + BROOKS_POINTER_OBJ_SIZE);
-
   if (cp->space == NULL) {
     assert(cp->gen != NULL, "need a generation");
     assert(cp->threshold == NULL, "just checking");
@@ -116,8 +110,6 @@
   HeapWord* q = space->bottom();
   HeapWord* t = space->scan_limit();
 
-  if (UseShenandoahGC)  q +=  BROOKS_POINTER_OBJ_SIZE;
-
   HeapWord*  end_of_live= q;            // One byte beyond the last byte of the last
                                         // live object.
   HeapWord*  first_dead = space->end(); // The first dead object.
@@ -128,16 +120,16 @@
   const intx interval = PrefetchScanIntervalInBytes;
 
   while (q < t) {
+
     assert(!space->scanned_block_is_obj(q) ||
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
-           oop(q)->mark()->has_bias_pattern(),
+           space->make_oop(q)->mark()->is_marked() || space->make_oop(q)->mark()->is_unlocked() ||
+           space->make_oop(q)->mark()->has_bias_pattern(),
            "these are the only valid states during a mark sweep");
-    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
+    if (space->scanned_block_is_obj(q) && space->make_oop(q)->is_gc_marked()) {
       // prefetch beyond q
       Prefetch::write(q, interval);
       size_t size = space->scanned_block_size(q);
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
-
+      compact_top = cp->space->forward(space->make_oop(q), size, cp, compact_top);
       q += size;
       end_of_live = q;
     } else {
@@ -147,14 +139,14 @@
         // prefetch beyond end
         Prefetch::write(end, interval);
         end += space->scanned_block_size(end);
-      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
+      } while (end < t && (!space->scanned_block_is_obj(end) || !space->make_oop(end)->is_gc_marked()));
 
       // see if we might want to pretend this object is alive so that
       // we don't have to compact quite as often.
       if (allowed_deadspace > 0 && q == compact_top) {
         size_t sz = pointer_delta(end, q);
         if (space->insert_deadspace(allowed_deadspace, q, sz)) {
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
+          compact_top = cp->space->forward(space->make_oop(q), sz, cp, compact_top);
           q = end;
           end_of_live = end;
           continue;
@@ -170,7 +162,7 @@
 
       // record the current LiveRange object.
       // liveRange->start() is overlaid on the mark word.
-      liveRange = (LiveRange*)q;
+      liveRange = (LiveRange*) (HeapWord*) space->make_oop(q);
       liveRange->set_start(end);
       liveRange->set_end(end);
 
@@ -184,9 +176,7 @@
     }
   }
 
-  if (UseShenandoahGC) 
-    assert(q == t + BROOKS_POINTER_OBJ_SIZE, "just checking");
-  else assert(q == t, "just checking");
+  assert(q == t, "just checking");
       
   if (liveRange != NULL) {
     liveRange->set_end(q);
@@ -207,12 +197,11 @@
   // Used by MarkSweep::mark_sweep_phase3()
 
   HeapWord* q = space->bottom();
-  if (UseShenandoahGC) q+= BROOKS_POINTER_OBJ_SIZE;
   HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".
 
   assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");
 
-  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+  if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) {
     // we have a chunk of the space which hasn't moved and we've
     // reinitialized the mark word during the previous pass, so we can't
     // use is_gc_marked for the traversal.
@@ -227,7 +216,7 @@
       assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");
 
       // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
+      size_t size = MarkSweep::adjust_pointers(space->make_oop(q));
       size = space->adjust_obj_size(size);
 
       q += size;
@@ -238,7 +227,7 @@
     } else {
       // $$$ This is funky.  Using this to read the previously written
       // LiveRange.  See also use below.
-      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
+      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
     }
   }
 
@@ -248,10 +237,10 @@
   while (q < t) {
     // prefetch beyond q
     Prefetch::write(q, interval);
-    if (oop(q)->is_gc_marked()) {
+    if (space->make_oop(q)->is_gc_marked()) {
       // q is alive
       // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
+      size_t size = MarkSweep::adjust_pointers(space->make_oop(q));
       size = space->adjust_obj_size(size);
       debug_only(prev_q = q);
       q += size;
@@ -259,8 +248,7 @@
       // q is not a live object, so its mark should point at the next
       // live object
       debug_only(prev_q = q);
-      HeapWord* fixme = q;
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      q = (HeapWord*) space->make_oop(q)->mark()->decode_pointer();
       assert(q > prev_q, "we should be moving forward through memory");
     }
   }
@@ -277,10 +265,7 @@
   HeapWord* const t = space->_end_of_live;
   debug_only(HeapWord* prev_q = NULL);
   
-  if (UseShenandoahGC)
-    q += BROOKS_POINTER_OBJ_SIZE;
-  
-  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+  if (q < t && space->_first_dead > q && !space->make_oop(q)->is_gc_marked()) {
     #ifdef ASSERT // Debug only
       // we have a chunk of the space which hasn't moved and we've reinitialized
       // the mark word during the previous pass, so we can't use is_gc_marked for
@@ -289,8 +274,7 @@
 
       while (q < end) {
         size_t size = space->obj_size(q);
-	size = space->adjust_obj_size(size);
-        assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
+        assert(!space->make_oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
         prev_q = q;
         q += size;
       }
@@ -307,10 +291,10 @@
   const intx scan_interval = PrefetchScanIntervalInBytes;
   const intx copy_interval = PrefetchCopyIntervalInBytes;
   while (q < t) {
-    if (!oop(q)->is_gc_marked()) {
+    if (!space->make_oop(q)->is_gc_marked()) {
       // mark is pointer to next marked oop
       debug_only(prev_q = q);
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      q = (HeapWord*) space->make_oop(q)->mark()->decode_pointer();
       assert(q > prev_q, "we should be moving forward through memory");
     } else {
       // prefetch beyond q
@@ -318,15 +302,14 @@
 
       // size and destination
       size_t size = space->obj_size(q);
-      size = space->adjust_obj_size(size);
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
+      HeapWord* compaction_top = (HeapWord*)space->make_oop(q)->forwardee();
 
       // prefetch beyond compaction_top
       Prefetch::write(compaction_top, copy_interval);
 
       // copy object and reinit its mark
       assert(q != compaction_top, "everything in this pass should be moving");
-      Copy::aligned_conjoint_words(q, compaction_top, size);
+      Copy::aligned_conjoint_words((HeapWord*) space->make_oop(q), compaction_top, size);
       oop(compaction_top)->init_mark();
       assert(oop(compaction_top)->klass() != NULL, "should have a class");
 
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp	Tue Jul 28 16:10:57 2015 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp	Wed Jul 29 07:53:38 2015 +0200
@@ -319,3 +319,21 @@
 CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
   return ShenandoahHeap::heap()->next_compaction_region(this);
 }
+
+void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
+  scan_and_forward(this, cp);
+}
+
+void ShenandoahHeapRegion::adjust_pointers() {
+  // Check first if there is any work to do.
+  if (used() == 0) {
+    return;   // Nothing to do.
+  }
+
+  scan_and_adjust_pointers(this);
+}
+
+void ShenandoahHeapRegion::compact() {
+  scan_and_compact(this);
+}
+
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp	Tue Jul 28 16:10:57 2015 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp	Wed Jul 29 07:53:38 2015 +0200
@@ -10,6 +10,43 @@
 
 class ShenandoahHeapRegion : public ContiguousSpace {
 
+  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+
+private:
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    oop obj = oop(addr+1);
+    size_t size = obj->size() + 1;
+    return size;
+  }
+
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  inline size_t adjust_obj_size(size_t size) const {
+    return size + 1;
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return oop(addr+1)->size() + 1;
+  }
+
+  inline oop make_oop(HeapWord* addr) const {
+    return oop(addr+1);
+  }
 public:
   static size_t RegionSizeBytes;
   static size_t RegionSizeShift;
@@ -87,6 +124,14 @@
 
   virtual CompactibleSpace* next_compaction_space() const;
 
+  // Override for scan_and_forward support.
+  void prepare_for_compaction(CompactPoint* cp);
+  void adjust_pointers();
+  void compact();
+
+  virtual oop compact_oop(HeapWord* addr) const {
+    return oop(addr + 1);
+  }
 private:
   void do_reset();
 
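A minimal illustration of the dispatch split these declarations rely on (hypothetical stand-in types and names, not the HotSpot code): scan_and_{forward,adjust_pointers,compact} are templates over the concrete space type, so the private inline helpers bind at compile time, which is what the friend declarations enable, while forward() only reaches the destination space through the CompactibleSpace pointer in the CompactPoint, so compact_oop() has to be virtual.

```cpp
// Hedged sketch of the two dispatch paths (stand-in types, hypothetical
// names; the real templates live in space.inline.hpp).
#include <cstdint>
#include <iostream>

typedef std::uintptr_t* AddrT;   // stand-in for HeapWord*

struct SpaceBaseSketch {
  // Called through a base-class pointer (CompactPoint::space in HotSpot),
  // so it must be virtual for the region override to take effect.
  virtual AddrT compact_oop(AddrT addr) const { return addr; }
  virtual ~SpaceBaseSketch() {}
};

struct RegionSketch : SpaceBaseSketch {
  // Bound statically by the template below; in the real code this is a
  // private inline, reachable because the scan_and_* templates are friends.
  AddrT make_oop(AddrT addr) const    { return addr + 1; }
  AddrT compact_oop(AddrT addr) const { return addr + 1; }
};

// Stand-in for scan_and_forward<SpaceType>: the scanned space is a template
// parameter (static dispatch), the destination space is just a base pointer
// (virtual dispatch).
template <typename SpaceType>
void scan_sketch(SpaceType* space, SpaceBaseSketch* dest,
                 AddrT block, AddrT compact_top) {
  AddrT obj = space->make_oop(block);          // resolved at compile time
  AddrT fwd = dest->compact_oop(compact_top);  // resolved at run time
  std::cout << (obj - block) << " "
            << (fwd - compact_top) << std::endl;   // prints "1 1"
}

int main() {
  std::uintptr_t heap[8] = {0};
  RegionSketch r;
  scan_sketch(&r, &r, heap, heap + 4);
  return 0;
}
```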
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp	Tue Jul 28 16:10:57 2015 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp	Wed Jul 29 07:53:38 2015 +0200
@@ -11,7 +11,7 @@
 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
 #include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
-#include "gc/serial/genMarkSweep.hpp"
+#include "gc/serial/markSweep.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/thread.hpp"
@@ -22,9 +22,9 @@
 
 
 void ShenandoahMarkCompact::allocate_stacks() {
-  GenMarkSweep::_preserved_count_max = 0;
-  GenMarkSweep::_preserved_marks = NULL;
-  GenMarkSweep::_preserved_count = 0;
+  MarkSweep::_preserved_count_max = 0;
+  MarkSweep::_preserved_marks = NULL;
+  MarkSweep::_preserved_count = 0;
 }
 
 void ShenandoahMarkCompact::do_mark_compact() {
@@ -54,7 +54,7 @@
     regions[i]->set_is_in_collection_set(false);
   }
   _heap->clear_cset_fast_test();
- 
+
   if (ShenandoahVerify) {
     // Full GC should only be called between regular concurrent cycles, therefore
     // those verifications should be valid.
@@ -77,11 +77,11 @@
   ReferenceProcessor* rp = _heap->ref_processor();
  
   // hook up weak ref data so it can be used during Mark-Sweep
-  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
+  assert(MarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
   assert(rp == ShenandoahHeap::heap()->ref_processor(), "Precondition"); 
   bool clear_all_softrefs = true;  //fixme
-  GenMarkSweep::_ref_processor = rp;
+  MarkSweep::_ref_processor = rp;
   rp->setup_policy(clear_all_softrefs);
 
   CodeCache::gc_prologue();
@@ -114,7 +114,7 @@
   phase4_compact_objects();
 
  
-  GenMarkSweep::restore_marks();
+  MarkSweep::restore_marks();
   BiasedLocking::restore_marks();
   GenMarkSweep::deallocate_stacks();
 
@@ -122,7 +122,7 @@
   JvmtiExport::gc_epilogue();
 
   // refs processing: clean slate
-  GenMarkSweep::_ref_processor = NULL;
+  MarkSweep::_ref_processor = NULL;
 
  
   if (ShenandoahVerify) {
@@ -147,12 +147,12 @@
   ShenandoahHeap* _heap = ShenandoahHeap::heap();
   ReferenceProcessor* rp = _heap->ref_processor();
 
-  GenMarkSweep::_ref_processor = rp;
+  MarkSweep::_ref_processor = rp;
  
   // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
  
-  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
+  MarkingCodeBlobClosure follow_code_closure(&MarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
   {
     ShenandoahRootProcessor rp(_heap, 1);
     rp.process_strong_roots(&MarkSweep::follow_root_closure,
@@ -166,9 +166,9 @@
   rp->setup_policy(clear_soft_refs);
  
   const ReferenceProcessorStats& stats =
-    rp->process_discovered_references(&GenMarkSweep::is_alive,
-				      &GenMarkSweep::keep_alive,
-				      &GenMarkSweep::follow_stack_closure,
+    rp->process_discovered_references(&MarkSweep::is_alive,
+				      &MarkSweep::keep_alive,
+				      &MarkSweep::follow_stack_closure,
 				      NULL,
 				      _heap->collector_policy()->conc_timer(),
 				      _heap->tracer()->gc_id());
@@ -178,16 +178,16 @@
   _heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
  
   // Unload classes and purge the SystemDictionary.
-  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
+  bool purged_class = SystemDictionary::do_unloading(&MarkSweep::is_alive);
  
   // Unload nmethods.
-  CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
+  CodeCache::do_unloading(&MarkSweep::is_alive, purged_class);
  
   // Prune dead klasses from subklass/sibling/implementor lists.
-  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
+  Klass::clean_weak_klass_links(&MarkSweep::is_alive);
  
   // Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
-  _heap->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
+  _heap->unlink_string_and_symbol_table(&MarkSweep::is_alive);
  
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
@@ -218,22 +218,42 @@
 class ShenandoahPrepareForCompaction : public ShenandoahHeapRegionClosure {
   CompactPoint _cp;
   ShenandoahHeap* _heap;
+  bool _dead_humongous;
 
 public:
-  ShenandoahPrepareForCompaction() : _heap(ShenandoahHeap::heap()) {
-    _cp.space = _heap->heap_regions()[0];
-    _cp.threshold = _heap->start_of_heap();
+  ShenandoahPrepareForCompaction() :
+    _heap(ShenandoahHeap::heap()),
+    _dead_humongous(false) {
   }
 
   bool doHeapRegion(ShenandoahHeapRegion* r) {
     // We need to save the contents
     if (!r->is_humongous()) {
+      if (_cp.space == NULL) {
+	_cp.space = r;
+	_cp.threshold = _heap->start_of_heap();
+      }
+      _dead_humongous = false;
       r->prepare_for_compaction(&_cp);
     }  else {
       if (r->is_humongous_start()) {
         oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-	r->save_mark_word(obj);
-	obj->forward_to(obj);
+	if (obj->is_gc_marked()) {
+	  obj->forward_to(obj);
+	  _dead_humongous = false;
+	} else {
+	  if (_cp.space == NULL) {
+	    _cp.space = r;
+	    _cp.threshold = _heap->start_of_heap();
+	  }
+	  _dead_humongous = true;
+	  r->reset();
+	}
+      } else {
+	assert(r->is_humongous_continuation(), "expect humongous continuation");
+	if (_dead_humongous) {
+	  r->reset();
+	}
       }
     }
     return false;
@@ -252,7 +272,8 @@
       if (r->is_humongous_start()) {
         // We must adjust the pointers on the single H object.
         oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-        // point all the oops to the new location
+	assert(obj->is_gc_marked(), "should be marked");
+	// point all the oops to the new location
 	MarkSweep::adjust_pointers(obj);
       }
     } else {
@@ -268,27 +289,27 @@
     // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
-
+  CodeBlobToOopClosure adjust_code_closure(&MarkSweep::adjust_pointer_closure,
+					   CodeBlobToOopClosure::FixRelocations);
 
   {
     ShenandoahRootProcessor rp(heap, 1);
-    rp.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
-			 &GenMarkSweep::adjust_cld_closure,
+    rp.process_all_roots(&MarkSweep::adjust_pointer_closure,
+			 &MarkSweep::adjust_cld_closure,
 			 &adjust_code_closure);
   }
 
-  assert(GenMarkSweep::ref_processor() == heap->ref_processor(), "Sanity");
+  assert(MarkSweep::ref_processor() == heap->ref_processor(), "Sanity");
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  heap->weak_roots_iterate(&GenMarkSweep::adjust_pointer_closure);
+  heap->weak_roots_iterate(&MarkSweep::adjust_pointer_closure);
 
   //  if (G1StringDedup::is_enabled()) {
-  //    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
+  //    G1StringDedup::oops_do(&MarkSweep::adjust_pointer_closure);
   //  }
 
-  GenMarkSweep::adjust_marks();
+  MarkSweep::adjust_marks();
 
   ShenandoahMarkCompactAdjustPointersClosure apc;
   heap->heap_region_iterate(&apc);
@@ -302,34 +323,19 @@
 
 class CompactObjectsClosure : public ShenandoahHeapRegionClosure {
 
-private:
-  bool _dead_humongous;
-
 public:
 
-  CompactObjectsClosure() : _dead_humongous(false) {
+  CompactObjectsClosure() {
   }
 
   bool doHeapRegion(ShenandoahHeapRegion* r) {
     if (r->is_humongous()) {
       if (r->is_humongous_start()) {
         oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-        if (obj->is_gc_marked()) {
-          obj->init_mark();
-	  _dead_humongous = false;
-        } else {
-          assert(r->is_empty(), "Should have been cleared in phase 2.");
-	  r->reset();
-	  _dead_humongous = true;
-	}
-      } else {
-	// r is a continuation of a humongous region
-	if (_dead_humongous) 
-	  r->reset();
+	assert(obj->is_gc_marked(), "expect marked humongous object");
+	obj->init_mark();
       }
-
     } else {
-      _dead_humongous = false;
       r->compact();
     }
 
@@ -356,9 +362,6 @@
       }
 
     } else {
-      // The brooks pointer calculation adaptation 
-      // leaves us one past where we want to be.
-      r->set_top(r->top() - BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
       size_t live = r->used();
       if (live == 0) _heap->add_free_region(r);
       r->setLiveData(live);