changeset 9451:04773a6b9229

Move Shenandoah sources from gc_implementation/shenandoah to gc/shenandoah, as other GCs have done.
author rkennke
date Wed, 19 Aug 2015 23:00:20 +0200
parents 4e1b2fe49012
children f7786999dcd2
files src/cpu/x86/vm/c1_LIRAssembler_x86.cpp src/cpu/x86/vm/c1_Runtime1_x86.cpp src/share/vm/adlc/main.cpp src/share/vm/c1/c1_LIRGenerator.cpp src/share/vm/c1/c1_Runtime1.cpp src/share/vm/gc/shenandoah/brooksPointer.cpp src/share/vm/gc/shenandoah/brooksPointer.hpp src/share/vm/gc/shenandoah/shenandoahBarrierSet.cpp src/share/vm/gc/shenandoah/shenandoahBarrierSet.hpp src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp src/share/vm/gc/shenandoah/shenandoahConcurrentMark.cpp src/share/vm/gc/shenandoah/shenandoahConcurrentMark.hpp src/share/vm/gc/shenandoah/shenandoahConcurrentMark.inline.hpp src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp src/share/vm/gc/shenandoah/shenandoahConcurrentThread.hpp src/share/vm/gc/shenandoah/shenandoahHeap.cpp src/share/vm/gc/shenandoah/shenandoahHeap.hpp src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp src/share/vm/gc/shenandoah/shenandoahHeapRegion.hpp src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.cpp src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.hpp src/share/vm/gc/shenandoah/shenandoahHumongous.hpp src/share/vm/gc/shenandoah/shenandoahJNICritical.cpp src/share/vm/gc/shenandoah/shenandoahJNICritical.hpp src/share/vm/gc/shenandoah/shenandoahMarkCompact.cpp src/share/vm/gc/shenandoah/shenandoahMarkCompact.hpp src/share/vm/gc/shenandoah/shenandoahRootProcessor.cpp src/share/vm/gc/shenandoah/shenandoahRootProcessor.hpp src/share/vm/gc/shenandoah/shenandoahRuntime.cpp src/share/vm/gc/shenandoah/shenandoahRuntime.hpp src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp src/share/vm/gc_implementation/shenandoah/brooksPointer.cpp src/share/vm/gc_implementation/shenandoah/brooksPointer.hpp src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp 
src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.hpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHumongous.hpp src/share/vm/gc_implementation/shenandoah/shenandoahJNICritical.cpp src/share/vm/gc_implementation/shenandoah/shenandoahJNICritical.hpp src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.hpp src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.hpp src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.cpp src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.hpp src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.hpp src/share/vm/memory/universe.cpp src/share/vm/opto/graphKit.cpp src/share/vm/opto/library_call.cpp src/share/vm/opto/runtime.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/safepoint.cpp src/share/vm/runtime/thread.cpp src/share/vm/services/memoryService.cpp 
src/share/vm/services/shenandoahMemoryPool.hpp
diffstat 72 files changed, 8663 insertions(+), 8663 deletions(-) [+]
line wrap: on
line diff
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -32,7 +32,7 @@
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -39,9 +39,9 @@
 #include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
--- a/src/share/vm/adlc/main.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/adlc/main.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -237,7 +237,7 @@
   AD.addInclude(AD._HPP_file, "opto/regalloc.hpp");
   AD.addInclude(AD._HPP_file, "opto/subnode.hpp");
   AD.addInclude(AD._HPP_file, "opto/vectornode.hpp");
-  AD.addInclude(AD._HPP_file, "gc_implementation/shenandoah/shenandoahBarrierSet.hpp");
+  AD.addInclude(AD._HPP_file, "gc/shenandoah/shenandoahBarrierSet.hpp");
   AD.addInclude(AD._CPP_CLONE_file, "precompiled.hpp");
   AD.addInclude(AD._CPP_CLONE_file, "adfiles", get_basename(AD._HPP_file._name));
   AD.addInclude(AD._CPP_EXPAND_file, "precompiled.hpp");
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -41,8 +41,8 @@
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/g1/heapRegion.hpp"
 #endif // INCLUDE_ALL_GCS
 
--- a/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -60,7 +60,7 @@
 #include "runtime/vm_version.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/events.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 
 
 // Implementation of StubAssembler
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/brooksPointer.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,73 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include "memory/universe.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+
+BrooksPointer::BrooksPointer(HeapWord** hw) : _heap_word(hw) {}
+
+BrooksPointer BrooksPointer::get(oop obj) {
+  HeapWord* hw_obj = (HeapWord*) obj;
+  HeapWord* brooks_ptr = hw_obj - 1;
+  // We know that the value in that memory location is a pointer to another
+  // heapword/oop.
+  return BrooksPointer((HeapWord**) brooks_ptr);
+}
+
+void BrooksPointer::set_forwardee(oop forwardee) {
+  assert(ShenandoahHeap::heap()->is_in(forwardee), "forwardee must be valid oop in the heap");
+  *_heap_word = (HeapWord*) forwardee;
+#ifdef ASSERT
+  if (ShenandoahTraceBrooksPointers) {
+    tty->print_cr("setting_forwardee to "PTR_FORMAT" = "PTR_FORMAT, p2i((HeapWord*) forwardee), p2i(*_heap_word));
+  }
+#endif
+}
+
+HeapWord* BrooksPointer::cas_forwardee(HeapWord* old, HeapWord* forwardee) {
+  assert(ShenandoahHeap::heap()->is_in(forwardee), "forwardee must point to a heap address");
+
+
+
+  HeapWord* o = old;
+  HeapWord* n = forwardee;
+  HeapWord* result;
+
+#ifdef ASSERT
+  if (ShenandoahTraceBrooksPointers) {
+    tty->print_cr("Attempting to CAS "PTR_FORMAT" value "PTR_FORMAT" from "PTR_FORMAT" to "PTR_FORMAT, p2i(_heap_word), p2i(*_heap_word), p2i(o), p2i(n));
+  }
+#endif
+
+#ifdef ASSERT
+  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
+    ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+    ShenandoahHeapRegion* hr = sh->heap_region_containing(old);
+
+    {
+      hr->memProtectionOff();
+      result = (HeapWord*) Atomic::cmpxchg_ptr(n, _heap_word, o);
+      hr->memProtectionOn();
+    }
+  } else {
+    result = (HeapWord*) Atomic::cmpxchg_ptr(n, _heap_word, o);
+  }
+#else
+  result = (HeapWord*) Atomic::cmpxchg_ptr(n, _heap_word, o);
+#endif
+
+#ifdef ASSERT
+  if (ShenandoahTraceBrooksPointers) {
+    tty->print_cr("Result of CAS from "PTR_FORMAT" to "PTR_FORMAT" was "PTR_FORMAT" read value was "PTR_FORMAT, p2i(o), p2i(n), p2i(result), p2i(*_heap_word));
+  }
+#endif
+
+  return result;
+}
+
+bool BrooksPointer::check_forwardee_is_in_heap(oop forwardee) {
+   return Universe::heap()->is_in(forwardee);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/brooksPointer.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,62 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_BROOKSPOINTER_HPP
+#define SHARE_VM_GC_SHENANDOAH_BROOKSPOINTER_HPP
+
+#include "oops/oop.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class BrooksPointer {
+
+public:
+  static const uint BROOKS_POINTER_OBJ_SIZE = 1;
+
+private:
+
+  HeapWord** _heap_word;
+
+  BrooksPointer(HeapWord** heap_word);
+
+public:
+
+  bool check_forwardee_is_in_heap(oop forwardee);
+  
+  inline oop get_forwardee_raw() {
+    return oop(*_heap_word);
+  }
+
+  inline oop get_forwardee() {
+    oop forwardee;
+
+#ifdef ASSERT
+    if (ShenandoahVerifyReadsToFromSpace) {
+      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+      ShenandoahHeapRegion* hr = sh->heap_region_containing(_heap_word);
+
+      {
+        hr->memProtectionOff();
+        forwardee = (oop) (*_heap_word);
+        hr->memProtectionOn();
+      }
+    } else {
+      forwardee = get_forwardee_raw();
+    }
+#else
+    forwardee = get_forwardee_raw();
+#endif
+
+    assert(check_forwardee_is_in_heap(forwardee), "forwardee must be in heap");
+    assert(forwardee->is_oop(), "forwardee must be valid oop");
+    return forwardee;
+  }
+
+  void set_forwardee(oop forwardee);
+  HeapWord* cas_forwardee(HeapWord* old, HeapWord* forwardee);
+
+  static BrooksPointer get(oop obj);
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_BROOKSPOINTER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahBarrierSet.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,685 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "memory/universe.hpp"
+#include "utilities/array.hpp"
+
+#define __ masm->
+
+class UpdateRefsForOopClosure: public ExtendedOopClosure {
+
+private:
+  ShenandoahHeap* _heap;
+public:
+  UpdateRefsForOopClosure() {
+    _heap = ShenandoahHeap::heap();
+  }
+
+  void do_oop(oop* p)       {
+    _heap->maybe_update_oop_ref(p);
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+
+};
+
+ShenandoahBarrierSet::ShenandoahBarrierSet() :
+  BarrierSet(BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet))
+{
+}
+
+void ShenandoahBarrierSet::print_on(outputStream* st) const {
+  st->print("ShenandoahBarrierSet");
+}
+
+bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
+  return bsn == BarrierSet::ShenandoahBarrierSet;
+}
+
+bool ShenandoahBarrierSet::has_read_prim_array_opt() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_read_prim_barrier() {
+  return false;
+}
+
+bool ShenandoahBarrierSet::has_read_ref_array_opt() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_read_ref_barrier() {
+  return false;
+}
+
+bool ShenandoahBarrierSet::has_read_region_opt() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_write_prim_array_opt() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_write_prim_barrier() {
+  return false;
+}
+
+bool ShenandoahBarrierSet::has_write_ref_array_opt() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_write_ref_barrier() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_write_ref_pre_barrier() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::has_write_region_opt() {
+  return true;
+}
+
+bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
+  return true;
+}
+
+void ShenandoahBarrierSet::read_prim_array(MemRegion mr) {
+  Unimplemented();
+}
+
+void ShenandoahBarrierSet::read_prim_field(HeapWord* hw, size_t s){
+  Unimplemented();
+}
+
+bool ShenandoahBarrierSet::read_prim_needs_barrier(HeapWord* hw, size_t s) {
+  return false;
+}
+
+void ShenandoahBarrierSet::read_ref_array(MemRegion mr) {
+  Unimplemented();
+}
+
+void ShenandoahBarrierSet::read_ref_field(void* v) {
+  //    tty->print_cr("read_ref_field: v = "PTR_FORMAT, v);
+  // return *v;
+}
+
+bool ShenandoahBarrierSet::read_ref_needs_barrier(void* v) {
+  Unimplemented();
+  return false;
+}
+
+void ShenandoahBarrierSet::read_region(MemRegion mr) {
+  Unimplemented();
+}
+
+void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
+  Unimplemented();
+}
+
+void ShenandoahBarrierSet::write_prim_array(MemRegion mr) {
+  Unimplemented();
+}
+
+void ShenandoahBarrierSet::write_prim_field(HeapWord* hw, size_t s , juint x, juint y) {
+  Unimplemented();
+}
+
+bool ShenandoahBarrierSet::write_prim_needs_barrier(HeapWord* hw, size_t s, juint x, juint y) {
+  Unimplemented();
+  return false;
+}
+
+bool ShenandoahBarrierSet::need_update_refs_barrier() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  return heap->is_update_references_in_progress() || (heap->concurrent_mark_in_progress() && heap->need_update_refs());
+}
+
+void ShenandoahBarrierSet::write_ref_array_work(MemRegion mr) {
+  if (! need_update_refs_barrier()) return;
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  for (HeapWord* word = mr.start(); word < mr.end(); word++) {
+    oop* oop_ptr = (oop*) word;
+    heap->maybe_update_oop_ref(oop_ptr);
+  }
+}
+
+template <class T>
+void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, int count) {
+
+#ifdef ASSERT
+    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+    if (sh->is_in(dst) &&
+        sh->heap_region_containing((HeapWord*) dst)->is_in_collection_set() &&
+        ! sh->cancelled_concgc()) {
+      tty->print_cr("dst = "PTR_FORMAT, p2i(dst));
+      sh->heap_region_containing((HeapWord*) dst)->print();
+      assert(false, "We should have fixed this earlier");
+    }
+#endif
+
+  if (! JavaThread::satb_mark_queue_set().is_active()) return;
+  // tty->print_cr("write_ref_array_pre_work: "PTR_FORMAT", "INT32_FORMAT, dst, count);
+  T* elem_ptr = dst;
+  for (int i = 0; i < count; i++, elem_ptr++) {
+    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
+    if (!oopDesc::is_null(heap_oop)) {
+      G1SATBCardTableModRefBS::enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    }
+    // tty->print_cr("write_ref_array_pre_work: oop: "PTR_FORMAT, heap_oop);
+  }
+}
+
+void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
+  if (! dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
+void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
+  if (! dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
+template <class T>
+void ShenandoahBarrierSet::write_ref_field_pre_static(T* field, oop newVal) {
+  T heap_oop = oopDesc::load_heap_oop(field);
+
+#ifdef ASSERT
+    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+    if (sh->is_in(field) &&
+        sh->heap_region_containing((HeapWord*)field)->is_in_collection_set() &&
+        ! sh->cancelled_concgc()) {
+      tty->print_cr("field = "PTR_FORMAT, p2i(field));
+      sh->heap_region_containing((HeapWord*)field)->print();
+      assert(false, "We should have fixed this earlier");
+    }
+#endif
+
+  if (!oopDesc::is_null(heap_oop)) {
+    G1SATBCardTableModRefBS::enqueue(oopDesc::decode_heap_oop(heap_oop));
+    // tty->print_cr("write_ref_field_pre_static: v = "PTR_FORMAT" o = "PTR_FORMAT" old: "PTR_FORMAT, field, newVal, heap_oop);
+  }
+}
+
+template <class T>
+inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop newVal) {
+  write_ref_field_pre_static(field, newVal);
+}
+
+// These are the more general virtual versions.
+void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
+  write_ref_field_pre_static(field, new_val);
+}
+
+void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
+  write_ref_field_pre_static(field, new_val);
+}
+
+void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
+  guarantee(false, "Not needed");
+}
+
+void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
+  if (! need_update_refs_barrier()) return;
+  assert (! UseCompressedOops, "compressed oops not supported yet");
+  ShenandoahHeap::heap()->maybe_update_oop_ref((oop*) v);
+  // tty->print_cr("write_ref_field_work: v = "PTR_FORMAT" o = "PTR_FORMAT, v, o);
+}
+
+void ShenandoahBarrierSet::write_region_work(MemRegion mr) {
+
+  if (! need_update_refs_barrier()) return;
+
+  // This is called for cloning an object (see jvm.cpp) after the clone
+  // has been made. We are not interested in any 'previous value' because
+  // it would be NULL in any case. But we *are* interested in any oop*
+  // that potentially need to be updated.
+
+  // tty->print_cr("write_region_work: "PTR_FORMAT", "PTR_FORMAT, mr.start(), mr.end());
+  oop obj = oop(mr.start());
+  assert(obj->is_oop(), "must be an oop");
+  UpdateRefsForOopClosure cl;
+  obj->oop_iterate(&cl);
+}
+
+oop ShenandoahBarrierSet::resolve_oop(oop src) {
+  return ShenandoahBarrierSet::resolve_oop_static(src);
+}
+
+oop ShenandoahBarrierSet::maybe_resolve_oop(oop src) {
+  if (Universe::heap()->is_in(src)) {
+    return resolve_oop_static(src);
+  } else {
+    return src;
+  }
+}
+
+oop ShenandoahBarrierSet::resolve_and_maybe_copy_oop_work(oop src) {
+  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+  assert(src != NULL, "only evacuated non NULL oops");
+
+  if (sh->in_cset_fast_test((HeapWord*) src)) {
+    return resolve_and_maybe_copy_oop_work2(src);
+  } else {
+    return src;
+  }
+}
+
+oop ShenandoahBarrierSet::resolve_and_maybe_copy_oop_work2(oop src) {
+  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+  if (! sh->is_evacuation_in_progress()) {
+    // We may get here through a barrier that just took a safepoint that
+    // turned off evacuation. In this case, return right away.
+    return ShenandoahBarrierSet::resolve_oop_static(src);
+  }
+  assert(src != NULL, "only evacuated non NULL oops");
+  assert(sh->heap_region_containing(src)->is_in_collection_set(), "only evacuate objects in collection set");
+  assert(! sh->heap_region_containing(src)->is_humongous(), "never evacuate humongous objects");
+  // TODO: Consider passing thread from caller.
+  oop dst = sh->evacuate_object(src, Thread::current());
+#ifdef ASSERT
+    if (ShenandoahTraceEvacuations) {
+      tty->print_cr("src = "PTR_FORMAT" dst = "PTR_FORMAT" src = "PTR_FORMAT" src-2 = "PTR_FORMAT,
+                 p2i((HeapWord*) src), p2i((HeapWord*) dst), p2i((HeapWord*) src), p2i(((HeapWord*) src) - 2));
+    }
+#endif
+  assert(sh->is_in(dst), "result should be in the heap");
+  return dst;
+}
+
+oop ShenandoahBarrierSet::resolve_and_maybe_copy_oopHelper(oop src) {
+    if (src != NULL) {
+      ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+      oop tmp = resolve_oop_static(src);
+      if (! sh->is_evacuation_in_progress()) {
+        return tmp;
+      }
+      return resolve_and_maybe_copy_oop_work(src);
+    } else {
+      return NULL;
+    }
+}
+
+JRT_LEAF(oopDesc*, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_c2(oopDesc* src))
+  oop result = ((ShenandoahBarrierSet*) oopDesc::bs())->resolve_and_maybe_copy_oop_work2(oop(src));
+  // tty->print_cr("called C2 write barrier with: %p result: %p copy: %d", (oopDesc*) src, (oopDesc*) result, src != result);
+  return (oopDesc*) result;
+JRT_END
+
+IRT_LEAF(oopDesc*, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_interp(oopDesc* src))
+  oop result = ((ShenandoahBarrierSet*)oopDesc::bs())->resolve_and_maybe_copy_oop_work2(oop(src));
+  // tty->print_cr("called interpreter write barrier with: %p result: %p", src, result);
+  return (oopDesc*) result;
+IRT_END
+
+JRT_LEAF(oopDesc*, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_c1(JavaThread* thread, oopDesc* src))
+  oop result = ((ShenandoahBarrierSet*)oopDesc::bs())->resolve_and_maybe_copy_oop_work2(oop(src));
+  // tty->print_cr("called static write barrier (2) with: "PTR_FORMAT" result: "PTR_FORMAT, p2i(src), p2i((oopDesc*)(result)));
+  return (oopDesc*) result;
+JRT_END
+
+oop ShenandoahBarrierSet::resolve_and_maybe_copy_oop(oop src) {
+  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+  oop result;
+  if (src != NULL && sh->is_in(src)) {
+    result = resolve_and_maybe_copy_oopHelper(src);
+    assert(sh->is_in(result), "result should be in the heap");
+  } else {
+    result = src;
+  }
+  assert(result == NULL || (sh->is_in(result) && result->is_oop()), "resolved oop must be NULL, or a valid oop in the heap");
+  return result;
+}
+
+#ifndef CC_INTERP
+void ShenandoahBarrierSet::compile_resolve_oop_runtime(MacroAssembler* masm, Register dst) {
+
+  __ push(rscratch1);
+
+  if (dst != rax) {
+    __ push(rax);
+  }
+  if (dst != rbx) {
+    __ push(rbx);
+  }
+  if (dst != rcx) {
+    __ push(rcx);
+  }
+  if (dst != rdx) {
+    __ push(rdx);
+  }
+  if (dst != rdi) {
+    __ push(rdi);
+  }
+  if (dst != rsi) {
+    __ push(rsi);
+  }
+  if (dst != rbp) {
+    __ push(rbp);
+  }
+  if (dst != r8) {
+    __ push(r8);
+  }
+  if (dst != r9) {
+    __ push(r9);
+  }
+  if (dst != r11) {
+    __ push(r11);
+  }
+  if (dst != r12) {
+    __ push(r12);
+  }
+  if (dst != r13) {
+    __ push(r13);
+  }
+  if (dst != r14) {
+    __ push(r14);
+  }
+  if (dst != r15) {
+    __ push(r15);
+  }
+
+  __ subptr(rsp, 128);
+  __ movdbl(Address(rsp, 0), xmm0);
+  __ movdbl(Address(rsp, 8), xmm1);
+  __ movdbl(Address(rsp, 16), xmm2);
+  __ movdbl(Address(rsp, 24), xmm3);
+  __ movdbl(Address(rsp, 32), xmm4);
+  __ movdbl(Address(rsp, 40), xmm5);
+  __ movdbl(Address(rsp, 48), xmm6);
+  __ movdbl(Address(rsp, 56), xmm7);
+  __ movdbl(Address(rsp, 64), xmm8);
+  __ movdbl(Address(rsp, 72), xmm9);
+  __ movdbl(Address(rsp, 80), xmm10);
+  __ movdbl(Address(rsp, 88), xmm11);
+  __ movdbl(Address(rsp, 96), xmm12);
+  __ movdbl(Address(rsp, 104), xmm13);
+  __ movdbl(Address(rsp, 112), xmm14);
+  __ movdbl(Address(rsp, 120), xmm15);
+
+  __ mov(c_rarg1, dst);
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::resolve_oop_static), c_rarg1);
+  __ mov(rscratch1, rax);
+
+  __ movdbl(xmm0, Address(rsp, 0));
+  __ movdbl(xmm1, Address(rsp, 8));
+  __ movdbl(xmm2, Address(rsp, 16));
+  __ movdbl(xmm3, Address(rsp, 24));
+  __ movdbl(xmm4, Address(rsp, 32));
+  __ movdbl(xmm5, Address(rsp, 40));
+  __ movdbl(xmm6, Address(rsp, 48));
+  __ movdbl(xmm7, Address(rsp, 56));
+  __ movdbl(xmm8, Address(rsp, 64));
+  __ movdbl(xmm9, Address(rsp, 72));
+  __ movdbl(xmm10, Address(rsp, 80));
+  __ movdbl(xmm11, Address(rsp, 88));
+  __ movdbl(xmm12, Address(rsp, 96));
+  __ movdbl(xmm13, Address(rsp, 104));
+  __ movdbl(xmm14, Address(rsp, 112));
+  __ movdbl(xmm15, Address(rsp, 120));
+  __ addptr(rsp, 128);
+
+  if (dst != r15) {
+    __ pop(r15);
+  }
+  if (dst != r14) {
+    __ pop(r14);
+  }
+  if (dst != r13) {
+    __ pop(r13);
+  }
+  if (dst != r12) {
+    __ pop(r12);
+  }
+  if (dst != r11) {
+    __ pop(r11);
+  }
+  if (dst != r9) {
+    __ pop(r9);
+  }
+  if (dst != r8) {
+    __ pop(r8);
+  }
+  if (dst != rbp) {
+    __ pop(rbp);
+  }
+  if (dst != rsi) {
+    __ pop(rsi);
+  }
+  if (dst != rdi) {
+    __ pop(rdi);
+  }
+  if (dst != rdx) {
+    __ pop(rdx);
+  }
+  if (dst != rcx) {
+    __ pop(rcx);
+  }
+  if (dst != rbx) {
+    __ pop(rbx);
+  }
+  if (dst != rax) {
+    __ pop(rax);
+  }
+
+  __ mov(dst, rscratch1);
+
+  __ pop(rscratch1);
+}
+
+// TODO: The following should really live in an X86 specific subclass.
+void ShenandoahBarrierSet::compile_resolve_oop(MacroAssembler* masm, Register dst) {
+  if (ShenandoahReadBarrier) {
+
+    Label is_null;
+    __ testptr(dst, dst);
+    __ jcc(Assembler::zero, is_null);
+    compile_resolve_oop_not_null(masm, dst);
+    __ bind(is_null);
+  }
+}
+
+void ShenandoahBarrierSet::compile_resolve_oop_not_null(MacroAssembler* masm, Register dst) {
+  if (ShenandoahReadBarrier) {
+    if (ShenandoahVerifyReadsToFromSpace) {
+      compile_resolve_oop_runtime(masm, dst);
+      return;
+    }
+    __ movptr(dst, Address(dst, -8));
+  }
+}
+
+void ShenandoahBarrierSet::compile_resolve_oop_for_write(MacroAssembler* masm, Register dst, bool explicit_null_check, int stack_adjust, int num_state_save, ...) {
+
+  if (! ShenandoahWriteBarrier) {
+    assert(! ShenandoahConcurrentEvacuation, "Can only do this without concurrent evacuation");
+    return compile_resolve_oop(masm, dst);
+  }
+      
+  assert(dst != rscratch1, "different regs");
+  //assert(dst != rscratch2, "Need rscratch2");
+
+  Label done;
+
+  // Resolve oop first.
+  // TODO: Make this not-null-checking as soon as we have implicit null checks in c1!
+
+
+  if (explicit_null_check) {
+    __ testptr(dst, dst);
+    __ jcc(Assembler::zero, done);
+  }
+
+  Address evacuation_in_progress = Address(r15_thread, in_bytes(JavaThread::evacuation_in_progress_offset()));
+
+  __ cmpb(evacuation_in_progress, 0);
+
+  // Now check if evacuation is in progress.
+  compile_resolve_oop_not_null(masm, dst);
+
+  __ jcc(Assembler::equal, done);
+  __ push(rscratch1);
+  __ push(rscratch2);
+
+  __ movptr(rscratch1, dst);
+  __ shrptr(rscratch1, ShenandoahHeapRegion::RegionSizeShift);
+  __ movptr(rscratch2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
+  __ movbool(rscratch2, Address(rscratch2, rscratch1, Address::times_1));
+  __ testb(rscratch2, 0x1);
+
+  __ pop(rscratch2);
+  __ pop(rscratch1);
+
+  __ jcc(Assembler::zero, done);
+
+  intArray save_states = intArray(num_state_save);
+  va_list vl;
+  va_start(vl, num_state_save);
+  for (int i = 0; i < num_state_save; i++) {
+    save_states.at_put(i, va_arg(vl, int));
+  }
+  va_end(vl);
+
+  __ push(rscratch1);
+  for (int i = 0; i < num_state_save; i++) {
+    switch (save_states[i]) {
+    case noreg:
+      __ subptr(rsp, Interpreter::stackElementSize);
+      break;
+    case ss_rax:
+      __ push(rax);
+      break;
+    case ss_rbx:
+      __ push(rbx);
+      break;
+    case ss_rcx:
+      __ push(rcx);
+      break;
+    case ss_rdx:
+      __ push(rdx);
+      break;
+    case ss_rsi:
+      __ push(rsi);
+      break;
+    case ss_rdi:
+      __ push(rdi);
+      break;
+    case ss_r13:
+      __ push(r13);
+      break;
+    case ss_ftos:
+      __ subptr(rsp, wordSize);
+      __ movflt(Address(rsp, 0), xmm0);
+      break;
+    case ss_dtos:
+      __ subptr(rsp, 2 * wordSize);
+      __ movdbl(Address(rsp, 0), xmm0);
+      break;
+    case ss_c_rarg0:
+      __ push(c_rarg0);
+      break;
+    case ss_c_rarg1:
+      __ push(c_rarg1);
+      break;
+    case ss_c_rarg2:
+      __ push(c_rarg2);
+      break;
+    case ss_c_rarg3:
+      __ push(c_rarg3);
+      break;
+    case ss_c_rarg4:
+      __ push(c_rarg4);
+      break;
+
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_interp), dst);
+  __ mov(rscratch1, rax);
+
+  for (int i = num_state_save - 1; i >= 0; i--) {
+    switch (save_states[i]) {
+    case noreg:
+      __ addptr(rsp, Interpreter::stackElementSize);
+      break;
+    case ss_rax:
+      __ pop(rax);
+      break;
+    case ss_rbx:
+      __ pop(rbx);
+      break;
+    case ss_rcx:
+      __ pop(rcx);
+      break;
+    case ss_rdx:
+      __ pop(rdx);
+      break;
+    case ss_rsi:
+      __ pop(rsi);
+      break;
+    case ss_rdi:
+      __ pop(rdi);
+      break;
+    case ss_r13:
+      __ pop(r13);
+      break;
+    case ss_ftos:
+      __ movflt(xmm0, Address(rsp, 0));
+      __ addptr(rsp, wordSize);
+      break;
+    case ss_dtos:
+      __ movdbl(xmm0, Address(rsp, 0));
+      __ addptr(rsp, 2 * Interpreter::stackElementSize);
+      break;
+    case ss_c_rarg0:
+      __ pop(c_rarg0);
+      break;
+    case ss_c_rarg1:
+      __ pop(c_rarg1);
+      break;
+    case ss_c_rarg2:
+      __ pop(c_rarg2);
+      break;
+    case ss_c_rarg3:
+      __ pop(c_rarg3);
+      break;
+    case ss_c_rarg4:
+      __ pop(c_rarg4);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  __ mov(dst, rscratch1);
+  __ pop(rscratch1);
+
+  __ bind(done);
+}
+
+/*
+void ShenandoahBarrierSet::compile_resolve_oop_for_write(MacroAssembler* masm, Register dst) {
+
+  Label is_null;
+  __ testptr(dst, dst);
+  __ jcc(Assembler::zero, is_null);
+  compile_resolve_oop_for_write_not_null(masm, dst);
+  __ bind(is_null);
+
+}
+*/
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahBarrierSet.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,188 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
+
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shared/barrierSet.hpp"
+
+// Barrier set for Shenandoah: reads may have to resolve an object through
+// its Brooks forwarding pointer (stored in the word immediately before the
+// object), and writes may additionally have to copy the object out of the
+// collection set first.
+class ShenandoahBarrierSet: public BarrierSet {
+private:
+
+  // Loads the Brooks forwarding pointer of p. When from-space verification
+  // has memory-protected the containing region, the protection is lifted
+  // around the read and restored afterwards.
+  static inline oop get_shenandoah_forwardee_helper(oop p) {
+    assert(UseShenandoahGC, "must only be called when Shenandoah is used.");
+    assert(Universe::heap()->is_in(p), "We shouldn't be calling this on objects not in the heap");
+    oop forwardee;
+#ifdef ASSERT
+    if (ShenandoahVerifyReadsToFromSpace) {
+      ShenandoahHeap* heap = (ShenandoahHeap *) Universe::heap();
+      ShenandoahHeapRegion* region = heap->heap_region_containing(p);
+      region->memProtectionOff();
+      forwardee = oop( *((HeapWord**) ((HeapWord*) p) - 1));
+      region->memProtectionOn();
+    } else {
+      forwardee = oop( *((HeapWord**) ((HeapWord*) p) - 1));
+    }
+#else
+    forwardee = oop( *((HeapWord**) ((HeapWord*) p) - 1));
+#endif
+    return forwardee;
+  }
+
+public:
+
+  ShenandoahBarrierSet();
+
+  void print_on(outputStream* st) const;
+
+  bool is_a(BarrierSet::Name bsn);
+
+  // Capability queries and barrier entry points required by BarrierSet.
+  bool has_read_prim_array_opt();
+  bool has_read_prim_barrier();
+  bool has_read_ref_array_opt();
+  bool has_read_ref_barrier();
+  bool has_read_region_opt();
+  bool has_write_prim_array_opt();
+  bool has_write_prim_barrier();
+  bool has_write_ref_array_opt();
+  bool has_write_ref_barrier();
+  bool has_write_ref_pre_barrier();
+  bool has_write_region_opt();
+  bool is_aligned(HeapWord* hw);
+  void read_prim_array(MemRegion mr);
+  void read_prim_field(HeapWord* hw, size_t s);
+  bool read_prim_needs_barrier(HeapWord* hw, size_t s);
+  void read_ref_array(MemRegion mr);
+
+  void read_ref_field(void* v);
+
+  bool read_ref_needs_barrier(void* v);
+  void read_region(MemRegion mr);
+  void resize_covered_region(MemRegion mr);
+  void write_prim_array(MemRegion mr);
+  void write_prim_field(HeapWord* hw, size_t s , juint x, juint y);
+  bool write_prim_needs_barrier(HeapWord* hw, size_t s, juint x, juint y);
+  void write_ref_array_work(MemRegion mr);
+
+  template <class T> void
+  write_ref_array_pre_work(T* dst, int count);
+
+  void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
+
+  void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
+
+
+  template <class T> static void write_ref_field_pre_static(T* field, oop newVal);
+
+  // We export this to make it available in cases where the static
+  // type of the barrier set is known.  Note that it is non-virtual.
+  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
+
+  // These are the more general virtual versions.
+  void write_ref_field_pre_work(oop* field, oop new_val);
+  void write_ref_field_pre_work(narrowOop* field, oop new_val);
+  void write_ref_field_pre_work(void* field, oop new_val);
+
+  void write_ref_field_work(void* v, oop o, bool release = false);
+  void write_region_work(MemRegion mr);
+
+  virtual oop resolve_oop(oop src);
+
+  // Resolves obj through its forwarding pointer and, if it has moved,
+  // updates the reference at p to point at the new location.
+  template <class T>
+  static inline oop resolve_and_update_oop_static(T p, oop obj) {
+    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
+    if (forw == obj) {
+      return obj;
+    }
+    oopDesc::encode_store_heap_oop_not_null(p, forw);
+    return forw;
+  }
+
+  // Resolves a known-non-NULL oop; in debug builds additionally checks that
+  // the forwarding chain has length at most one and that the result is a
+  // valid heap oop.
+  static inline oop resolve_oop_static_not_null(oop p) {
+    assert(p != NULL, "Must be NULL checked");
+
+    oop result = get_shenandoah_forwardee_helper(p);
+    if (result == NULL) {
+      return result;
+    }
+
+#ifdef ASSERT
+    if (result != p) {
+      oop second_forwarding = get_shenandoah_forwardee_helper(result);
+
+      // We should never be forwarded more than once.
+      if (result != second_forwarding) {
+        ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+        tty->print("first reference "PTR_FORMAT" is in heap region:\n", p2i((HeapWord*) p));
+        sh->heap_region_containing(p)->print();
+        tty->print("first_forwarding "PTR_FORMAT" is in heap region:\n", p2i((HeapWord*) result));
+        sh->heap_region_containing(result)->print();
+        tty->print("final reference "PTR_FORMAT" is in heap region:\n", p2i((HeapWord*) second_forwarding));
+        sh->heap_region_containing(second_forwarding)->print();
+        assert(get_shenandoah_forwardee_helper(result) == result, "Only one fowarding per customer");
+      }
+    }
+#endif
+    if (! ShenandoahVerifyReadsToFromSpace) {
+      // is_oop() would trigger a SEGFAULT when we're checking from-space-access.
+      assert(ShenandoahHeap::heap()->is_in(result) && result->is_oop(), "resolved oop must be a valid oop in the heap");
+    }
+    return result;
+  }
+
+  // NULL-tolerant variant of resolve_oop_static_not_null().
+  static inline oop resolve_oop_static(oop p) {
+    return ((HeapWord*) p) == NULL ? p : resolve_oop_static_not_null(p);
+  }
+
+  // Like resolve_oop_static(), but performs none of the debug verification.
+  static inline oop resolve_oop_static_no_check(oop p) {
+    return ((HeapWord*) p) == NULL ? p : get_shenandoah_forwardee_helper(p);
+  }
+
+
+  virtual oop maybe_resolve_oop(oop src);
+  oop resolve_and_maybe_copy_oopHelper(oop src);
+  oop resolve_and_maybe_copy_oop_work(oop src);
+  oop resolve_and_maybe_copy_oop_work2(oop src);
+  virtual oop resolve_and_maybe_copy_oop(oop src);
+
+  // Runtime entry points used from C2-, interpreter- and C1-generated code.
+  static oopDesc* resolve_and_maybe_copy_oop_c2(oopDesc* src);
+  static oopDesc* resolve_and_maybe_copy_oop_interp(oopDesc* src);
+  static oopDesc* resolve_and_maybe_copy_oop_c1(JavaThread* thread, oopDesc* src);
+
+private:
+  bool need_update_refs_barrier();
+
+#ifndef CC_INTERP
+public:
+  // TODO: The following should really live in an X86 specific subclass.
+  virtual void compile_resolve_oop(MacroAssembler* masm, Register dst);
+  virtual void compile_resolve_oop_not_null(MacroAssembler* masm, Register dst);
+  void compile_resolve_oop_for_write(MacroAssembler* masm, Register dst, bool explicit_null_check, int stack_adjust, int num_save_state, ...);
+
+private:
+  void compile_resolve_oop_runtime(MacroAssembler* masm, Register dst);
+
+#endif
+};
+
+// Barrier set installed during a full GC (mark-compact). No object is
+// evacuated concurrently in that mode, so Brooks-pointer resolution
+// degenerates to the identity function.
+// FIX: the overrides used to sit in the class's default *private* section
+// and lacked the virtual keyword; make them public (callers holding this
+// type can now invoke them directly) and mark them virtual for consistency
+// with the base class declarations.
+class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet {
+public:
+   virtual oop resolve_oop(oop src) {
+     return src;
+   }
+   virtual oop maybe_resolve_oop(oop src) {
+     return src;
+   }
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,761 @@
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+
+// Pluggable policy that decides when to start a concurrent mark cycle and
+// which regions go into the collection set. Concrete implementations are
+// selected at startup via -XX:ShenandoahGCHeuristics (see
+// ShenandoahCollectorPolicy's constructor).
+class ShenandoahHeuristics : public CHeapObj<mtGC> {
+
+  NumberSeq _allocation_rate_bytes;   // samples fed by record_bytes_allocated()
+  NumberSeq _reclamation_rate_bytes;  // samples fed by record_bytes_reclaimed()
+
+  size_t _bytes_allocated_since_CM;
+  size_t _bytes_reclaimed_this_cycle;
+
+protected:
+  // Allocation bookkeeping around a concurrent-mark cycle, maintained by
+  // record_bytes_start_CM() / record_bytes_end_CM().
+  size_t _bytes_allocated_start_CM;
+  size_t _bytes_allocated_during_CM;
+
+public:
+
+  ShenandoahHeuristics();
+
+  void record_bytes_allocated(size_t bytes);
+  void record_bytes_reclaimed(size_t bytes);
+  void record_bytes_start_CM(size_t bytes);
+  void record_bytes_end_CM(size_t bytes);
+
+  // Returns true when a new concurrent mark cycle should begin now.
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
+  // Whether references should be updated early; the default implementation
+  // returns the ShenandoahUpdateRefsEarly flag.
+  virtual bool update_refs_early();
+  // Partitions region_set into a collection set and a free set.
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set, 
+                                               ShenandoahHeapRegionSet* collection_set, 
+                                               ShenandoahHeapRegionSet* free_set) =0;
+  void print_tracing_info();
+};
+
+// All bookkeeping counters start at zero; subclasses print their own
+// banner in addition to this one.
+ShenandoahHeuristics::ShenandoahHeuristics() :
+  _bytes_allocated_since_CM(0),
+  _bytes_reclaimed_this_cycle(0),
+  _bytes_allocated_start_CM(0),
+  _bytes_allocated_during_CM(0)
+{
+  if (PrintGCDetails) {
+    tty->print_cr("initializing heuristics");
+  }
+}
+
+// Stamps the start time of a timing phase and, under PrintGCTimeStamps,
+// emits the "[GC ... start" log line; cycle-opening pauses also start the
+// matching GC tracer/timer pair.
+void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
+  _timing_data[phase]._start = os::elapsedTime();
+
+  if (!PrintGCTimeStamps) return;
+
+  if (phase == init_mark) {
+    _tracer->report_gc_start(GCCause::_shenandoah_init_mark, _conc_timer->gc_start());
+  } else if (phase == full_gc) {
+    _tracer->report_gc_start(GCCause::_last_ditch_collection, _stw_timer->gc_start());
+  }
+
+  gclog_or_tty->gclog_stamp(_tracer->gc_id());
+  gclog_or_tty->print("[GC %s start", _phase_names[phase]);
+
+  ShenandoahHeap* heap = (ShenandoahHeap*) Universe::heap();
+  gclog_or_tty->print(" total = " SIZE_FORMAT " K, used = " SIZE_FORMAT " K free = " SIZE_FORMAT " K",
+                      heap->capacity()/ K, heap->used() /K,
+                      ((heap->capacity() - heap->used())/K) );
+
+  // Point out any mismatch between the cached and freshly computed usage.
+  if (heap->calculateUsed() != heap->used()) {
+    gclog_or_tty->print("calc used = " SIZE_FORMAT " K heap used = " SIZE_FORMAT " K",
+                        heap->calculateUsed() / K, heap->used() / K);
+  }
+  gclog_or_tty->print_cr("]");
+}
+
+// Records the elapsed time of a timing phase and, under PrintGCTimeStamps,
+// emits the "[GC ... end" log line and closes the matching GC tracer.
+void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
+  double end = os::elapsedTime();
+  double elapsed = end - _timing_data[phase]._start;
+  _timing_data[phase]._ms.add(elapsed * 1000);
+
+  // BUG FIX: the end-of-CM allocation bookkeeping for the final-evacuation
+  // pause used to be nested inside the PrintGCTimeStamps block below, so
+  // the heuristics only saw this data when timestamp logging was enabled.
+  // Policy state must not depend on a logging flag.
+  if (phase == final_evac) {
+    this->record_bytes_end_CM(ShenandoahHeap::heap()->_bytesAllocSinceCM);
+  }
+
+  if (ShenandoahGCVerbose && PrintGCDetails) {
+    tty->print_cr("PolicyPrint: %s "SIZE_FORMAT" took %lf ms", _phase_names[phase],
+                  _timing_data[phase]._count++, elapsed * 1000);
+  }
+  if (PrintGCTimeStamps) {
+    ShenandoahHeap* heap = (ShenandoahHeap*) Universe::heap();
+    gclog_or_tty->gclog_stamp(_tracer->gc_id());
+
+    gclog_or_tty->print("[GC %s end, %lf secs", _phase_names[phase], elapsed );
+    gclog_or_tty->print(" total = " SIZE_FORMAT " K, used = " SIZE_FORMAT " K free = " SIZE_FORMAT " K",
+                        heap->capacity()/ K, heap->used() /K,
+                        ((heap->capacity() - heap->used())/K) );
+
+    if (heap->calculateUsed() != heap->used()) {
+      gclog_or_tty->print("calc used = " SIZE_FORMAT " K heap used = " SIZE_FORMAT " K",
+                          heap->calculateUsed() / K, heap->used() / K);
+    }
+    gclog_or_tty->print_cr("]");
+
+    // Tracer start/end pairing mirrors record_phase_start(), which also
+    // only reports under PrintGCTimeStamps.
+    if (phase == recycle_regions) {
+      _tracer->report_gc_end(_conc_timer->gc_end(), _conc_timer->time_partitions());
+    } else if (phase == full_gc) {
+      _tracer->report_gc_end(_stw_timer->gc_end(), _stw_timer->time_partitions());
+    } else if (phase == conc_mark || phase == conc_evac || phase == conc_uprefs || phase == prepare_evac) {
+      if (_conc_gc_aborted) {
+        _tracer->report_gc_end(_conc_timer->gc_end(), _conc_timer->time_partitions());
+        clear_conc_gc_aborted();
+      }
+    }
+  }
+}
+
+// Notes that the concurrent GC cycle was cancelled.
+// BUG FIX: set_conc_gc_aborted() used to be called only when
+// PrintGCTimeStamps was enabled, making the abort state depend on a
+// logging flag. Record the abort unconditionally; only the message is
+// gated on the flag.
+void ShenandoahCollectorPolicy::report_concgc_cancelled() {
+  if (PrintGCTimeStamps)  {
+    gclog_or_tty->print("Concurrent GC Cancelled\n");
+  }
+  set_conc_gc_aborted();
+}
+
+// Default early-update-refs policy: follow the ShenandoahUpdateRefsEarly
+// flag verbatim.
+bool ShenandoahHeuristics::update_refs_early() {
+  return ShenandoahUpdateRefsEarly;
+}
+
+// Records the bytes allocated since the last concurrent mark and feeds the
+// allocation-rate sequence.
+// NOTE(review): this also overwrites _bytes_allocated_start_CM even though
+// record_bytes_start_CM() exists for exactly that purpose — confirm the
+// double update is intentional.
+void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
+  _bytes_allocated_since_CM = bytes;
+  _bytes_allocated_start_CM = bytes;
+  _allocation_rate_bytes.add(bytes);
+}
+
+// Records the bytes reclaimed by the current cycle and feeds the
+// reclamation-rate sequence.
+void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
+  _bytes_reclaimed_this_cycle = bytes;
+  _reclamation_rate_bytes.add(bytes);
+}
+
+// Snapshot of the allocation counter at the start of concurrent mark.
+void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
+  _bytes_allocated_start_CM = bytes;
+}
+
+// Bytes allocated while concurrent mark ran; if the counter is below the
+// start-of-CM snapshot (it was reset meanwhile), use the raw value instead.
+void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
+  _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
+                                                                   : bytes;
+}
+
+// Collect as aggressively as possible: every policy query requests a new
+// cycle, and the region set is told to use a tiny garbage threshold (8) so
+// that nearly every region becomes an evacuation candidate.
+class AggressiveHeuristics : public ShenandoahHeuristics {
+public:
+  AggressiveHeuristics() : ShenandoahHeuristics() {
+    if (PrintGCDetails) {
+      tty->print_cr("Initializing aggressive heuristics");
+    }
+  }
+
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+    return true;
+  }
+
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set) {
+    region_set->set_garbage_threshold(8);
+    region_set->choose_collection_and_free_sets(collection_set, free_set);
+  }
+};
+
+// Start marking once the heap is at least half full and at least a quarter
+// of the heap's capacity was allocated since the previous cycle; regions
+// that are at least half garbage become evacuation candidates.
+class HalfwayHeuristics : public ShenandoahHeuristics {
+public:
+  HalfwayHeuristics() : ShenandoahHeuristics() {
+    if (PrintGCDetails) {
+      tty->print_cr("Initializing halfway heuristics");
+    }
+  }
+
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t threshold_bytes_allocated = heap->capacity() / 4;
+    return used * 2 > capacity && heap->_bytesAllocSinceCM > threshold_bytes_allocated;
+  }
+
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set) {
+    region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2);
+    region_set->choose_collection_and_free_sets(collection_set, free_set);
+  }
+};
+
+// GC as little as possible: only start marking once 80% of the capacity is
+// used; region selection runs with the region set's default threshold.
+class LazyHeuristics : public ShenandoahHeuristics {
+public:
+  LazyHeuristics() : ShenandoahHeuristics() {
+    if (PrintGCDetails) {
+      tty->print_cr("Initializing lazy heuristics");
+    }
+  }
+
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+    size_t targetStartMarking = (capacity / 5) * 4;
+    return used > targetStartMarking;
+  }
+
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set) {
+    region_set->choose_collection_and_free_sets(collection_set, free_set);
+  }
+};
+
+// The heuristics that were in place when this class hierarchy was
+// introduced: mark when usage exceeds 1/16th of capacity and at least a
+// quarter of the capacity was allocated since the last cycle.
+class StatusQuoHeuristics : public ShenandoahHeuristics {
+public:
+  StatusQuoHeuristics() : ShenandoahHeuristics() {
+    if (PrintGCDetails) {
+      tty->print_cr("Initializing status quo heuristics");
+    }
+  }
+
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+    size_t targetStartMarking = capacity / 16;
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t threshold_bytes_allocated = heap->capacity() / 4;
+    // Require both the used-heap target and a minimum amount of fresh
+    // allocation before triggering another cycle.
+    return used > targetStartMarking
+        && heap->_bytesAllocSinceCM > threshold_bytes_allocated;
+  }
+
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set) {
+    region_set->choose_collection_and_free_sets(collection_set, free_set);
+  }
+};
+
+// Clamps value into the inclusive range [min, max].
+static uintx clamp(uintx value, uintx min, uintx max) {
+  return MIN2(MAX2(value, min), max);
+}
+
+// Maps a 0..100 flag value onto a 0.0..1.0 factor, clamping out-of-range
+// input first.
+static double get_percent(uintx value) {
+  double percent = static_cast<double>(clamp(value, 0, 100));
+  return percent / 100.;
+}
+// Heuristics driven by command-line thresholds (wired up in
+// configureDynamicHeuristics()): start marking when free space falls below
+// a percentage of capacity and enough allocation happened since the last
+// cycle.
+class DynamicHeuristics : public ShenandoahHeuristics {
+private:
+  // Factors are the 0..1 form of the raw 0..100 threshold values below.
+  double _free_threshold_factor;
+  double _garbage_threshold_factor;
+  double _allocation_threshold_factor;
+
+  uintx _free_threshold;
+  uintx _garbage_threshold;
+  uintx _allocation_threshold;
+
+public:
+  DynamicHeuristics() : ShenandoahHeuristics() {
+    if (PrintGCDetails) {
+      tty->print_cr("Initializing dynamic heuristics");
+    }
+
+    _free_threshold = 0;
+    _garbage_threshold = 0;
+    _allocation_threshold = 0;
+
+    _free_threshold_factor = 0.;
+    _garbage_threshold_factor = 0.;
+    _allocation_threshold_factor = 0.;
+  }
+
+  virtual ~DynamicHeuristics() {}
+
+  // Start when available free space drops below the free threshold and the
+  // allocation threshold was crossed since the last cycle.
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+
+    bool shouldStartConcurrentMark = false;
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t available = heap->free_regions()->available();
+    // A different free threshold applies while reference updates are still
+    // pending (ShenandoahFreeThreshold vs. ShenandoahInitialFreeThreshold).
+    uintx factor = heap->need_update_refs() ? ShenandoahFreeThreshold : ShenandoahInitialFreeThreshold;
+    size_t targetStartMarking = (capacity * factor) / 100;
+
+    size_t threshold_bytes_allocated = heap->capacity() * _allocation_threshold_factor;
+    if (available < targetStartMarking &&
+        heap->_bytesAllocSinceCM > threshold_bytes_allocated)
+    {
+      // Need to check that an appropriate number of regions have
+      // been allocated since last concurrent mark too.
+      shouldStartConcurrentMark = true;
+    }
+
+    if (shouldStartConcurrentMark && ShenandoahTracePhases) {
+      tty->print_cr("Start GC at available: "SIZE_FORMAT", factor: "UINTX_FORMAT", update-refs: %s", available, factor, BOOL_TO_STR(heap->need_update_refs()));
+    }
+    return shouldStartConcurrentMark;
+  }
+
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set)
+  {
+    region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes * _garbage_threshold_factor);
+    region_set->choose_collection_and_free_sets(collection_set, free_set);
+  }
+
+  // Setters keep both the raw flag value and its get_percent() factor.
+  void set_free_threshold(uintx free_threshold) {
+    this->_free_threshold_factor = get_percent(free_threshold);
+    this->_free_threshold = free_threshold;
+  }
+
+  void set_garbage_threshold(uintx garbage_threshold) {
+    this->_garbage_threshold_factor = get_percent(garbage_threshold);
+    this->_garbage_threshold = garbage_threshold;
+  }
+
+  void set_allocation_threshold(uintx allocationThreshold) {
+    this->_allocation_threshold_factor = get_percent(allocationThreshold);
+    this->_allocation_threshold = allocationThreshold;
+  }
+
+  uintx get_allocation_threshold() {
+    return this->_allocation_threshold;
+  }
+
+  uintx get_garbage_threshold() {
+    return this->_garbage_threshold;
+  }
+
+  uintx get_free_threshold() {
+    return this->_free_threshold;
+  }
+};
+
+
+// Heuristics that adapt to observed behavior: estimate the live data and
+// per-cycle allocation from past cycles and start marking early enough
+// that the heap should not fill up before evacuation finishes.
+class AdaptiveHeuristics : public ShenandoahHeuristics {
+private:
+  size_t _max_live_data;   // max live bytes seen in any collection set so far
+  double _used_threshold_factor;
+  double _garbage_threshold_factor;
+  double _allocation_threshold_factor;
+
+  uintx _used_threshold;
+  uintx _garbage_threshold;
+  uintx _allocation_threshold;
+
+public:
+  AdaptiveHeuristics() : ShenandoahHeuristics() {
+    if (PrintGCDetails) {
+      // BUG FIX: banner used to say "dynamic" (copy-paste from
+      // DynamicHeuristics).
+      tty->print_cr("Initializing adaptive heuristics");
+    }
+
+    _max_live_data = 0;
+
+    _used_threshold = 0;
+    _garbage_threshold = 0;
+    _allocation_threshold = 0;
+
+    _used_threshold_factor = 0.;
+    _garbage_threshold_factor = 0.1;
+    _allocation_threshold_factor = 0.;
+  }
+
+  virtual ~AdaptiveHeuristics() {}
+
+  // Start marking once the used estimate plus the expected live data and
+  // one cycle's worth of allocation would no longer fit into the heap.
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+
+    ShenandoahHeap* _heap = ShenandoahHeap::heap();
+
+    size_t max_live_data = _max_live_data;
+    if (max_live_data == 0) {
+      max_live_data = capacity * 0.2; // Very generous initial value.
+    } else {
+      max_live_data *= 1.3; // Add 30% wiggle room. (Comment used to claim 20%.)
+    }
+    size_t max_cycle_allocated = _heap->_max_allocated_gc;
+    if (max_cycle_allocated == 0) {
+      max_cycle_allocated = capacity * 0.3; // Very generous.
+    } else {
+      max_cycle_allocated *= 1.3; // Add 30% wiggle room. Should be enough.
+    }
+
+    // BUG FIX: the old "capacity - allocated - live" subtraction could wrap
+    // below zero (size_t), producing a huge threshold that suppressed GC
+    // indefinitely. If the estimates exceed capacity, start immediately.
+    size_t reserved = max_cycle_allocated + max_live_data;
+    if (reserved >= _heap->capacity()) {
+      return true;
+    }
+    size_t threshold = _heap->capacity() - reserved;
+    return used > threshold;
+  }
+
+  // Selects a collection set that frees at least as much garbage as was
+  // allocated since the last cycle, and remembers the largest live-data
+  // volume seen for future start decisions.
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set)
+  {
+    size_t bytes_alloc = ShenandoahHeap::heap()->_bytesAllocSinceCM;
+    size_t min_garbage =  bytes_alloc;
+    region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes * _garbage_threshold_factor);
+    region_set->choose_collection_and_free_sets_min_garbage(collection_set, free_set, min_garbage);
+    _max_live_data = MAX2(_max_live_data, collection_set->live_data());
+  }
+
+  // Setters keep both the raw flag value and its get_percent() factor.
+  void set_used_threshold(uintx used_threshold) {
+    this->_used_threshold_factor = get_percent(used_threshold);
+    this->_used_threshold = used_threshold;
+  }
+
+  void set_garbage_threshold(uintx garbage_threshold) {
+    this->_garbage_threshold_factor = get_percent(garbage_threshold);
+    this->_garbage_threshold = garbage_threshold;
+  }
+
+  void set_allocation_threshold(uintx allocationThreshold) {
+    this->_allocation_threshold_factor = get_percent(allocationThreshold);
+    this->_allocation_threshold = allocationThreshold;
+  }
+
+  uintx get_allocation_threshold() {
+    return this->_allocation_threshold;
+  }
+
+  uintx get_garbage_threshold() {
+    return this->_garbage_threshold;
+  }
+
+  uintx get_used_threshold() {
+    return this->_used_threshold;
+  }
+};
+
+// Variant of the adaptive heuristics that sizes the collection set by the
+// amount of garbage needed to cover allocation since the last cycle.
+class NewAdaptiveHeuristics : public ShenandoahHeuristics {
+private:
+  size_t _max_live_data;           // max live bytes seen in any collection set
+  double _target_heap_occupancy_factor;
+  double _allocation_threshold_factor;
+  size_t _last_bytesAllocSinceCM;  // allocation snapshot from the last selection
+
+  uintx _target_heap_occupancy;
+  uintx _allocation_threshold;
+
+public:
+  NewAdaptiveHeuristics() : ShenandoahHeuristics()
+  {
+    if (PrintGCDetails) {
+      tty->print_cr("Initializing newadaptive heuristics");
+    }
+    _max_live_data = 0;
+    _allocation_threshold = 0;
+    _target_heap_occupancy_factor = 0.;
+    _allocation_threshold_factor = 0.;
+    _last_bytesAllocSinceCM = 0;
+  }
+
+  virtual ~NewAdaptiveHeuristics() {}
+
+  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const
+  {
+      // Both branches require at least a quarter of the heap's capacity
+      // worth of allocation activity (computation hoisted out of the
+      // duplicated branches).
+      ShenandoahHeap *heap = ShenandoahHeap::heap();
+      size_t threshold_bytes_allocated = heap->capacity() / 4;
+      if (this->_bytes_allocated_during_CM > 0) {
+          // Not the first concurrent mark: use the configured occupancy
+          // target and the allocation observed during the previous mark.
+          size_t targetStartMarking = (size_t) capacity * this->_target_heap_occupancy_factor;
+          return (used > targetStartMarking) && (this->_bytes_allocated_during_CM > threshold_bytes_allocated);
+      } else {
+          // First concurrent mark: fall back to a half-full heap.
+          size_t targetStartMarking = capacity / 2;
+          return (used > targetStartMarking) && (heap->_bytesAllocSinceCM > threshold_bytes_allocated);
+      }
+  }
+
+  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
+                                               ShenandoahHeapRegionSet* collection_set,
+                                               ShenandoahHeapRegionSet* free_set)
+  {
+    // FIX: removed an unused ShenandoahHeap local that was never read.
+    this->_last_bytesAllocSinceCM = ShenandoahHeap::heap()->_bytesAllocSinceCM;
+    if (this->_last_bytesAllocSinceCM > 0) {
+      // Reclaim at least as much garbage as was allocated since last cycle.
+      size_t min_garbage = this->_last_bytesAllocSinceCM;
+      region_set->choose_collection_and_free_sets_min_garbage(collection_set, free_set, min_garbage);
+    } else {
+      region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2);
+      region_set->choose_collection_and_free_sets(collection_set, free_set);
+    }
+    this->_max_live_data = MAX2(this->_max_live_data, collection_set->live_data());
+  }
+
+  void set_target_heap_occupancy(uintx target_heap_occupancy) {
+    this->_target_heap_occupancy_factor = get_percent(target_heap_occupancy);
+    this->_target_heap_occupancy = target_heap_occupancy;
+  }
+
+  void set_allocation_threshold(uintx allocationThreshold) {
+    this->_allocation_threshold_factor = get_percent(allocationThreshold);
+    this->_allocation_threshold = allocationThreshold;
+  }
+
+  uintx get_allocation_threshold() {
+    return this->_allocation_threshold;
+  }
+
+  uintx get_target_heap_occupancy() {
+    return this->_target_heap_occupancy;
+  }
+};
+
+
+// Builds a DynamicHeuristics instance configured from the Shenandoah
+// command-line threshold flags.
+static DynamicHeuristics *configureDynamicHeuristics() {
+  DynamicHeuristics *heuristics = new DynamicHeuristics();
+
+  heuristics->set_garbage_threshold(ShenandoahGarbageThreshold);
+  heuristics->set_allocation_threshold(ShenandoahAllocationThreshold);
+  heuristics->set_free_threshold(ShenandoahFreeThreshold);
+  if (ShenandoahLogConfig) {
+    // BUG FIX: the second value printed is the *free* threshold but the
+    // message labelled it "used"; also, the getters return uintx, so print
+    // with UINTX_FORMAT rather than SIZE_FORMAT.
+    tty->print_cr("Shenandoah dynamic heuristics thresholds: allocation "UINTX_FORMAT", free "UINTX_FORMAT", garbage "UINTX_FORMAT,
+                  heuristics->get_allocation_threshold(),
+                  heuristics->get_free_threshold(),
+                  heuristics->get_garbage_threshold());
+  }
+  return heuristics;
+}
+
+
+// Builds a NewAdaptiveHeuristics instance configured from the target heap
+// occupancy flag.
+static NewAdaptiveHeuristics* configureNewAdaptiveHeuristics() {
+  NewAdaptiveHeuristics* heuristics = new NewAdaptiveHeuristics();
+
+  heuristics->set_target_heap_occupancy(ShenandoahTargetHeapOccupancy);
+  if (ShenandoahLogConfig) {
+    // get_target_heap_occupancy() returns uintx, so use UINTX_FORMAT.
+    tty->print_cr( "Shenandoah newadaptive heuristics target heap occupancy: "UINTX_FORMAT,
+                   heuristics->get_target_heap_occupancy() );
+  }
+  return heuristics;
+}
+
+
+// Maps the -XX:ShenandoahGCHeuristics name onto a freshly allocated
+// heuristics implementation. NULL selects the "statusquo" default;
+// unrecognized names are fatal. Extracted from the constructor to remove
+// seven near-identical log/construct branches. (The only observable
+// difference: an unknown name is now echoed to the config log before the
+// fatal error, which aids diagnosis.)
+static ShenandoahHeuristics* select_heuristics() {
+  if (ShenandoahGCHeuristics == NULL) {
+    if (ShenandoahLogConfig) {
+      tty->print_cr("Shenandoah heuristics: statusquo (default)");
+    }
+    return new StatusQuoHeuristics();
+  }
+
+  // Log before construction so the config line precedes any banner the
+  // heuristics constructor prints under PrintGCDetails.
+  if (ShenandoahLogConfig) {
+    tty->print_cr("Shenandoah heuristics: %s", ShenandoahGCHeuristics);
+  }
+  if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
+    return new AggressiveHeuristics();
+  } else if (strcmp(ShenandoahGCHeuristics, "statusquo") == 0) {
+    return new StatusQuoHeuristics();
+  } else if (strcmp(ShenandoahGCHeuristics, "halfway") == 0) {
+    return new HalfwayHeuristics();
+  } else if (strcmp(ShenandoahGCHeuristics, "lazy") == 0) {
+    return new LazyHeuristics();
+  } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
+    return configureDynamicHeuristics();
+  } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
+    return new AdaptiveHeuristics();
+  } else if (strcmp(ShenandoahGCHeuristics, "newadaptive") == 0) {
+    return configureNewAdaptiveHeuristics();
+  }
+  fatal("Unknown -XX:ShenandoahGCHeuristics option");
+  return NULL;
+}
+
+// Sets up region sizing, tracing/timing infrastructure, the table of
+// human-readable phase names, and the selected heuristics.
+ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() {
+
+  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), initial_heap_byte_size());
+
+  initialize_all();
+
+  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
+  _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
+  _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
+  _user_requested_gcs = 0;
+  _allocation_failure_gcs = 0;
+  _conc_gc_aborted = false;
+
+  // Names used by the GC log and print_tracing_info(); must cover every
+  // TimingPhase value.
+  _phase_names[init_mark] = "InitMark";
+  _phase_names[final_mark] = "FinalMark";
+  _phase_names[rescan_roots] = "RescanRoots";
+  _phase_names[drain_satb] = "DrainSATB";
+  _phase_names[drain_queues] = "DrainQueues";
+  _phase_names[weakrefs] = "WeakRefs";
+  _phase_names[prepare_evac] = "PrepareEvac";
+  _phase_names[init_evac] = "InitEvac";
+  _phase_names[final_evac] = "FinalEvacuation";
+  _phase_names[final_uprefs] = "FinalUpdateRefs";
+
+  _phase_names[update_roots] = "UpdateRoots";
+  _phase_names[recycle_regions] = "RecycleRegions";
+  _phase_names[reset_bitmaps] = "ResetBitmaps";
+  _phase_names[resize_tlabs] = "ResizeTLABs";
+
+  _phase_names[full_gc] = "FullGC";
+  _phase_names[conc_mark] = "ConcurrentMark";
+  _phase_names[conc_evac] = "ConcurrentEvacuation";
+  _phase_names[conc_uprefs] = "ConcurrentUpdateReferences";
+
+  _heuristics = select_heuristics();
+}
+
+// This policy object serves as its own "pgc" policy.
+ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
+  return this;
+}
+
+ShenandoahCollectorPolicy::Name ShenandoahCollectorPolicy::kind() {
+  return CollectorPolicy::ShenandoahCollectorPolicyKind;
+}
+
+BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
+  return BarrierSet::ShenandoahBarrierSet;
+}
+
+// Allocation goes through ShenandoahHeap directly; these CollectorPolicy
+// hooks are not wired up and must never be reached.
+HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
+                                                       bool is_tlab,
+                                                       bool* gc_overhead_limit_was_exceeded) {
+  guarantee(false, "Not using this policy feature yet.");
+  return NULL;
+}
+
+HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
+  guarantee(false, "Not using this policy feature yet.");
+  return NULL;
+}
+
+// Both space and heap alignment are tied to the fixed region size.
+void ShenandoahCollectorPolicy::initialize_alignments() {
+
+  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
+  _space_alignment = ShenandoahHeapRegion::RegionSizeBytes;
+  _heap_alignment = ShenandoahHeapRegion::RegionSizeBytes;
+}
+
+void ShenandoahCollectorPolicy::post_heap_initialize() {
+  // Nothing to do here (yet).
+}
+
+// The next four methods simply forward allocation/reclamation bookkeeping
+// to the installed heuristics.
+void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
+  _heuristics->record_bytes_allocated(bytes);
+}
+
+void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
+  _heuristics->record_bytes_start_CM(bytes);
+}
+
+void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
+  _heuristics->record_bytes_end_CM(bytes);
+}
+
+void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
+  _heuristics->record_bytes_reclaimed(bytes);
+}
+
+// Counters reported by print_tracing_info().
+void ShenandoahCollectorPolicy::record_user_requested_gc() {
+  _user_requested_gcs++;
+}
+
+void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
+  _allocation_failure_gcs++;
+}
+
+// Delegates the start-marking decision to the installed heuristics.
+// FIX: removed an unused ShenandoahHeap::heap() local.
+bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
+                                                             size_t capacity) {
+  return _heuristics->should_start_concurrent_mark(used, capacity);
+}
+
+// Forwarders into the installed heuristics implementation.
+bool ShenandoahCollectorPolicy::update_refs_early() {
+  return _heuristics->update_refs_early();
+}
+
+void ShenandoahCollectorPolicy::choose_collection_and_free_sets(
+    ShenandoahHeapRegionSet* region_set,
+    ShenandoahHeapRegionSet* collection_set,
+    ShenandoahHeapRegionSet* free_set) {
+  _heuristics->choose_collection_and_free_sets(region_set, collection_set, free_set);
+}
+
+// Dumps a per-phase timing summary (pauses, concurrent phases, GC-cause
+// counters and an aggregate over the four pause phases) to the GC log.
+void ShenandoahCollectorPolicy::print_tracing_info() {
+  print_summary_sd("Initial Mark Pauses", 0, &(_timing_data[init_mark]._ms));
+  print_summary_sd("Final Mark Pauses", 0, &(_timing_data[final_mark]._ms));
+
+  print_summary_sd("Rescan Roots", 2, &(_timing_data[rescan_roots]._ms));
+  print_summary_sd("Drain SATB", 2, &(_timing_data[drain_satb]._ms));
+  print_summary_sd("Drain Queues", 2, &(_timing_data[drain_queues]._ms));
+  if (ShenandoahProcessReferences) {
+    print_summary_sd("Weak References", 2, &(_timing_data[weakrefs]._ms));
+  }
+  print_summary_sd("Prepare Evacuation", 2, &(_timing_data[prepare_evac]._ms));
+  print_summary_sd("Initial Evacuation", 2, &(_timing_data[init_evac]._ms));
+
+  print_summary_sd("Final Evacuation Pauses", 0, &(_timing_data[final_evac]._ms));
+  print_summary_sd("Final Update Refs Pauses", 0, &(_timing_data[final_uprefs]._ms));
+  print_summary_sd("Update roots", 2, &(_timing_data[update_roots]._ms));
+  print_summary_sd("Recycle regions", 2, &(_timing_data[recycle_regions]._ms));
+  print_summary_sd("Reset bitmaps", 2, &(_timing_data[reset_bitmaps]._ms));
+  print_summary_sd("Resize TLABs", 2, &(_timing_data[resize_tlabs]._ms));
+  gclog_or_tty->print_cr(" ");
+  print_summary_sd("Concurrent Marking Times", 0, &(_timing_data[conc_mark]._ms));
+  print_summary_sd("Concurrent Evacuation Times", 0, &(_timing_data[conc_evac]._ms));
+  print_summary_sd("Concurrent Update References Times", 0, &(_timing_data[conc_uprefs]._ms));
+  print_summary_sd("Full GC Times", 0, &(_timing_data[full_gc]._ms));
+
+  gclog_or_tty->print_cr("User requested GCs: "SIZE_FORMAT, _user_requested_gcs);
+  gclog_or_tty->print_cr("Allocation failure GCs: "SIZE_FORMAT, _allocation_failure_gcs);
+
+  gclog_or_tty->print_cr(" ");
+  // Aggregate over the four pause phases (init/final mark, final evac,
+  // final update-refs).
+  double total_sum = _timing_data[init_mark]._ms.sum() +
+    _timing_data[final_mark]._ms.sum() +
+    _timing_data[final_evac]._ms.sum() +
+    _timing_data[final_uprefs]._ms.sum();
+  double total_avg = (_timing_data[init_mark]._ms.avg() +
+                      _timing_data[final_mark]._ms.avg() +
+                      _timing_data[final_evac]._ms.avg() +
+                      _timing_data[final_uprefs]._ms.avg()) / 4.0;
+  double total_max = MAX2(
+                          MAX2(
+                               MAX2(_timing_data[init_mark]._ms.maximum(),
+                                    _timing_data[final_mark]._ms.maximum()),
+                               _timing_data[final_evac]._ms.maximum()),
+                          _timing_data[final_uprefs]._ms.maximum());
+
+  gclog_or_tty->print_cr("%-27s = %8.2lf s, avg = %8.2lf ms, max = %8.2lf ms",
+                         "Total", total_sum / 1000.0, total_avg, total_max);
+
+}
+
+// Prints one two-line summary for a timing sequence: total/average on the
+// first line, sample count, standard deviation and maximum on the second.
+// Values in the sequence are milliseconds; the sum is shown in seconds.
+void ShenandoahCollectorPolicy::print_summary_sd(const char* str, uint indent, const NumberSeq* seq)  {
+  double sum = seq->sum();
+  for (uint i = 0; i < indent; i++) gclog_or_tty->print(" ");
+  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
+                         str, sum / 1000.0, seq->avg());
+  for (uint i = 0; i < indent; i++) gclog_or_tty->print(" ");
+  gclog_or_tty->print_cr("%s = "INT32_FORMAT_W(5)", std dev = %8.2lf ms, max = %8.2lf ms)",
+                         "(num", seq->num(), seq->sd(), seq->maximum());
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,119 @@
+/*
+  Copyright 2014 Red Hat, Inc. and/or its affiliates.
+*/
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAH_COLLECTOR_POLICY_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAH_COLLECTOR_POLICY_HPP
+
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "runtime/arguments.hpp"
+#include "utilities/numberSeq.hpp"
+
+
+class ShenandoahHeap;
+class ShenandoahHeuristics;
+
+class ShenandoahCollectorPolicy: public CollectorPolicy {
+
+public:
+  enum TimingPhase {
+    init_mark,
+    final_mark,
+    rescan_roots,
+    drain_satb,
+    drain_queues,
+    weakrefs,
+    prepare_evac,
+    init_evac,
+
+    final_evac,
+    final_uprefs,
+    update_roots,
+    recycle_regions,
+    reset_bitmaps,
+    resize_tlabs,
+    full_gc,
+    conc_mark,
+    conc_evac,
+    conc_uprefs,
+
+    _num_phases
+  };
+
+private:
+  struct TimingData {
+    NumberSeq _ms;
+    double _start;
+    size_t _count;
+  };
+
+private:
+  TimingData _timing_data[_num_phases];
+  const char* _phase_names[_num_phases];
+
+  size_t _user_requested_gcs;
+  size_t _allocation_failure_gcs;
+
+  ShenandoahHeap* _pgc;
+  ShenandoahHeuristics* _heuristics;
+  ShenandoahTracer* _tracer;
+  STWGCTimer* _stw_timer;
+  ConcurrentGCTimer* _conc_timer;
+  
+  bool _conc_gc_aborted;
+
+public:
+  ShenandoahCollectorPolicy();
+
+  virtual ShenandoahCollectorPolicy* as_pgc_policy();
+
+  virtual ShenandoahCollectorPolicy::Name kind();
+
+  BarrierSet::Name barrier_set_name();
+
+  HeapWord* mem_allocate_work(size_t size,
+			      bool is_tlab,
+			      bool* gc_overhead_limit_was_exceeded);
+
+  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
+
+  void initialize_alignments();
+
+  void post_heap_initialize();
+
+  void record_phase_start(TimingPhase phase);
+  void record_phase_end(TimingPhase phase);
+  void report_concgc_cancelled();
+
+  void record_user_requested_gc();
+  void record_allocation_failure_gc();
+
+  void record_bytes_allocated(size_t bytes);
+  void record_bytes_reclaimed(size_t bytes);
+  void record_bytes_start_CM(size_t bytes);
+  void record_bytes_end_CM(size_t bytes);
+  bool should_start_concurrent_mark(size_t used, size_t capacity);
+  void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set, 
+                                       ShenandoahHeapRegionSet* collection_set,
+                                       ShenandoahHeapRegionSet* free_set);
+
+  bool update_refs_early();
+
+  void print_tracing_info();
+
+  GCTimer* conc_timer(){return _conc_timer;}
+  GCTimer* stw_timer() {return _stw_timer;}
+  ShenandoahTracer* tracer() {return _tracer;}
+
+  void set_conc_gc_aborted() { _conc_gc_aborted = true;}
+  void clear_conc_gc_aborted() {_conc_gc_aborted = false;}
+
+private:
+  void print_summary_sd(const char* str, uint indent, const NumberSeq* seq);
+};
+
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_COLLECTOR_POLICY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentMark.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,742 @@
+/*
+  Copyright 2014 Red Hat, Inc. and/or its affiliates.
+*/
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "classfile/stringTable.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "code/codeCache.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "gc/shared/taskqueue.inline.hpp"
+
+// Mark the object and add it to the queue to be scanned
+ShenandoahMarkObjsClosure::ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, bool update_refs) :
+  _heap((ShenandoahHeap*)(Universe::heap())),
+  _live_data(NEW_C_HEAP_ARRAY(size_t, _heap->max_regions(), mtGC)),
+  _mark_refs(ShenandoahMarkRefsClosure(q, update_refs))
+{
+  Copy::zero_to_bytes(_live_data, _heap->max_regions() * sizeof(size_t));
+}
+
+ShenandoahMarkObjsClosure::~ShenandoahMarkObjsClosure() {
+  // Merge liveness data back into actual regions.
+
+  // We need to lock the heap here, to avoid race with growing of heap.
+  MutexLockerEx ml(ShenandoahHeap_lock, true);
+  ShenandoahHeapRegion** regions = _heap->heap_regions();
+  for (uint i = 0; i < _heap->num_regions(); i++) {
+    regions[i]->increase_live_data(_live_data[i]);
+  }
+  FREE_C_HEAP_ARRAY(size_t, _live_data);
+}
+
+ShenandoahMarkRefsClosure::ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, bool update_refs) :
+  MetadataAwareOopClosure(((ShenandoahHeap *) Universe::heap())->ref_processor()),
+  _queue(q),
+  _heap((ShenandoahHeap*) Universe::heap()),
+  _scm(_heap->concurrentMark()),
+  _update_refs(update_refs)
+{
+}
+
+void ShenandoahMarkRefsClosure::do_oop(narrowOop* p) {
+  Unimplemented();
+}
+
+
+// Walks over all the objects in the generation updating any
+// references to from space.
+
+class CLDMarkAliveClosure : public CLDClosure {
+private:
+  CLDClosure* _cl;
+public:
+  CLDMarkAliveClosure(CLDClosure* cl) : _cl(cl) {
+  }
+  void do_cld(ClassLoaderData* cld) {
+    ShenandoahIsAliveClosure is_alive;
+    if (cld->is_alive(&is_alive)) {
+      _cl->do_cld(cld);
+    }
+  }
+};
+
+class ShenandoahMarkRootsTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+  bool _update_refs;
+public:
+  ShenandoahMarkRootsTask(ShenandoahRootProcessor* rp, bool update_refs) :
+    AbstractGangTask("Shenandoah update roots task"), _rp(rp),
+    _update_refs(update_refs) {
+  }
+
+  void work(uint worker_id) {
+    // tty->print_cr("start mark roots worker: "INT32_FORMAT, worker_id);
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    SCMObjToScanQueue* q = heap->concurrentMark()->get_queue(worker_id);
+    ShenandoahMarkRefsClosure cl(q, _update_refs);
+
+    CodeBlobToOopClosure blobsCl(&cl, true);
+    CLDToOopClosure cldCl(&cl);
+
+    ResourceMark m;
+    if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
+      _rp->process_strong_roots(&cl, &cldCl, &blobsCl);
+    } else {
+      _rp->process_all_roots(&cl, &cldCl, &blobsCl);
+    }
+    // tty->print_cr("finish mark roots worker: "INT32_FORMAT, worker_id);
+  }
+};
+
+class SCMConcurrentMarkingTask : public AbstractGangTask {
+private:
+  ShenandoahConcurrentMark* _cm;
+  ParallelTaskTerminator* _terminator;
+  int _seed;
+  bool _update_refs;
+
+public:
+  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
+    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _seed(17), _update_refs(update_refs) {
+  }
+
+      
+  void work(uint worker_id) {
+
+    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
+    ShenandoahMarkObjsClosure cl(q, _update_refs);
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    while (true) {
+      if (heap->cancelled_concgc() ||
+	  (!_cm->try_queue(q, &cl) &&
+	   !_cm->try_draining_an_satb_buffer(worker_id) &&
+	   !_cm->try_to_steal(worker_id, &cl, &_seed))
+	  ) {
+	if (_terminator->offer_termination()) break;
+      }
+    }
+    if (ShenandoahTracePhases && heap->cancelled_concgc()) {
+      tty->print_cr("Cancelled concurrent marking");
+    }
+  }
+};
+
+void ShenandoahConcurrentMark::prepare_unmarked_root_objs() {
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  bool update_refs = heap->need_update_refs();
+
+  if (update_refs) {
+    COMPILER2_PRESENT(DerivedPointerTable::clear());
+  }
+
+  prepare_unmarked_root_objs_no_derived_ptrs(update_refs);
+
+  if (update_refs) {
+    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+  }
+
+}
+
+void ShenandoahConcurrentMark::prepare_unmarked_root_objs_no_derived_ptrs(bool update_refs) {
+  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (ShenandoahParallelRootScan) {
+
+    ClassLoaderDataGraph::clear_claimed_marks();
+    heap->conc_workers()->set_active_workers(_max_conc_worker_id);
+    ShenandoahRootProcessor root_proc(heap, _max_conc_worker_id);
+    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
+    ShenandoahMarkRootsTask mark_roots(&root_proc, update_refs);
+    heap->conc_workers()->run_task(&mark_roots);
+
+    // Mark through any class loaders that have been found alive.
+    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
+    CLDToOopClosure cldCl(&cl);
+    CLDMarkAliveClosure cld_keep_alive(&cldCl);
+    ClassLoaderDataGraph::roots_cld_do(NULL, &cld_keep_alive);
+
+  } else {
+    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
+    heap->roots_iterate(&cl);
+  }
+
+  if (!(ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark)) {
+    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
+    heap->weak_roots_iterate(&cl);
+  }
+
+  // tty->print_cr("all root marker threads done");
+}
+
+
+void ShenandoahConcurrentMark::initialize() {
+  _max_conc_worker_id = MAX2((uint) ConcGCThreads, 1U);
+  _task_queues = new SCMObjToScanQueueSet((int) _max_conc_worker_id);
+
+  for (uint i = 0; i < _max_conc_worker_id; ++i) {
+    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
+    task_queue->initialize();
+    _task_queues->register_queue(i, task_queue);
+  }
+  JavaThread::satb_mark_queue_set().set_buffer_size(1024 /* G1SATBBufferSize */);
+}
+
+void ShenandoahConcurrentMark::mark_from_roots() {
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("STOPPING THE WORLD: before marking");
+    tty->print_cr("Starting markFromRoots");
+  }
+
+  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
+
+  bool update_refs = sh->need_update_refs();
+
+  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);
+  ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
+
+  if (ShenandoahProcessReferences) {
+    ReferenceProcessor* rp = sh->ref_processor();
+    // enable ("weak") refs discovery
+    rp->enable_discovery(true /*verify_no_refs*/);
+    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
+  }
+  
+  SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
+  sh->conc_workers()->set_active_workers(_max_conc_worker_id);
+  sh->conc_workers()->run_task(&markingTask);
+
+  if (ShenandoahGCVerbose) {
+    tty->print("total workers = %u finished workers = %u\n", 
+	       sh->conc_workers()->started_workers(), 
+	       sh->conc_workers()->finished_workers());
+    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
+  }
+
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("Finishing markFromRoots");
+    tty->print_cr("RESUMING THE WORLD: after marking");
+  }
+
+  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
+}
+
+class FinishDrainSATBBuffersTask : public AbstractGangTask {
+private:
+  ShenandoahConcurrentMark* _cm;
+  ParallelTaskTerminator* _terminator;
+public:
+  FinishDrainSATBBuffersTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator) :
+    AbstractGangTask("Finish draining SATB buffers"), _cm(cm), _terminator(terminator) {
+  }
+
+  void work(uint worker_id) {
+    _cm->drain_satb_buffers(worker_id, true);
+  }
+};
+
+class ShenandoahUpdateAliveRefs : public OopClosure {
+private:
+  ShenandoahHeap* _heap;
+public:
+  ShenandoahUpdateAliveRefs() : _heap(ShenandoahHeap::heap()) {
+  }
+  virtual void do_oop(oop* p) {
+    _heap->maybe_update_oop_ref(p);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+};
+
+void ShenandoahConcurrentMark::finish_mark_from_roots() {
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("Starting finishMarkFromRoots");
+  }
+
+  IsGCActiveMark is_active;
+
+  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
+
+  // Trace any (new) unmarked root references.
+  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::rescan_roots);
+  prepare_unmarked_root_objs();
+  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::rescan_roots);
+  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_satb);
+  {
+    StrongRootsScope scope(_max_conc_worker_id);
+    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
+    // drain_satb_buffers(0, true);
+    FinishDrainSATBBuffersTask drain_satb_buffers(this, &terminator);
+    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
+    sh->conc_workers()->run_task(&drain_satb_buffers);
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_satb);
+  }
+  
+  // Finally mark everything else we've got in our queues during the previous steps.
+  {
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_queues);
+    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
+    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, sh->need_update_refs());
+    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
+    sh->conc_workers()->run_task(&markingTask);
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_queues);
+  }
+
+#ifdef ASSERT
+  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
+    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
+  }
+#endif
+
+  // When we're done marking everything, we process weak references.
+  if (ShenandoahProcessReferences) {
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::weakrefs);
+    weak_refs_work();
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
+  }
+
+#ifdef ASSERT
+  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
+    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
+  }
+#endif
+
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("Finishing finishMarkFromRoots");
+#ifdef SLOWDEBUG
+    for (int i = 0; i <(int)_max_conc_worker_id; i++) {
+      tty->print("Queue: "INT32_FORMAT":", i);
+      _task_queues->queue(i)->stats.print(tty, 10);
+      tty->cr();
+      _task_queues->queue(i)->stats.verify();
+    }
+#endif
+  }
+
+  // We still need to update (without marking) alive refs in JNI handles.
+  if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
+    ShenandoahUpdateAliveRefs cl;
+    ShenandoahIsAliveClosure is_alive;
+    JNIHandles::weak_oops_do(&is_alive, &cl);
+  }
+
+#ifdef ASSERT
+  verify_roots();
+
+  if (ShenandoahDumpHeapAfterConcurrentMark) {
+    sh->ensure_parsability(false);
+    sh->print_all_refs("post-mark");
+  }
+#endif
+}
+
+#ifdef ASSERT
+void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
+  oop obj = oopDesc::load_heap_oop(p);
+  if (! oopDesc::is_null(obj)) {
+    guarantee(ShenandoahHeap::heap()->is_marked_current(obj), "oop must be marked");
+    guarantee(obj == ShenandoahBarrierSet::resolve_oop_static_not_null(obj), "oop must not be forwarded");
+  }
+}
+
+void ShenandoahConcurrentMark::verify_roots() {
+  ShenandoahVerifyRootsClosure1 cl;
+  CodeBlobToOopClosure blobsCl(&cl, true);
+  CLDToOopClosure cldCl(&cl);
+  ClassLoaderDataGraph::clear_claimed_marks();
+  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
+  rp.process_roots(&cl, &cl, &cldCl, &cldCl, &cldCl, &blobsCl);
+}
+#endif
+
+class ShenandoahSATBBufferClosure : public SATBBufferClosure {
+private:
+  SCMObjToScanQueue* _queue;
+
+public:
+  ShenandoahSATBBufferClosure(SCMObjToScanQueue* q) :
+    _queue(q)
+  {
+  }
+
+  void do_buffer(void** buffer, size_t size) {
+    // tty->print_cr("draining one satb buffer");
+    for (size_t i = 0; i < size; ++i) {
+      void* entry = buffer[i];
+      oop obj = oop(entry);
+      // tty->print_cr("satb buffer entry: "PTR_FORMAT, p2i((HeapWord*) obj));
+      if (!oopDesc::is_null(obj)) {
+	obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
+	bool pushed = _queue->push(obj);
+	assert(pushed, "overflow queue should always succeed pushing");
+      }
+    }
+  }
+};
+
+class ShenandoahSATBThreadsClosure : public ThreadClosure {
+  ShenandoahSATBBufferClosure* _satb_cl;
+  int _thread_parity;
+
+ public:
+  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
+    _satb_cl(satb_cl),
+    _thread_parity(Threads::thread_claim_parity()) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      if (thread->claim_oops_do(true, _thread_parity)) {
+        JavaThread* jt = (JavaThread*)thread;
+        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
+      }
+    } else if (thread->is_VM_thread()) {
+      if (thread->claim_oops_do(true, _thread_parity)) {
+        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
+      }
+    }
+  }
+};
+
+void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
+
+  // tty->print_cr("start draining SATB buffers");
+
+  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+  SCMObjToScanQueue* q = get_queue(worker_id);
+  ShenandoahSATBBufferClosure cl(q);
+
+  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
+
+  if (remark) {
+    ShenandoahSATBThreadsClosure tc(&cl);
+    Threads::threads_do(&tc);
+  }
+
+  // tty->print_cr("end draining SATB buffers");
+
+}
+
+bool ShenandoahConcurrentMark::drain_one_satb_buffer(uint worker_id) {
+
+  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+  SCMObjToScanQueue* q = get_queue(worker_id);
+  ShenandoahSATBBufferClosure cl(q);
+
+  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  bool result = satb_mq_set.apply_closure_to_completed_buffer(&cl);
+  return result;
+}
+
+#if TASKQUEUE_STATS
+void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
+  st->print_raw_cr("GC Task Stats");
+  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
+  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
+}
+
+void ShenandoahConcurrentMark::print_taskqueue_stats(outputStream* const st) const {
+  print_taskqueue_stats_hdr(st);
+  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+  TaskQueueStats totals;
+  const int n = sh->max_conc_workers();
+  for (int i = 0; i < n; ++i) {
+    st->print(INT32_FORMAT_W(3), i); 
+    _task_queues->queue(i)->stats.print(st);
+    st->print("\n");
+    totals += _task_queues->queue(i)->stats;
+  }
+  st->print_raw("tot "); totals.print(st); st->cr();
+  DEBUG_ONLY(totals.verify());
+
+}
+
+void ShenandoahConcurrentMark::print_push_only_taskqueue_stats(outputStream* const st) const {
+  print_taskqueue_stats_hdr(st);
+  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+  TaskQueueStats totals;
+  const int n = sh->max_conc_workers();
+  for (int i = 0; i < n; ++i) {
+    st->print(INT32_FORMAT_W(3), i); 
+    _task_queues->queue(i)->stats.print(st);
+    st->print("\n");
+    totals += _task_queues->queue(i)->stats;
+  }
+  st->print_raw("tot "); totals.print(st); st->cr();
+
+  DEBUG_ONLY(totals.verify_only_pushes());
+}
+
+void ShenandoahConcurrentMark::reset_taskqueue_stats() {
+  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+  const int n = sh->max_conc_workers();
+  for (int i = 0; i < n; ++i) {
+    _task_queues->queue(i)->stats.reset();
+  }
+}
+#endif // TASKQUEUE_STATS
+
+// Weak Reference Closures
+class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
+  ShenandoahHeap* _sh;
+  ShenandoahConcurrentMark* _scm;
+  uint _worker_id;
+  int _seed;
+
+public:
+  ShenandoahCMDrainMarkingStackClosure(uint worker_id): _worker_id(worker_id), _seed(17) {
+    _sh = (ShenandoahHeap*) Universe::heap();
+    _scm = _sh->concurrentMark();
+  }
+
+      
+  void do_void() {
+
+    SCMObjToScanQueue* q = _scm->get_queue(_worker_id);
+    ShenandoahMarkObjsClosure cl(q, _sh->need_update_refs());
+    while (true) {
+      if (!_scm->try_queue(q, &cl) &&
+	  !_scm->try_draining_an_satb_buffer(_worker_id) &&
+	  !_scm->try_to_steal(_worker_id, &cl, &_seed)) {
+	break;
+      }
+    }
+  }
+};
+
+
+class ShenandoahCMKeepAliveAndDrainClosure: public OopClosure {
+  SCMObjToScanQueue* _queue;
+  ShenandoahHeap* _sh;
+  ShenandoahConcurrentMark* _scm;
+
+  size_t _ref_count;
+
+public:
+  ShenandoahCMKeepAliveAndDrainClosure(SCMObjToScanQueue* q) :
+    _queue(q) {
+    _sh = (ShenandoahHeap*) Universe::heap();
+    _scm = _sh->concurrentMark();
+    _ref_count = 0;
+  }
+
+  virtual void do_oop(oop* p){ do_oop_work(p);}
+  virtual void do_oop(narrowOop* p) {  
+    assert(false, "narrowOops Aren't implemented");
+  }
+
+
+  void do_oop_work(oop* p) {  
+
+    oop obj;
+    if (_sh->need_update_refs()) {
+      obj = _sh->maybe_update_oop_ref(p);
+    } else {
+      obj = oopDesc::load_heap_oop(p);
+    }
+
+    assert(obj == oopDesc::bs()->resolve_oop(obj), "only get updated oops in weak ref processing");
+
+    if (obj != NULL) {
+      if (Verbose && ShenandoahTraceWeakReferences) {
+	gclog_or_tty->print_cr("\twe're looking at location "
+			       "*"PTR_FORMAT" = "PTR_FORMAT,
+			       p2i(p), p2i((void*) obj));
+	obj->print();
+      }
+      bool pushed = _queue->push(obj);
+      assert(pushed, "overflow queue should always succeed pushing");
+
+      _ref_count++;
+    }    
+  }
+
+  size_t ref_count() { return _ref_count; }
+
+};
+
+class ShenandoahRefProcTaskProxy : public AbstractGangTask {
+
+private:
+  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
+
+public:
+
+  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahIsAliveClosure is_alive;
+    ShenandoahCMKeepAliveAndDrainClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
+    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id);
+    _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
+  }
+};
+
+class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
+
+private:
+  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
+
+public:
+
+  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
+    AbstractGangTask("Enqueue reference objects in parallel"),
+    _enqueue_task(enqueue_task) {
+  }
+
+  void work(uint worker_id) {
+    _enqueue_task.work(worker_id);
+  }
+};
+
+class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
+
+private:
+  WorkGang* _workers;
+
+public:
+
+  ShenandoahRefProcTaskExecutor() : _workers(ShenandoahHeap::heap()->conc_workers()) {
+  }
+
+  // Executes a task using worker threads.
+  void execute(ProcessTask& task) {
+    ShenandoahRefProcTaskProxy proc_task_proxy(task);
+    _workers->run_task(&proc_task_proxy);
+  }
+
+  void execute(EnqueueTask& task) {
+    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
+    _workers->run_task(&enqueue_task_proxy);
+  }
+};
+
+
+void ShenandoahConcurrentMark::weak_refs_work() {
+   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+   ReferenceProcessor* rp = sh->ref_processor();
+
+   // Setup collector policy for softref cleaning.
+   bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
+   if (ShenandoahTraceWeakReferences) {
+     tty->print_cr("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
+   }
+   rp->setup_policy(clear_soft_refs);
+
+   uint serial_worker_id = 0;
+   ShenandoahIsAliveClosure is_alive;
+   ShenandoahCMKeepAliveAndDrainClosure keep_alive(get_queue(serial_worker_id));
+   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id);
+   ShenandoahRefProcTaskExecutor par_task_executor;
+   bool processing_is_mt = true;
+   AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
+
+   if (ShenandoahTraceWeakReferences) {
+     gclog_or_tty->print_cr("start processing references");
+   }
+
+   rp->process_discovered_references(&is_alive, &keep_alive, 
+				     &complete_gc, executor, 
+				     ShenandoahHeap::heap()->collector_policy()->conc_timer(),
+                                     ShenandoahHeap::heap()->tracer()->gc_id());
+   
+   if (ShenandoahTraceWeakReferences) {
+     gclog_or_tty->print_cr("finished processing references, processed "SIZE_FORMAT" refs", keep_alive.ref_count());
+     gclog_or_tty->print_cr("start enqueuing references");
+   }
+
+   rp->enqueue_discovered_references(executor);
+
+   if (ShenandoahTraceWeakReferences) {
+     gclog_or_tty->print_cr("finished enqueueing references");
+   }
+
+   rp->verify_no_references_recorded();
+   assert(!rp->discovery_enabled(), "Post condition");
+
+   if (ClassUnloadingWithConcurrentMark) {
+     // Unload classes and purge SystemDictionary.
+     bool purged_class = SystemDictionary::do_unloading(&is_alive);
+     // Unload nmethods.
+     CodeCache::do_unloading(&is_alive, purged_class);
+     // Prune dead klasses from subklass/sibling/implementor lists.
+     Klass::clean_weak_klass_links(&is_alive);
+     // Delete entries from dead interned strings.
+     StringTable::unlink(&is_alive);
+     // Clean up unreferenced symbols in symbol table.
+     SymbolTable::unlink();
+
+     ClassLoaderDataGraph::purge();
+   }
+}
+
+void ShenandoahConcurrentMark::cancel() {
+  ShenandoahHeap* sh = ShenandoahHeap::heap();
+
+  // Cancel weak-ref discovery.
+  if (ShenandoahProcessReferences) {
+    ReferenceProcessor* rp = sh->ref_processor();
+    rp->abandon_partial_discovery();
+    rp->disable_discovery();
+  }
+
+  // Clean up marking stacks.
+  SCMObjToScanQueueSet* queues = task_queues();
+  for (uint i = 0; i < _max_conc_worker_id; ++i) {
+    SCMObjToScanQueue* task_queue = queues->queue(i);
+    task_queue->set_empty();
+    task_queue->overflow_stack()->clear();
+  }
+
+  // Cancel SATB buffers.
+  JavaThread::satb_mark_queue_set().abandon_partial_marking();
+}
+
+SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
+  worker_id = worker_id % _max_conc_worker_id;
+  return _task_queues->queue(worker_id);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentMark.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,109 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
+
+#include "gc/shared/taskqueue.hpp"
+#include "gc/shared/workgroup.hpp"
+
+typedef OverflowTaskQueue<oop, mtGC> OopOverflowTaskQueue;
+typedef Padded<OopOverflowTaskQueue> SCMObjToScanQueue;
+typedef GenericTaskQueueSet<SCMObjToScanQueue, mtGC> SCMObjToScanQueueSet;
+
+class ShenandoahConcurrentMark;
+
+#ifdef ASSERT
+class ShenandoahVerifyRootsClosure1 : public OopClosure {
+  void do_oop(oop* p);
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+};
+#endif
+
+class ShenandoahMarkRefsClosure : public MetadataAwareOopClosure {
+  SCMObjToScanQueue* _queue;
+  ShenandoahHeap* _heap;
+  bool _update_refs;
+  ShenandoahConcurrentMark* _scm;
+
+public: 
+  ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, bool update_refs);
+
+  void do_oop(narrowOop* p);
+
+  inline void do_oop(oop* p);
+
+};
+
+class ShenandoahMarkObjsClosure : public ObjectClosure {
+  ShenandoahHeap* _heap;
+  size_t* _live_data;
+  ShenandoahMarkRefsClosure _mark_refs;
+
+public: 
+  ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, bool update_refs);
+
+  ~ShenandoahMarkObjsClosure();
+
+  inline void do_object(oop obj);
+};  
+
+class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
+
+private:
+  // The per-worker-thread work queues
+  SCMObjToScanQueueSet* _task_queues;
+
+  bool                    _aborted;       
+  uint _max_conc_worker_id;
+  ParallelTaskTerminator* _terminator;
+
+public:
+  // We need to do this later when the heap is already created.
+  void initialize();
+
+  void mark_from_roots();
+
+  // Prepares unmarked root objects by marking them and putting
+  // them into the marking task queue.
+  void prepare_unmarked_root_objs();
+  void prepare_unmarked_root_objs_no_derived_ptrs(bool update_refs);
+
+  void finish_mark_from_roots();
+  // Those are only needed public because they're called from closures.
+
+  SCMObjToScanQueue* get_queue(uint worker_id);
+  inline bool try_queue(SCMObjToScanQueue* q, ShenandoahMarkObjsClosure* cl);
+  inline bool try_to_steal(uint worker_id, ShenandoahMarkObjsClosure* cl, int *seed);
+  inline bool try_draining_an_satb_buffer(uint worker_id);
+  void drain_satb_buffers(uint worker_id, bool remark = false);
+  SCMObjToScanQueueSet* task_queues() { return _task_queues;}
+  uint max_conc_worker_id() { return _max_conc_worker_id; }
+
+  void cancel();
+
+private:
+
+#ifdef ASSERT
+  void verify_roots();
+#endif
+
+  bool drain_one_satb_buffer(uint worker_id);
+  void weak_refs_work();
+
+  ParallelTaskTerminator* terminator() { return _terminator;}
+
+#if TASKQUEUE_STATS
+  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
+  void print_push_only_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
+  void reset_taskqueue_stats();
+#endif // TASKQUEUE_STATS
+
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentMark.inline.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,134 @@
+/*
+  Copyright 2015 Red Hat, Inc. and/or its affiliates.
+*/
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
+
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+
+// Processes a single reference slot: optionally updates the reference to
+// its to-space copy (when _update_refs), then pushes the (non-NULL)
+// referent on this worker's queue for later scanning by do_object().
+void ShenandoahMarkRefsClosure::do_oop(oop* p) {
+  // We piggy-back reference updating to the marking tasks.
+#ifdef ASSERT
+  // Capture the old referent *value* for tracing. The previous code saved
+  // the slot address ('oop* old = p') and then compared 'p != old', which
+  // is trivially always false -- the "Update" trace branch could never fire
+  // and both branches dumped the same slot twice.
+  oop old = oopDesc::load_heap_oop(p);
+#endif
+  oop obj;
+  if (_update_refs) {
+    obj = _heap->maybe_update_oop_ref(p);
+  } else {
+    obj = oopDesc::load_heap_oop(p);
+  }
+  assert(obj == ShenandoahBarrierSet::resolve_oop_static(obj), "need to-space object here");
+
+#ifdef ASSERT
+  if (ShenandoahTraceUpdates) {
+    if (obj != old)
+      tty->print_cr("Update "PTR_FORMAT" => "PTR_FORMAT"  to "PTR_FORMAT" => "PTR_FORMAT, p2i(p), p2i((HeapWord*) old), p2i(p), p2i((HeapWord*) obj));
+    else
+      tty->print_cr("Not updating "PTR_FORMAT" => "PTR_FORMAT"  to "PTR_FORMAT" => "PTR_FORMAT, p2i(p), p2i((HeapWord*) old), p2i(p), p2i((HeapWord*) obj));
+  }
+#endif
+
+  // NOTE: We used to assert the following here. This does not always work because
+  // a concurrent Java thread could change the field after we updated it.
+  // oop obj = oopDesc::load_heap_oop(p);
+  // assert(oopDesc::bs()->resolve_oop(obj) == *p, "we just updated the referrer");
+  // assert(obj == NULL || ! _heap->heap_region_containing(obj)->is_dirty(), "must not point to dirty region");
+
+  //  ShenandoahExtendedMarkObjsClosure cl(_heap->ref_processor(), _worker_id);
+  //  ShenandoahMarkObjsClosure mocl(cl, _worker_id);
+
+  if (obj != NULL) {
+    // Prefetch for the expected access mode: updating refs will write into
+    // the object, plain marking only reads it.
+    if (_update_refs) {
+      Prefetch::write(obj, 128);
+    } else {
+      Prefetch::read(obj, 128);
+    }
+
+#ifdef ASSERT
+    uint region_idx  = _heap->heap_region_index_containing(obj);
+    ShenandoahHeapRegion* r = _heap->heap_regions()[region_idx];
+    assert(r->bottom() < (HeapWord*) obj && r->top() > (HeapWord*) obj, "object must be in region");
+#endif
+
+    bool pushed = _queue->push(obj);
+    assert(pushed, "overflow queue should always succeed pushing");
+  }
+}
+
+// Processes one object popped from a mark queue: mark it, account its size
+// (plus its Brooks pointer word) into the per-region liveness array, and
+// scan its reference fields via _mark_refs.
+void ShenandoahMarkObjsClosure::do_object(oop obj) {
+
+  assert(obj != NULL, "expect non-null object");
+
+  assert(obj == ShenandoahBarrierSet::resolve_oop_static_not_null(obj), "expect forwarded obj in queue");
+
+#ifdef ASSERT
+  if (_heap->heap_region_containing(obj)->is_in_collection_set()) {
+    tty->print_cr("trying to mark obj: "PTR_FORMAT" (%s) in dirty region: ", p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_marked_current(obj)));
+    //      _heap->heap_region_containing(obj)->print();
+    //      _heap->print_heap_regions();
+  }
+#endif
+  // From-space objects may legitimately show up here only after a cancelled
+  // concurrent GC.
+  assert(_heap->cancelled_concgc()
+	 || ! _heap->heap_region_containing(obj)->is_in_collection_set(),
+	 "we don't want to mark objects in from-space");
+  assert(_heap->is_in(obj), "referenced objects must be in the heap. No?");
+  // mark_current() succeeds only for the thread that actually sets the mark
+  // bit (see the already-marked branch below); only that thread accounts
+  // liveness and scans the object, so each object is processed once.
+  if (_heap->mark_current(obj)) {
+#ifdef ASSERT
+    if (ShenandoahTraceConcurrentMarking) {
+      tty->print_cr("marked obj: "PTR_FORMAT, p2i((HeapWord*) obj));
+    }
+#endif
+
+    // Calculate liveness of heap region containing object.
+    uint region_idx  = _heap->heap_region_index_containing(obj);
+#ifdef ASSERT
+    ShenandoahHeapRegion* r = _heap->heap_regions()[region_idx];
+    assert(r->bottom() < (HeapWord*) obj && r->top() > (HeapWord*) obj, "object must be in region");
+#endif
+    _live_data[region_idx] += (obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE) * HeapWordSize;
+    obj->oop_iterate(&_mark_refs);
+  }
+
+#ifdef ASSERT
+  else {
+    if (ShenandoahTraceConcurrentMarking) {
+      tty->print_cr("failed to mark obj (already marked): "PTR_FORMAT, p2i((HeapWord*) obj));
+    }
+    assert(_heap->is_marked_current(obj), "make sure object is marked");
+  }
+#endif
+}
+
+// Pop a single task from the given queue -- trying the local segment
+// first, then the overflow stack -- and feed it to the closure.
+// Returns false when the queue had no work at all.
+inline bool ShenandoahConcurrentMark::try_queue(SCMObjToScanQueue* q, ShenandoahMarkObjsClosure* cl) {
+  oop task;
+  if (q->pop_local(task)) {
+    assert(task != NULL, "Can't mark null");
+  } else if (! q->pop_overflow(task)) {
+    return false;
+  }
+  cl->do_object(task);
+  return true;
+}
+
+// Attempt to steal one task from some other worker's queue ('seed' drives
+// the victim selection) and process it. Returns false when nothing could
+// be stolen.
+inline bool ShenandoahConcurrentMark::try_to_steal(uint worker_id, ShenandoahMarkObjsClosure* cl, int *seed) {
+  oop stolen;
+  if (! task_queues()->steal(worker_id, seed, stolen)) {
+    return false;
+  }
+  cl->do_object(stolen);
+  return true;
+}
+
+// Drain one SATB buffer for this worker, if one is available; returns
+// whether a buffer was processed. (Also fixes the stray space that
+// previously followed the '::' scope operator.)
+inline bool ShenandoahConcurrentMark::try_draining_an_satb_buffer(uint worker_id) {
+  return drain_one_satb_buffer(worker_id);
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,201 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahJNICritical.hpp"
+#include "gc/shenandoah/vm_operations_shenandoah.hpp"
+#include "memory/iterator.hpp"
+#include "memory/universe.hpp"
+#include "runtime/vmThread.hpp"
+
+SurrogateLockerThread* ShenandoahConcurrentThread::_slt = NULL;
+
+// Construct and immediately start the concurrent GC control thread.
+// The initializer list now follows the member declaration order in
+// shenandoahConcurrentThread.hpp (started / in_progress / aborted flags,
+// then _epoch, then _do_full_gc); the previous order triggered -Wreorder
+// and misrepresented the actual initialization sequence.
+ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
+  ConcurrentGCThread(),
+  _concurrent_mark_started(false),
+  _concurrent_mark_in_progress(false),
+  _concurrent_mark_aborted(false),
+  _epoch(0),
+  _do_full_gc(false)
+{
+  create_and_start();
+}
+
+ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
+  // This is here so that super is called.
+  // (No members need explicit cleanup; the fields are plain flags.)
+}
+
+// Main loop of the concurrent GC control thread. After waiting for the
+// surrogate locker thread, it repeatedly either (a) runs a scheduled full
+// GC, or (b) when the policy says so, drives a concurrent cycle of
+// init-mark -> concurrent mark -> evacuation -> (optional) update-refs;
+// otherwise it parks briefly and re-checks.
+void ShenandoahConcurrentThread::run() {
+  initialize_in_thread();
+
+  wait_for_universe_init();
+
+  // Wait until we have the surrogate locker thread in place.
+  {
+    MutexLockerEx x(CGC_lock, true);
+    while(_slt == NULL && !_should_terminate) {
+      CGC_lock->wait(true, 200);
+    }
+  }
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  while (!_should_terminate) {
+    if (_do_full_gc) {
+      {
+        // Record the cause before running the collection.
+        if (_full_gc_cause == GCCause::_allocation_failure) {
+          heap->shenandoahPolicy()->record_allocation_failure_gc();
+        } else {
+          heap->shenandoahPolicy()->record_user_requested_gc();
+        }
+
+	VM_ShenandoahFullGC full_gc;
+	heap->jni_critical()->execute_in_vm_thread(&full_gc);
+      }
+      // Wake up any thread blocked in do_full_gc() waiting for completion.
+      MonitorLockerEx ml(ShenandoahFullGC_lock);
+      _do_full_gc = false;
+      ml.notify_all();
+    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(),
+							       heap->capacity())) 
+      {
+
+	if (ShenandoahGCVerbose) 
+	  tty->print("Capacity = "SIZE_FORMAT" Used = "SIZE_FORMAT"  doing initMark\n", heap->capacity(), heap->used());
+ 
+	if (ShenandoahGCVerbose) tty->print("Starting a mark");
+
+	VM_ShenandoahInitMark initMark;
+	VMThread::execute(&initMark);
+
+        if (ShenandoahConcurrentMarking) {
+          ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
+
+	  VM_ShenandoahStartEvacuation finishMark;
+	  heap->jni_critical()->execute_in_vm_thread(&finishMark);
+	}
+
+        // A cancelled mark must leave a clean bitmap; restart the loop.
+        if (cm_has_aborted()) {
+          clear_cm_aborted();
+          assert(heap->is_bitmap_clear(), "need to continue with clear mark bitmap");
+          assert(! heap->concurrent_mark_in_progress(), "concurrent mark must have terminated");
+          continue;
+        }
+        if (! _should_terminate) {
+          // If we're not concurrently evacuating, evacuation is done
+          // from VM_ShenandoahFinishMark within the VMThread above.
+          if (ShenandoahConcurrentEvacuation) {
+            VM_ShenandoahEvacuation evacuation;
+            evacuation.doit();
+          }
+        }
+
+        if (heap->shenandoahPolicy()->update_refs_early() && ! _should_terminate && ! heap->cancelled_concgc()) {
+          if (ShenandoahConcurrentUpdateRefs) {
+            VM_ShenandoahUpdateRefs update_refs;
+            VMThread::execute(&update_refs);
+            heap->update_references();
+          }
+        } else {
+	  if (heap->is_evacuation_in_progress()) {
+	    heap->set_evacuation_in_progress(false);
+	  }
+	}
+
+      } else {
+      Thread::current()->_ParkEvent->park(10) ;
+      // yield();
+    }
+    heap->clear_cancelled_concgc();
+  }
+}
+
+// Called by a Java thread to request a full GC, blocking until the control
+// thread (see run()) has executed it.
+void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
+
+  assert(Thread::current()->is_Java_thread(), "expect Java thread here");
+
+  MonitorLockerEx ml(ShenandoahFullGC_lock);
+  schedule_full_gc();
+  _full_gc_cause = cause;
+  // run() clears _do_full_gc and notifies once the collection is done.
+  while (_do_full_gc) {
+    ml.wait();
+  }
+  assert(_do_full_gc == false, "expect full GC to have completed");
+}
+
+// Flag the control thread to run a full GC on its next loop iteration.
+void ShenandoahConcurrentThread::schedule_full_gc() {
+  _do_full_gc = true;
+  // Publish the flag to the control thread promptly.
+  OrderAccess::fence();
+}
+
+void ShenandoahConcurrentThread::print() const {
+  print_on(tty);
+}
+
+void ShenandoahConcurrentThread::print_on(outputStream* st) const {
+  st->print("Shenandoah Concurrent Thread");
+  Thread::print_on(st);
+  st->cr();
+}
+
+// Never used: run() loops continuously instead of sleeping between cycles.
+void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
+  assert(false, "Wake up in the GC thread that never sleeps :-)");
+}
+
+// Mark-cycle state machine: "started" is set first, "in progress" while
+// concurrent marking actually runs. The asserts encode the legal
+// transitions. NOTE(review): clear_cm_started() asserts that in_progress
+// is already set, yet its message reads "must be starting a cycle" --
+// message looks copy-pasted; confirm intent.
+void ShenandoahConcurrentThread::set_cm_started() {
+    assert(!_concurrent_mark_in_progress, "cycle in progress"); 
+    _concurrent_mark_started = true;  
+}
+  
+void ShenandoahConcurrentThread::clear_cm_started() { 
+    assert(_concurrent_mark_in_progress, "must be starting a cycle"); 
+    _concurrent_mark_started = false; 
+}
+
+bool ShenandoahConcurrentThread::cm_started() {
+  return _concurrent_mark_started;
+}
+
+void ShenandoahConcurrentThread::set_cm_in_progress() { 
+  assert(_concurrent_mark_started, "must be starting a cycle"); 
+  _concurrent_mark_in_progress = true;  
+}
+
+void ShenandoahConcurrentThread::clear_cm_in_progress() { 
+  assert(!_concurrent_mark_started, "must not be starting a new cycle"); 
+  _concurrent_mark_in_progress = false; 
+}
+
+bool ShenandoahConcurrentThread::cm_in_progress() { 
+  return _concurrent_mark_in_progress;  
+}
+
+// NOTE(review): the constructor already calls create_and_start(); invoking
+// start() as well would start the thread twice -- confirm callers.
+void ShenandoahConcurrentThread::start() {
+  create_and_start();
+}
+
+// Yield to a pending safepoint via the suspendible thread set.
+void ShenandoahConcurrentThread::yield() {
+  _sts.yield();
+}
+
+void ShenandoahConcurrentThread::safepoint_synchronize() {
+  assert(UseShenandoahGC, "just checking");
+  _sts.synchronize();
+}
+
+void ShenandoahConcurrentThread::safepoint_desynchronize() {
+  assert(UseShenandoahGC, "just checking");
+  _sts.desynchronize();
+}
+
+// Create the shared surrogate locker thread; run() blocks until this
+// has happened.
+void ShenandoahConcurrentThread::makeSurrogateLockerThread(TRAPS) {
+  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
+  assert(THREAD->is_Java_thread(), "must be a Java thread");
+  assert(_slt == NULL, "SLT already created");
+  _slt = SurrogateLockerThread::make(THREAD);
+}
+
+// Ask run() to exit its loop; does not wait for the thread to stop.
+void ShenandoahConcurrentThread::shutdown() {
+  _should_terminate = true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,84 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "memory/resourceArea.hpp"
+
+// For now we just want to have a concurrent marking thread. 
+// Once we have that working we will build a concurrent evacuation thread.
+
+// Control thread for Shenandoah's collection cycle: its run() loop
+// alternates between scheduled full GCs and policy-triggered concurrent
+// mark / evacuate / update-refs cycles.
+class ShenandoahConcurrentThread: public ConcurrentGCThread {
+  friend class VMStructs;
+
+ public:
+  virtual void run();
+
+ private:
+  // Mark-cycle state flags; the setters in the .cpp assert the legal
+  // transitions between them.
+  volatile bool                    _concurrent_mark_started;
+  volatile bool                    _concurrent_mark_in_progress;
+  volatile bool                    _concurrent_mark_aborted;
+
+  int _epoch;
+
+  static SurrogateLockerThread* _slt;
+  static SuspendibleThreadSet _sts;
+
+  // Full-GC request handshake; guarded by ShenandoahFullGC_lock.
+  bool _do_full_gc;
+  GCCause::Cause _full_gc_cause;
+
+  void sleepBeforeNextCycle();
+
+ public:
+  // Constructor
+  ShenandoahConcurrentThread();
+  ~ShenandoahConcurrentThread();
+
+  static void makeSurrogateLockerThread(TRAPS);
+  static SurrogateLockerThread* slt() { return _slt; }
+
+  // Printing
+  void print_on(outputStream* st) const;
+  void print() const;
+
+  void set_cm_started();
+  void clear_cm_started();
+  bool cm_started();
+
+  void set_cm_in_progress();
+  void clear_cm_in_progress();
+  bool cm_in_progress();
+
+  void cm_abort() { _concurrent_mark_aborted = true;}
+  bool cm_has_aborted() { return _concurrent_mark_aborted;}
+  void clear_cm_aborted() { _concurrent_mark_aborted = false;}
+
+  // Blocks the calling Java thread until the requested full GC completes.
+  void do_full_gc(GCCause::Cause cause);
+
+  void schedule_full_gc();
+
+  // This flag returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While this flag is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and set in_progress().
+  bool during_cycle()      { return cm_started() || cm_in_progress(); }
+
+  // Cast drops constness of the literal to satisfy the base signature.
+  char* name() const { return (char*)"ShenandoahConcurrentThread";}
+  void start();
+  void yield();
+
+  static void safepoint_synchronize();
+  static void safepoint_desynchronize();
+
+  void shutdown();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,2856 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+
+#include "classfile/symbolTable.hpp"
+#include "classfile/stringTable.hpp"
+
+#include "gc/shared/collectedHeap.inline.hpp"
+#include "gc/shared/cmBitMap.inline.hpp"
+#include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcTraceTime.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHumongous.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahJNICritical.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/vm_operations_shenandoah.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "memory/iterator.hpp"
+#include "memory/oopFactory.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
+#include "memory/universe.hpp"
+#include "utilities/copy.hpp"
+#include "gc/shared/vmGCOperations.hpp"
+#include "runtime/atomic.inline.hpp"
+
+#define __ masm->
+
+ShenandoahHeap* ShenandoahHeap::_pgc = NULL;
+
+// Debugging aid: dump every word in [start, end) as "address : contents".
+void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
+  for (HeapWord* cur = start; cur < end; cur++) {
+    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
+  }
+}
+
+// Debugging aid: walk [start, end) object by object, printing each object
+// followed by a raw word dump of its storage.
+void ShenandoahHeap::print_heap_objects(HeapWord* start, HeapWord* end) {
+  HeapWord* cur = start;
+  while (cur < end) {
+    oop(cur)->print();
+    print_heap_locations(cur, cur + oop(cur)->size());
+    cur = cur + oop(cur)->size();
+  }
+}
+
+// Debugging aid: raw word dump of an object, from one word before it
+// (presumably the Brooks forwarding-pointer slot -- see the marking code)
+// through one word past its end.
+void ShenandoahHeap::print_heap_object(oop p) {
+  HeapWord* hw = (HeapWord*) p;
+  print_heap_locations(hw-1, hw+1+p->size());
+}
+
+
+// Region closure that prints each region to the given stream (tty by
+// default); returning false from doHeapRegion visits every region.
+class PrintHeapRegionsClosure : public
+   ShenandoahHeapRegionClosure {
+private:
+  outputStream* _st;
+public:
+  PrintHeapRegionsClosure() : _st(tty) {}
+  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    r->print_on(_st);
+    return false;
+  }
+};
+
+// Region closure that dumps every object in every region (very verbose;
+// debugging only).
+class PrintHeapObjectsClosure : public ShenandoahHeapRegionClosure {
+public:
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    tty->print_cr("Region "INT32_FORMAT" top = "PTR_FORMAT" used = "SIZE_FORMAT_HEX" free = "SIZE_FORMAT_HEX, 
+	       r->region_number(), p2i(r->top()), r->used(), r->free());
+    
+    ShenandoahHeap::heap()->print_heap_objects(r->bottom(), r->top());
+    return false;
+  }
+};
+
+// Reserves and commits the heap, carves the initial portion into
+// fixed-size regions, and sets up the barrier set, SATB queues, mark
+// bitmap, collection-set fast-test table, and the concurrent GC thread.
+jint ShenandoahHeap::initialize() {
+  CollectedHeap::pre_initialize();
+
+  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
+  size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  if (ShenandoahGCVerbose) 
+    // NOTE(review): INT64_FORMAT is used for a size_t argument here; works
+    // on LP64 but is formally mismatched -- SIZE_FORMAT would be correct.
+    tty->print_cr("init_byte_size = "SIZE_FORMAT","SIZE_FORMAT_HEX"  max_byte_size = "INT64_FORMAT","SIZE_FORMAT_HEX, 
+	     init_byte_size, init_byte_size, max_byte_size, max_byte_size);
+
+  // Both sizes must be multiples of the region size.
+  Universe::check_alignment(max_byte_size,  
+			    ShenandoahHeapRegion::RegionSizeBytes, 
+			    "shenandoah heap");
+  Universe::check_alignment(init_byte_size, 
+			    ShenandoahHeapRegion::RegionSizeBytes, 
+			    "shenandoah heap");
+
+  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
+						 Arguments::conservative_max_heap_alignment());
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
+
+  set_barrier_set(new ShenandoahBarrierSet());
+  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
+  _storage.initialize(pgc_rs, init_byte_size);
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("Calling initialize on reserved space base = "PTR_FORMAT" end = "PTR_FORMAT, 
+	       p2i(pgc_rs.base()), p2i(pgc_rs.base() + pgc_rs.size()));
+  }
+
+  // The region array is sized for the maximum heap; only the first
+  // _num_regions entries are materialized now.
+  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
+  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
+  _ordered_regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _max_regions, mtGC); 
+  for (size_t i = 0; i < _max_regions; i++) {
+    _ordered_regions[i] = NULL;
+  }
+
+  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
+  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
+  assert(init_byte_size == _initialSize, "tautology");
+  _free_regions = new ShenandoahHeapRegionSet(_max_regions);
+  _collection_set = new ShenandoahHeapRegionSet(_max_regions);
+
+  // Materialize the initial regions; each starts out free.
+  for (size_t i = 0; i < _num_regions; i++) {
+    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
+    current->initialize_heap_region((HeapWord*) pgc_rs.base() + 
+				    regionSizeWords * i, regionSizeWords, i);
+    _free_regions->append(current);
+    _ordered_regions[i] = current;
+  }
+  _first_region = _ordered_regions[0];
+  _first_region_bottom = _first_region->bottom();
+  assert((((size_t) _first_region_bottom) & (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0, err_msg("misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom)));
+
+  _numAllocs = 0;
+
+  if (ShenandoahGCVerbose) {
+    tty->print("All Regions\n");
+    print_heap_regions();
+    tty->print("Free Regions\n");
+    _free_regions->print();
+  }
+
+  // The call below uses stuff (the SATB* things) that are in G1, but probably
+  // belong into a shared location.
+  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
+                                               SATB_Q_FL_lock,
+                                               20 /*G1SATBProcessCompletedThreshold */,
+                                               Shared_SATB_Q_lock);
+
+  // Reserve space for prev and next bitmap.
+  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
+  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
+
+  ReservedSpace bitmap(ReservedSpace::allocation_align_size_up(bitmap_size));
+  os::commit_memory_or_exit(bitmap.base(), bitmap.size(), false, err_msg("couldn't allocate mark bitmap"));
+  MemRegion bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
+  _mark_bit_map.initialize(heap_region, bitmap_region);
+
+  _next_mark_bit_map = &_mark_bit_map;
+  reset_mark_bitmap();
+
+  // Initialize fast collection set test structure.
+  _in_cset_fast_test_length = _max_regions;
+  _in_cset_fast_test_base =
+                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
+  // Bias the table base so it can be indexed directly by
+  // (address >> RegionSizeShift).
+  _in_cset_fast_test = _in_cset_fast_test_base -
+               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
+  clear_cset_fast_test();
+
+  _concurrent_gc_thread = new ShenandoahConcurrentThread();
+  return JNI_OK;
+}
+
+// Heap constructor: records the policy, zeroes all bookkeeping, and
+// creates the parallel and concurrent worker gangs plus the concurrent
+// marker. Region/bitmap setup happens later in initialize().
+ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : 
+  CollectedHeap(),
+  _shenandoah_policy(policy), 
+  _concurrent_mark_in_progress(false),
+  _evacuation_in_progress(false),
+  _update_references_in_progress(false),
+  _free_regions(NULL),
+  _collection_set(NULL),
+  _bytesAllocSinceCM(0),
+  _bytes_allocated_during_cm(0),
+  _max_allocated_gc(0),
+  _allocated_last_gc(0),
+  _used_start_gc(0),
+  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
+  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
+  _ref_processor(NULL),
+  _in_cset_fast_test(NULL),
+  _in_cset_fast_test_base(NULL),
+  _mark_bit_map(),
+  _cancelled_concgc(false),
+  _need_update_refs(false),
+  _need_reset_bitmaps(false),
+  _jni_critical(new ShenandoahJNICritical())
+
+{
+  if (ShenandoahLogConfig) {
+    tty->print_cr("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
+    tty->print_cr("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
+    tty->print_cr("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
+  }
+  // Publish the singleton used by ShenandoahHeap::heap().
+  _pgc = this;
+  _scm = new ShenandoahConcurrentMark();
+  _used = 0;
+  // This is odd.  They are concurrent gc threads, but they are also task threads.  
+  // Framework doesn't allow both.
+  _workers = new FlexibleWorkGang("Concurrent GC Threads", ParallelGCThreads,
+                            /* are_GC_task_threads */true,
+                            /* are_ConcurrentGC_threads */false);
+  _conc_workers = new FlexibleWorkGang("Concurrent GC Threads", ConcGCThreads,
+                            /* are_GC_task_threads */true,
+                            /* are_ConcurrentGC_threads */false);
+  if ((_workers == NULL) || (_conc_workers == NULL)) {
+    vm_exit_during_initialization("Failed necessary allocation.");
+  } else {
+    _workers->initialize_workers();
+    _conc_workers->initialize_workers();
+  }
+}
+
+// Clear the entire next-mark bitmap (between marking cycles).
+void ShenandoahHeap::reset_mark_bitmap() {
+  _next_mark_bit_map->clearAll();
+}
+
+// Clear only the bitmap bits covering [from, to).
+void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
+  _next_mark_bit_map->clearRange(MemRegion(from, to));
+}
+
+// Bit closure used by is_bitmap_clear(): keeps the iteration going
+// (returns true) while bits are unmarked, aborting on the first marked
+// word it sees.
+class BitmapClearClosure : public BitMapClosure {
+private:
+  CMBitMap* _bm;
+
+public:
+
+  BitmapClearClosure(CMBitMap* bm) : _bm(bm) {
+  }
+
+  bool do_bit(BitMap::idx_t offset) {
+    HeapWord* hw = _bm->offsetToHeapWord(offset);
+    bool is_marked = _bm->isMarked(hw);
+    return ! is_marked;
+  }
+};
+
+// Returns true iff no bit is set in the next-mark bitmap (the closure
+// aborts the iteration on the first marked word it finds).
+bool ShenandoahHeap::is_bitmap_clear() {
+  BitmapClearClosure cl(_next_mark_bit_map);
+  return _next_mark_bit_map->iterate(&cl);
+}
+
+// One-line heap summary: sizes, region size, current GC-phase flags;
+// with -XX:+Verbose also dumps the per-region table.
+void ShenandoahHeap::print_on(outputStream* st) const {
+  st->print("Shenandoah Heap");
+  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity()/ K, used() /K);
+  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
+  if (_concurrent_mark_in_progress) {
+    st->print("marking ");
+  }
+  if (_evacuation_in_progress) {
+    st->print("evacuating ");
+  }
+  if (_update_references_in_progress) {
+    st->print("updating-refs ");
+  }
+  if (_cancelled_concgc) {
+    st->print("cancelled ");
+  }
+  st->print("\n");
+
+  if (Verbose) {
+    print_heap_regions(st);
+  }
+}
+
+// Thread closure that (re)initializes a thread's GCLAB; applied to all
+// Java and GC threads in post_initialize().
+class InitGCLABClosure : public ThreadClosure {
+public:
+  void do_thread(Thread* thread) {
+    thread->gclab().initialize(true);
+  }
+};
+
+// Second-stage initialization, run once threads exist: give every Java and
+// GC thread a GCLAB, initialize the concurrent marker and (optionally)
+// reference processing, and compute the worker-count bound.
+void ShenandoahHeap::post_initialize() {
+
+  {
+    MutexLockerEx ml(Threads_lock);
+    InitGCLABClosure init_gclabs;
+    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+      init_gclabs.do_thread(thread);
+    }
+    gc_threads_do(&init_gclabs);
+  }
+  _scm->initialize();
+
+  if (ShenandoahProcessReferences) {
+    ref_processing_init();
+  }
+  _max_workers = MAX(_max_parallel_workers, _max_conc_workers);
+}
+
+// Region closure that sums up used() over every region it visits.
+class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
+  size_t sum;
+public:
+
+  CalculateUsedRegionClosure() : sum(0) {}
+
+  // Returning false keeps the iteration going over all regions.
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    sum += r->used();
+    return false;
+  }
+
+  size_t getResult() { return sum;}
+};
+
+// Recompute used bytes by summing over all regions (O(#regions); the fast
+// path is the cached _used counter, see used()).
+size_t ShenandoahHeap::calculateUsed() {
+  CalculateUsedRegionClosure cl;
+  heap_region_iterate(&cl);
+  return cl.getResult();
+}
+
+size_t ShenandoahHeap::calculateFree() {
+  return capacity() - calculateUsed();
+}
+
+// Debug check: the cached _used counter must agree with a full
+// region-by-region recomputation.
+void ShenandoahHeap::verify_heap_size_consistency() {
+  
+  assert(calculateUsed() == used(),
+         err_msg("heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed()));
+}
+
+// Cached used-bytes counter; kept consistent with the regions (see
+// verify_heap_size_consistency()).
+size_t ShenandoahHeap::used() const {
+  return _used;
+}
+
+// NOTE(review): _used is updated without atomics -- the Atomic::add_ptr
+// calls are commented out. This is only safe if every caller holds a
+// common lock; confirm the call sites.
+void ShenandoahHeap::increase_used(size_t bytes) {
+  _used += bytes;
+  // Atomic::add_ptr(bytes, &_used);
+}
+
+void ShenandoahHeap::set_used(size_t bytes) {
+  _used = bytes;
+}
+
+void ShenandoahHeap::decrease_used(size_t bytes) {
+  assert(_used >= bytes, "never decrease heap size by more than we've left");
+  _used -= bytes;
+  
+  // Atomic::add_ptr(-bytes, &_used);
+}
+
+// Committed capacity: only the currently-materialized regions count;
+// max_capacity() below covers the full reservation.
+size_t ShenandoahHeap::capacity() const {
+  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
+
+}
+
+bool ShenandoahHeap::is_maximal_no_gc() const {
+  // Not implemented; Unimplemented() fires before the return is reached.
+  Unimplemented();
+  return true;
+}
+
+size_t ShenandoahHeap::max_capacity() const {
+  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
+}
+
+// Region closure answering "is this address inside any region?".
+// Currently unused by is_in(), which does a simple range check instead.
+class IsInRegionClosure : public ShenandoahHeapRegionClosure {
+  const void* _p;
+  bool _result;
+public:
+
+  IsInRegionClosure(const void* p) : _p(p), _result(false) {}
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    if (! r->is_in(_p)) {
+      return false;   // keep looking
+    }
+    _result = true;
+    return true;      // found -- stop iterating
+  }
+
+  bool result() { return _result;}
+};
+
+// Membership test: the heap is one contiguous run of regions starting at
+// _first_region_bottom, so a simple range check suffices (the
+// region-iterating closure above is unnecessary).
+bool ShenandoahHeap::is_in(const void* p) const {
+  HeapWord* last_region_end = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
+  // Was 'p > _first_region_bottom', which wrongly excluded the very first
+  // word of the heap; the lower bound must be inclusive. Also use the
+  // cached member consistently instead of re-reading _first_region->bottom().
+  return p >= _first_region_bottom && p < last_region_end;
+}
+
+bool ShenandoahHeap::is_in_partial_collection(const void* p ) {
+  Unimplemented();
+  return false;
+}  
+
+// The entire Shenandoah heap is treated as scavengable.
+bool  ShenandoahHeap::is_scavengable(const void* p) {
+  //  nyi();
+  //  return false;
+  return true;
+}
+
+// Fast-path GCLAB allocation: bump-allocate from the thread's current
+// GCLAB, falling back to the slow path (which may refill the lab) when
+// the lab is exhausted.
+HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
+  HeapWord* obj = thread->gclab().allocate(size);
+  if (obj == NULL) {
+    obj = allocate_from_gclab_slow(thread, size);
+  }
+  return obj;
+}
+
+// Slow-path GCLAB allocation: either keep the current lab (when discarding
+// it would waste too much space) and return NULL so the caller allocates
+// in the shared space, or retire it and allocate a fresh lab.
+HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
+  // Retain tlab and allocate object in shared space if
+  // the amount free in the tlab is too large to discard.
+  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
+    thread->gclab().record_slow_allocation(size);
+    return NULL;
+  }
+
+  // Discard gclab and allocate a new one.
+  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
+  size_t new_gclab_size = thread->gclab().compute_size(size);
+
+  thread->gclab().clear_before_allocation();
+
+  // compute_size() returning 0 means no lab of useful size can be had.
+  if (new_gclab_size == 0) {
+    return NULL;
+  }
+
+  // Allocate a new GCLAB...
+  HeapWord* obj = allocate_new_gclab(new_gclab_size);
+  if (obj == NULL) {
+    return NULL;
+  }
+
+  if (ZeroTLAB) {
+    // ..and clear it.
+    Copy::zero_to_words(obj, new_gclab_size);
+  } else {
+    // ...and zap just allocated object.
+#ifdef ASSERT
+    // Skip mangling the space corresponding to the object header to
+    // ensure that the returned space is not considered parsable by
+    // any concurrent GC thread.
+    size_t hdr_size = oopDesc::header_size();
+    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
+#endif // ASSERT
+  }
+  // The first 'size' words are handed out to the caller; the rest becomes
+  // the thread's new lab.
+  thread->gclab().fill(obj, obj + size, new_gclab_size);
+  return obj;
+}
+
+// TLABs (mutator labs) are allocated with mark=true, GCLABs with
+// mark=false; see allocate_new_tlab(size_t, bool) for what the flag does.
+HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
+  return allocate_new_tlab(word_size, true);
+}
+
+HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
+  return allocate_new_tlab(word_size, false);
+}
+
+// Allocate a new lab of word_size words. When 'mark' is set and a mark or
+// evacuation phase is running, the whole lab is pre-marked so individual
+// objects allocated from it need no marking.
+HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool mark) {
+  HeapWord* result = allocate_memory(word_size);
+
+  if (result != NULL) {
+    if (mark && (_concurrent_mark_in_progress || _evacuation_in_progress)) {
+      // We mark the whole tlab here, this way we avoid marking every single
+      // allocated object. We mark it from the 2nd word, because the 1st word is always
+      // the brooks ptr of the first object, and it confuses the fast marked-iterator
+      // if we mark that.
+      _next_mark_bit_map->parMarkRange(MemRegion(result + BrooksPointer::BROOKS_POINTER_OBJ_SIZE,
+						 word_size - BrooksPointer::BROOKS_POINTER_OBJ_SIZE));
+    }
+    assert(! heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region");
+    _bytesAllocSinceCM += word_size * HeapWordSize;
+
+#ifdef ASSERT
+    if (ShenandoahTraceTLabs)
+      tty->print_cr("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
+#endif
+
+  }
+  return result;
+}
+
+// Access to the singleton heap instance (set in the constructor).
+// Fixed the typo in the assert message ("Unitialized" -> "Uninitialized").
+ShenandoahHeap* ShenandoahHeap::heap() {
+  assert(_pgc != NULL, "Uninitialized access to ShenandoahHeap::heap()");
+  assert(_pgc->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
+  return _pgc;
+}
+
+// VM operation that verifies the whole heap at a safepoint.
+class VM_ShenandoahVerifyHeap: public VM_GC_Operation {
+public:
+  VM_ShenandoahVerifyHeap(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { }
+  // NOTE(review): reuses G1's VMOp type -- presumably because there is no
+  // Shenandoah entry in the VMOp_Type enum; confirm this doesn't mislead
+  // tooling that inspects operation types.
+  virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
+  virtual void doit() {
+    if (ShenandoahGCVerbose)
+      tty->print_cr("verifying heap");
+     Universe::heap()->ensure_parsability(false);
+     Universe::verify();
+  }
+  virtual const char* name() const {
+    return "Shenandoah verify trigger";
+  }
+};
+
+// Finds the first region that is outside the collection set and has at
+// least _required_size free (units per ShenandoahHeapRegion::free());
+// iteration stops as soon as a match is recorded.
+class FindEmptyRegionClosure: public ShenandoahHeapRegionClosure {
+  ShenandoahHeapRegion* _result;
+  size_t _required_size;
+public:
+
+  FindEmptyRegionClosure(size_t required_size) : _result(NULL), _required_size(required_size) {}
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    if (r->is_in_collection_set() || r->free() < _required_size) {
+      return false;   // keep looking
+    }
+    _result = r;
+    return true;      // found -- stop iterating
+  }
+
+  ShenandoahHeapRegion* result() { return _result;}
+
+};
+
+// Central allocation path. On failure -- unless called from an evacuating
+// GC thread -- triggers an allocation-failure collection and retries once;
+// NULL from the retry means genuine OOM.
+HeapWord* ShenandoahHeap::allocate_memory(size_t word_size) {
+  HeapWord* result = NULL;
+  result = allocate_memory_with_lock(word_size);
+
+  if (result == NULL && ! Thread::current()->is_evacuating()) { // Allocation failed, try full-GC, then retry allocation.
+    // tty->print_cr("failed to allocate "SIZE_FORMAT " bytes, free regions:", word_size * HeapWordSize);
+    // _free_regions->print();
+    collect(GCCause::_allocation_failure);
+    result = allocate_memory_with_lock(word_size);
+  }
+
+  return result;
+}
+
+HeapWord* ShenandoahHeap::allocate_memory_with_lock(size_t word_size) {
+  return allocate_memory_shenandoah_lock(word_size);
+}
+
+// Legacy Heap_lock-based allocation path. Deliberately fenced off:
+// ShouldNotReachHere() fires before the lock is ever taken, so the code
+// below it is dead. Kept only for reference.
+HeapWord* ShenandoahHeap::allocate_memory_heap_lock(size_t word_size) {
+  ShouldNotReachHere();
+  MutexLocker ml(Heap_lock);
+  return allocate_memory_work(word_size);
+}
+
+// Allocate under the Shenandoah-specific heap lock (no safepoint check).
+HeapWord* ShenandoahHeap::allocate_memory_shenandoah_lock(size_t word_size) {
+  MutexLockerEx ml(ShenandoahHeap_lock, true);
+  return allocate_memory_work(word_size);
+}
+
+// Advance past humongous regions in the free list; humongous regions are
+// never used for regular (TLAB-style) allocation. Returns NULL if the
+// free list is exhausted.
+ShenandoahHeapRegion* ShenandoahHeap::check_skip_humongous(ShenandoahHeapRegion* region) const {
+  while (region != NULL && region->is_humongous()) {
+    region = _free_regions->get_next();
+  }
+  return region;
+}
+
+// Next free region that is not humongous, or NULL if none remain.
+ShenandoahHeapRegion* ShenandoahHeap::get_next_region_skip_humongous() const {
+  ShenandoahHeapRegion* next = _free_regions->get_next();
+  return check_skip_humongous(next);
+}
+
+// Current free-list region, skipping humongous ones; NULL if exhausted.
+ShenandoahHeapRegion* ShenandoahHeap::get_current_region_skip_humongous() const {
+  ShenandoahHeapRegion* current = _free_regions->current();
+  return check_skip_humongous(current);
+}
+
+
+// If no free region is available, try to grow the heap by committing a new
+// region. Returns a usable (non-humongous) region, or NULL on OOM.
+ShenandoahHeapRegion* ShenandoahHeap::check_grow_heap(ShenandoahHeapRegion* current) {
+  if (current == NULL) {
+    if (grow_heap_by()) {
+      current = _free_regions->get_next();
+      assert(current != NULL, "After successfully growing the heap we should have a region");
+      assert(! current->is_humongous(), "new region must not be humongous");
+    } else {
+      current = NULL; // No more room to make a new region. OOM.
+    }
+  }
+  return current;
+}
+
+// Current allocation region (skipping humongous), growing the heap if needed.
+ShenandoahHeapRegion* ShenandoahHeap::get_current_region() {
+  ShenandoahHeapRegion* current = get_current_region_skip_humongous();
+  return check_grow_heap(current);
+}
+
+// Next allocation region (skipping humongous), growing the heap if needed.
+ShenandoahHeapRegion* ShenandoahHeap::get_next_region() {
+  ShenandoahHeapRegion* current = get_next_region_skip_humongous();
+  return check_grow_heap(current);
+}
+
+
+// Core allocation: requests larger than one region go to the humongous
+// path; otherwise allocate from the current free region, moving to the
+// next region (and accounting the abandoned tail) on failure. Returns
+// NULL only when the heap cannot supply another region (OOM).
+HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
+
+  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
+    assert(! Thread::current()->is_evacuating(), "no humongous allocation for evacuating thread");
+    return allocate_large_memory(word_size);
+  }
+
+  ShenandoahHeapRegion* my_current_region = get_current_region();
+  if (my_current_region == NULL) {
+    return NULL; // No more room to make a new region. OOM.
+  }
+  assert(my_current_region != NULL, "should have a region at this point");
+
+#ifdef ASSERT
+  if (my_current_region->is_in_collection_set()) {
+    print_heap_regions();
+  }
+#endif
+  assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
+  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
+
+  HeapWord* result;
+
+  result = my_current_region->par_allocate(word_size);
+  while (result == NULL && my_current_region != NULL) {
+    // 2nd attempt. Try next region.
+    // Remember the unusable tail of the retired region so the free-space
+    // accounting below stays consistent with what is actually allocatable.
+    size_t remaining = my_current_region->free();
+    my_current_region = get_next_region();
+    if (my_current_region == NULL) {
+      return NULL; // No more room to make a new region. OOM.
+    }
+    _free_regions->decrease_available(remaining);
+    assert(my_current_region != NULL, "should have a region at this point");
+    assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
+    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
+    result = my_current_region->par_allocate(word_size);
+  }
+
+  if (result != NULL) {
+    // Successful allocation: update live-data, used and free accounting.
+    my_current_region->increase_live_data(word_size * HeapWordSize);
+    increase_used(word_size * HeapWordSize);
+    _free_regions->decrease_available(word_size * HeapWordSize);
+  }
+  return result;
+}
+
+// Humongous allocation: find (or commit) enough contiguous regions, mark
+// the first as humongous-start and the rest as continuations, retire each
+// region (top = end) and account them as fully used. Returns the bottom
+// of the first region, or NULL on OOM.
+HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
+  if (ShenandoahTraceHumongous) {
+    gclog_or_tty->print_cr("allocating humongous object of size: "SIZE_FORMAT" KB", (words * HeapWordSize) / K);
+  }
+
+  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
+
+  assert(required_regions <= _max_regions, "sanity check");
+
+  HeapWord* result;
+  ShenandoahHeapRegion* free_regions[required_regions];
+
+  bool success = find_contiguous_free_regions(required_regions, free_regions);
+  if (! success) {
+    success = allocate_contiguous_free_regions(required_regions, free_regions);
+  }
+  if (! success) {
+    result = NULL; // Throw OOM, we cannot allocate the huge object.
+  } else {
+    // Initialize huge object flags in the regions.
+    // Only the first region carries the object's live data.
+    size_t live = words * HeapWordSize;
+    // NOTE(review): set_humongous_start(true) on region 0 is repeated in
+    // the loop below (i == 0 branch); one of the two calls is redundant.
+    free_regions[0]->set_humongous_start(true);
+    free_regions[0]->increase_live_data(live);
+
+    for (uint i = 0; i < required_regions; i++) {
+      if (i == 0) {
+        free_regions[0]->set_humongous_start(true);
+      } else {
+        free_regions[i]->set_humongous_continuation(true);
+      }
+      free_regions[i]->set_top(free_regions[i]->end());
+      increase_used(ShenandoahHeapRegion::RegionSizeBytes);
+    }
+    _free_regions->decrease_available(ShenandoahHeapRegion::RegionSizeBytes * required_regions);
+    result = free_regions[0]->bottom();
+  }
+  return result;
+}
+
+// Scan the ordered region array for num_free_regions physically adjacent,
+// completely empty regions. Fills free_regions[] and returns true on
+// success; any non-empty region resets the contiguous run.
+bool ShenandoahHeap::find_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions) {
+  if (ShenandoahTraceHumongous) {
+    gclog_or_tty->print_cr("trying to find "UINT32_FORMAT" contiguous free regions", num_free_regions);
+  }
+  uint free_regions_index = 0;
+  for (uint regions_index = 0; regions_index < _num_regions; regions_index++) {
+    // Claim a free region.
+    ShenandoahHeapRegion* region = _ordered_regions[regions_index];
+    bool free = false;
+    if (region != NULL) {
+      // A region is only reusable if it is completely empty.
+      if (region->free() == ShenandoahHeapRegion::RegionSizeBytes) {
+        assert(! region->is_humongous(), "don't reuse occupied humongous regions");
+        free = true;
+      }
+    }
+    if (! free) {
+      // Not contiguous, reset search
+      free_regions_index = 0;
+      continue;
+    }
+    assert(free_regions_index < num_free_regions, "array bounds");
+    free_regions[free_regions_index] = region;
+    free_regions_index++;
+
+    if (free_regions_index == num_free_regions) {
+      if (ShenandoahTraceHumongous) {
+        gclog_or_tty->print_cr("found "UINT32_FORMAT" contiguous free regions:", num_free_regions);
+        for (uint i = 0; i < num_free_regions; i++) {
+          gclog_or_tty->print(UINT32_FORMAT": " , i);
+          free_regions[i]->print_on(gclog_or_tty);
+        }
+      }
+      return true;
+    }
+
+  }
+  if (ShenandoahTraceHumongous) {
+    gclog_or_tty->print_cr("failed to find "UINT32_FORMAT" free regions", num_free_regions);
+  }
+  return false;
+}
+
+// Commit num_free_regions brand-new contiguous regions at the end of the
+// region array. The starting index is reserved atomically (CAS inside
+// ensure_new_regions) so concurrent humongous allocations cannot
+// interleave. Returns false if the heap cannot be extended.
+bool ShenandoahHeap::allocate_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions) {
+  // We need to be smart here to avoid interleaved allocation of regions when concurrently
+  // allocating for large objects. We get the new index into regions array using CAS, where can
+  // subsequently safely allocate new regions.
+  int new_regions_index = ensure_new_regions(num_free_regions);
+  if (new_regions_index == -1) {
+    return false;
+  }
+
+  int last_new_region = new_regions_index + num_free_regions;
+
+  // Now we can allocate new regions at the found index without being scared that
+  // other threads allocate in the same contiguous region.
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("allocate contiguous regions:");
+  }
+  for (int i = new_regions_index; i < last_new_region; i++) {
+    ShenandoahHeapRegion* region = new ShenandoahHeapRegion();
+    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * i;
+    region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, i);
+    _ordered_regions[i] = region;
+    uint index = i - new_regions_index;
+    assert(index < num_free_regions, "array bounds");
+    free_regions[index] = region;
+
+    if (ShenandoahGCVerbose) {
+      region->print();
+    }
+  }
+  return true;
+}
+
+// Allocate an object plus its leading Brooks forwarding pointer. On
+// success, initializes the Brooks pointer, updates the since-last-CM
+// allocation counter, and marks the new object if marking/evacuation is
+// in progress. On failure, dumps region state and asserts (OOM is fatal
+// in this development code).
+HeapWord* ShenandoahHeap::mem_allocate_locked(size_t size,
+					      bool* gc_overhead_limit_was_exceeded) {
+
+  // This was used for allocation while holding the Heap_lock.
+  // HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size);
+
+  HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size);
+  // NOTE(review): result is computed before the NULL check below; pointer
+  // arithmetic on a NULL filler is technically undefined (result is unused
+  // in that branch, but the computation should move inside the if).
+  HeapWord* result = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+  if (filler != NULL) {
+    initialize_brooks_ptr(filler, result);
+    _bytesAllocSinceCM += size * HeapWordSize;
+#ifdef ASSERT
+    if (ShenandoahTraceAllocations) {
+      if (*gc_overhead_limit_was_exceeded)
+	tty->print("gc_overhead_limit_was_exceeded");
+      tty->print_cr("mem_allocate_locked object of size "SIZE_FORMAT" uat addr "PTR_FORMAT, size, p2i(result));
+    }
+#endif
+
+    assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targetted region");
+    // Objects allocated during marking/evacuation are implicitly live.
+    if (_concurrent_mark_in_progress || _evacuation_in_progress) {
+      mark_current_no_checks(oop(result));
+    }
+
+    return result;
+  } else {
+    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytesAllocSinceCM);
+    {
+      MutexLockerEx ml(ShenandoahHeap_lock, true);
+      print_heap_regions();
+      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->length());
+      _free_regions->print();
+    }
+    assert(false, "Out of memory");
+    return NULL;
+  }
+}
+
+// Debug closure: prints the address and contents of each referenced oop.
+// Compressed oops are not supported by this GC yet.
+class PrintOopContents: public OopClosure {
+public:
+  void do_oop(oop* o) {
+    oop obj = *o;
+    tty->print_cr("References oop "PTR_FORMAT, p2i((HeapWord*) obj));
+    obj->print();
+  }
+
+  void do_oop(narrowOop* o) {
+    assert(false, "narrowOops aren't implemented");
+  }
+};
+
+// CollectedHeap entry point for ordinary object allocation. The debug
+// counter/verification hook is currently disabled (commented out);
+// allocation is delegated to mem_allocate_locked.
+HeapWord*  ShenandoahHeap::mem_allocate(size_t size, 
+					bool*  gc_overhead_limit_was_exceeded) {
+
+#ifdef ASSERT
+  if (ShenandoahVerify && _numAllocs > 1000000) {
+    _numAllocs = 0;
+  //   VM_ShenandoahVerifyHeap op(0, 0, GCCause::_allocation_failure);
+  //   if (Thread::current()->is_VM_thread()) {
+  //     op.doit();
+  //   } else {
+  //     // ...and get the VM thread to execute it.
+  //     VMThread::execute(&op);
+  //   }
+  }
+  _numAllocs++;
+#endif
+
+  // MutexLockerEx ml(ShenandoahHeap_lock, true);
+  HeapWord* result = mem_allocate_locked(size, gc_overhead_limit_was_exceeded);
+  return result;
+}
+
+// Object closure used during parallel evacuation: copies each live,
+// not-yet-forwarded object (its Brooks pointer still points to itself)
+// out of the collection set.
+class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
+private:
+  ShenandoahHeap* _heap;
+  Thread* _thread;    // cached evacuating thread, avoids Thread::current() per object
+  public:
+  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
+    _heap(heap), _thread(Thread::current()) { 
+  }
+
+  void do_object(oop p) {
+
+#ifdef ASSERT
+    if (ShenandoahTraceEvacuations) {
+      tty->print_cr("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT, p2i((HeapWord*) p));
+    }
+#endif
+
+    // Only evacuate marked objects that have not already been forwarded.
+    if (_heap->is_marked_current(p) && p == ShenandoahBarrierSet::resolve_oop_static_not_null(p)) {
+      _heap->evacuate_object(p, _thread);
+    }
+  }
+};
+      
+//fixme
+//fixme
+// Initialize the Brooks forwarding pointer of obj to point at itself.
+// NOTE(review): the 'filler' and 'new_obj' parameters are unused here.
+void ShenandoahHeap::initialize_brooks_ptr(HeapWord* filler, HeapWord* obj, bool new_obj) {
+  BrooksPointer brooks_ptr = BrooksPointer::get(oop(obj));
+  brooks_ptr.set_forwardee(oop(obj));
+}
+
+// Self-forward p's Brooks pointer (object is its own forwardee).
+void ShenandoahHeap::initialize_brooks_ptr(oop p) {
+  BrooksPointer brooks_ptr = BrooksPointer::get(p);
+  brooks_ptr.set_forwardee(p);
+}
+
+// Verification closure run over an evacuated from-region: every marked
+// object must be forwarded to a distinct copy with identical class and
+// size, and the copy itself must not be forwarded again.
+class VerifyEvacuatedObjectClosure : public ObjectClosure {
+
+public:
+  
+  void do_object(oop p) {
+    if (ShenandoahHeap::heap()->is_marked_current(p)) {
+      oop p_prime = oopDesc::bs()->resolve_oop(p);
+      assert(p != p_prime, "Should point to evacuated copy");
+#ifdef ASSERT
+      if (p->klass() != p_prime->klass()) {
+	tty->print_cr("copy has different class than original:");
+	p->klass()->print_on(tty);
+	p_prime->klass()->print_on(tty);
+      }
+#endif
+      assert(p->klass() == p_prime->klass(), err_msg("Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime)));
+      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
+      assert(p->size() == p_prime->size(), "Should be the same size");
+      assert(p_prime == oopDesc::bs()->resolve_oop(p_prime), "One forward once");
+    }
+  }
+};
+
+// Run VerifyEvacuatedObjectClosure over all objects of a from-region.
+void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
+  if (ShenandoahGCVerbose) {
+    tty->print("Verifying From Region\n");
+    from_region->print();
+  }
+
+  VerifyEvacuatedObjectClosure verify_evacuation;
+  from_region->object_iterate_interruptible(&verify_evacuation, false);
+}
+
+// Evacuate all marked objects of one collection-set region; optionally
+// verify the result afterwards (skipped when concgc was cancelled).
+void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
+
+  assert(from_region->getLiveData() > 0, "all-garbage regions are reclaimed earlier");
+
+  ParallelEvacuateRegionObjectClosure evacuate_region(this);
+  
+#ifdef ASSERT
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("parallel_evacuate_region starting from_region "INT32_FORMAT": free_regions = "SIZE_FORMAT,  from_region->region_number(), _free_regions->available_regions());
+  }
+#endif
+
+  marked_object_iterate(from_region, &evacuate_region);
+
+#ifdef ASSERT
+  if (ShenandoahVerify && ! cancelled_concgc()) {
+    verify_evacuated_region(from_region);
+  }
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("parallel_evacuate_region after from_region = "INT32_FORMAT": free_regions = "SIZE_FORMAT, from_region->region_number(), _free_regions->available_regions());
+  }
+#endif
+}
+
+// Gang task: worker threads repeatedly claim collection-set regions and
+// evacuate them until the set is drained or concgc is cancelled. Each
+// worker retires its GCLAB at the end.
+class ParallelEvacuationTask : public AbstractGangTask {
+private:
+  ShenandoahHeap* _sh;
+  ShenandoahHeapRegionSet* _cs;   // the collection set to drain
+  
+public:  
+  // NOTE(review): initializer list order (_cs, _sh) differs from the
+  // declaration order (_sh, _cs); members are initialized in declaration
+  // order, which is harmless here but triggers -Wreorder.
+  ParallelEvacuationTask(ShenandoahHeap* sh, 
+			 ShenandoahHeapRegionSet* cs) :
+    AbstractGangTask("Parallel Evacuation Task"), 
+    _cs(cs),
+    _sh(sh) {}
+  
+  void work(uint worker_id) {
+
+    ShenandoahHeapRegion* from_hr = _cs->claim_next();
+
+    while (from_hr != NULL) {
+      if (ShenandoahGCVerbose) {
+     	tty->print_cr("Thread "INT32_FORMAT" claimed Heap Region "INT32_FORMAT,
+     		   worker_id,
+     		   from_hr->region_number());
+	from_hr->print();
+      }
+
+      assert(from_hr->getLiveData() > 0, "all-garbage regions are reclaimed early");
+      _sh->parallel_evacuate_region(from_hr);
+
+      if (_sh->cancelled_concgc()) {
+	if (ShenandoahTracePhases) {
+	  tty->print_cr("Cancelled concurrent evacuation");
+	}
+        break;
+      }
+      from_hr = _cs->claim_next();
+    }
+
+    // Retire this worker's GCLAB so the heap stays parsable.
+    Thread::current()->gclab().make_parsable(true);
+  }
+};
+
+// Region closure that recycles fully-evacuated (collection-set) regions
+// back onto the free list and tracks how many bytes were reclaimed. If
+// concgc was cancelled it only clears collection-set status.
+class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahHeap* _heap;
+  size_t _bytes_reclaimed;
+public:
+  // NOTE(review): _bytes_reclaimed is not initialized here; callers must
+  // invoke clear_bytes_reclaimed() before iterating (recycle_dirty_regions
+  // does), otherwise the counter starts from garbage.
+  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()) {}
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+
+    // If evacuation has been cancelled, we can't recycle regions, we only
+    // clear their collection-set status.
+    if (_heap->cancelled_concgc()) {
+      r->set_is_in_collection_set(false);
+      return false;
+    }
+
+    if (r->is_in_collection_set()) {
+      //      tty->print_cr("recycling region "INT32_FORMAT":", r->region_number());
+      //      r->print_on(tty);
+      //      tty->print_cr(" ");
+      _heap->decrease_used(r->used());
+      _bytes_reclaimed += r->used();
+      r->recycle();
+      _heap->free_regions()->append(r);
+    }
+
+    return false;
+  }
+  size_t bytes_reclaimed() { return _bytes_reclaimed;}
+  void clear_bytes_reclaimed() {_bytes_reclaimed = 0;}
+};
+
+// Reclaim all collection-set regions, report the reclaimed bytes to the
+// policy, and reset the in-cset fast-test table.
+void ShenandoahHeap::recycle_dirty_regions() {
+  RecycleDirtyRegionsClosure cl;
+  cl.clear_bytes_reclaimed();
+
+  heap_region_iterate(&cl);
+
+  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
+  clear_cset_fast_test();
+}
+
+// Accessor for the free-region set.
+ShenandoahHeapRegionSet* ShenandoahHeap::free_regions() {
+  return _free_regions;
+}
+
+// Print every heap region to the given stream (debugging aid).
+void ShenandoahHeap::print_heap_regions(outputStream* st) const {
+  PrintHeapRegionsClosure pc1(st);
+  heap_region_iterate(&pc1);
+}
+
+// Debug oop closure: prints each reference slot with a running index and
+// a caller-supplied prefix, distinguishing in-heap oops, suspicious
+// out-of-heap values, and NULLs.
+class PrintAllRefsOopClosure: public ExtendedOopClosure {
+private:
+  int _index;            // running slot counter, for readable output
+  const char* _prefix;   // tag prepended to every line
+
+public:
+  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}
+
+  void do_oop(oop* p)       {
+    oop o = *p;
+    if (o != NULL) {
+      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
+	tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass()));
+      } else {
+	//        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set()));
+	tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o));
+      }
+    } else {
+      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
+    }
+    _index++;
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+
+};
+
+// Debug object closure: prints each in-heap object's header line, then
+// all of its references via PrintAllRefsOopClosure.
+class PrintAllRefsObjectClosure : public ObjectClosure {
+  const char* _prefix;
+
+public:
+  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}
+
+  void do_object(oop p) {
+    if (ShenandoahHeap::heap()->is_in(p)) {
+	tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass()));
+	PrintAllRefsOopClosure cl(_prefix);
+	p->oop_iterate(&cl);
+      }
+  }
+};
+
+// Debug dump of the whole reference graph: roots first, then every heap
+// object with its outgoing references. Retires TLABs so the heap is
+// walkable.
+void ShenandoahHeap::print_all_refs(const char* prefix) {
+  tty->print_cr("printing all references in the heap");
+  tty->print_cr("root references:");
+
+  ensure_parsability(false);
+
+  PrintAllRefsOopClosure cl(prefix);
+  roots_iterate(&cl);
+
+  tty->print_cr("heap references:");
+  PrintAllRefsObjectClosure cl2(prefix);
+  object_iterate(&cl2);
+}
+
+// Post-marking verification closure: every reachable reference must be
+// marked, unforwarded, and must not point into the collection set. On a
+// failed check it dumps extensive diagnostic state before asserting.
+class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
+private:
+  ShenandoahHeap*  _heap;
+
+public:
+  VerifyAfterMarkingOopClosure() :
+    _heap(ShenandoahHeap::heap()) { }
+
+  void do_oop(oop* p)       {
+    oop o = *p;
+    if (o != NULL) {
+      if (! _heap->is_marked_current(o)) {
+	_heap->print_heap_regions();
+	_heap->print_all_refs("post-mark");
+	tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s", 
+		      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_current(o)));
+	_heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());
+
+        tty->print_cr("oop class: %s", o->klass()->internal_name());
+	if (_heap->is_in(p)) {
+	  oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
+	  tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
+	  referrer->print();
+	  _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
+	}
+        tty->print_cr("heap region containing object:");
+	_heap->heap_region_containing(o)->print();
+        tty->print_cr("heap region containing referrer:");
+	_heap->heap_region_containing(p)->print();
+        tty->print_cr("heap region containing forwardee:");
+	_heap->heap_region_containing(oopDesc::bs()->resolve_oop(o))->print();
+      }
+      assert(o->is_oop(), "oop must be an oop");
+      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
+      if (! (o == oopDesc::bs()->resolve_oop(o))) {
+        tty->print_cr("oops has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o),  BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->resolve_oop(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->resolve_oop(o))->is_in_collection_set()));
+        tty->print_cr("oop class: %s", o->klass()->internal_name());
+      }
+      assert(o == oopDesc::bs()->resolve_oop(o), "oops must not be forwarded");
+      assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
+      assert(_heap->is_marked_current(o), "live oops must be marked current");
+    }
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+
+};
+
+// Object closure adapter: applies the wrapped oop closure only to objects
+// marked in the current marking cycle.
+class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
+private:
+  ShenandoahHeap* _heap;
+  ExtendedOopClosure* _cl;   // applied to each marked object's fields
+public:
+  IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
+    _heap(ShenandoahHeap::heap()), _cl(cl) {};
+
+  void do_object(oop p) {
+    if (_heap->is_marked_current(p)) {
+      p->oop_iterate(_cl);
+    }
+  }
+
+};
+
+// NOTE(review): this closure is an exact duplicate of
+// IterateMarkedCurrentObjectsClosure above (same predicate, same body);
+// one of the two could be removed.
+class IterateMarkedObjectsClosure: public ObjectClosure {
+private:
+  ShenandoahHeap* _heap;
+  ExtendedOopClosure* _cl;
+public:
+  IterateMarkedObjectsClosure(ExtendedOopClosure* cl) :
+    _heap(ShenandoahHeap::heap()), _cl(cl) {};
+
+  void do_object(oop p) {
+    if (_heap->is_marked_current(p)) {
+      p->oop_iterate(_cl);
+    }
+  }
+
+};
+
+// Verify the heap invariants after concurrent marking: size accounting,
+// then a full walk of roots and marked objects with
+// VerifyAfterMarkingOopClosure.
+void ShenandoahHeap::verify_heap_after_marking() {
+
+  verify_heap_size_consistency();
+
+  if (ShenandoahGCVerbose) {
+    tty->print("verifying heap after marking\n");
+  }
+  ensure_parsability(false);
+  VerifyAfterMarkingOopClosure cl;
+  roots_iterate(&cl);
+
+  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
+  object_iterate(&marked_oops);
+}
+
+// Safepoint preparation for concurrent evacuation: recycle the previous
+// cycle's dirty regions, retire TLABs, reclaim humongous garbage, and let
+// the policy choose the new collection and free sets. Cancels the cycle
+// if nothing was selected for evacuation.
+void ShenandoahHeap::prepare_for_concurrent_evacuation() {
+  if (!cancelled_concgc()) {
+
+    recycle_dirty_regions();
+
+      ensure_parsability(true);
+
+      // NOTE: This needs to be done during a stop the world pause, because
+      // putting regions into the collection set concurrently with Java threads
+      // will create a race. In particular, acmp could fail because when we
+      // resolve the first operand, the containing region might not yet be in
+      // the collection set, and thus return the original oop. When the 2nd
+      // operand gets resolved, the region could be in the collection set
+      // and the oop gets evacuated. If both operands have originally been
+      // the same, we get false negatives.
+      ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
+      regions.reclaim_humongous_regions();
+      _collection_set->clear();
+      _free_regions->clear();
+      _shenandoah_policy->choose_collection_and_free_sets(&regions, _collection_set, _free_regions);
+
+      if (PrintGCTimeStamps) {
+	gclog_or_tty->print("Collection set used = " SIZE_FORMAT " K live = " SIZE_FORMAT " K reclaimable = " SIZE_FORMAT " K\n",
+			    _collection_set->used() / K, _collection_set->live_data() / K, _collection_set->garbage() / K);
+      }
+
+      // An empty collection set means there is nothing to evacuate.
+      if (_collection_set->length() == 0)
+	cancel_concgc();
+  
+      _bytesAllocSinceCM = 0;
+
+      Universe::update_heap_info_at_gc();
+    }
+}
+    
+
+// Oop closure that rewrites a reference slot to the to-space copy of its
+// target, if the target has been evacuated.
+class ShenandoahUpdateRootsClosure: public ExtendedOopClosure {
+
+  void do_oop(oop* p)       {
+    ShenandoahHeap::heap()->maybe_update_oop_ref(p);
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+};
+
+// At a safepoint, update all GC roots (strong roots, CLDs, code blobs,
+// and JNI weak handles) to point at to-space copies. Brackets the walk
+// with C2 derived-pointer table maintenance.
+void ShenandoahHeap::update_roots() {
+
+  COMPILER2_PRESENT(DerivedPointerTable::clear());
+
+  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
+
+  ShenandoahUpdateRootsClosure cl;
+  CodeBlobToOopClosure blobsCl(&cl, false);
+  CLDToOopClosure cldCl(&cl);
+
+  ClassLoaderDataGraph::clear_claimed_marks();
+
+  {
+    ShenandoahRootProcessor rp(this, 1);
+    rp.process_all_roots(&cl, &cldCl, &blobsCl);
+    ShenandoahIsAliveClosure is_alive;
+    JNIHandles::weak_oops_do(&is_alive, &cl);
+  }
+
+  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+}
+
+// Object closure: for each marked heap object, update all of its
+// reference fields to to-space copies via ShenandoahUpdateRootsClosure.
+class ShenandoahUpdateObjectsClosure : public ObjectClosure {
+  ShenandoahHeap* _heap;
+
+public:
+  ShenandoahUpdateObjectsClosure() :
+    _heap(ShenandoahHeap::heap()) {
+  }
+
+  void do_object(oop p) {
+    ShenandoahUpdateRootsClosure refs_cl;
+    assert(ShenandoahHeap::heap()->is_in(p), "only update objects in heap (where else?)");
+
+    if (_heap->is_marked_current(p)) {
+      p->oop_iterate(&refs_cl);
+    }
+  }
+
+};
+
+// Gang task for the concurrent update-references phase: workers claim
+// regions, update references inside live objects of non-cset,
+// non-humongous-continuation regions, and reset each region's mark
+// bitmap. Stops early if concgc is cancelled.
+class ParallelUpdateRefsTask : public AbstractGangTask {
+private:
+  ShenandoahHeapRegionSet* _regions;
+
+public:
+  ParallelUpdateRefsTask(ShenandoahHeapRegionSet* regions) :
+    AbstractGangTask("Parallel Update References Task"), 
+  _regions(regions) {
+  }
+
+  void work(uint worker_id) {
+    ShenandoahUpdateObjectsClosure update_refs_cl;
+    ShenandoahHeapRegion* region = _regions->claim_next();
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    while (region != NULL && ! heap->cancelled_concgc()) {
+      if ((! region->is_in_collection_set()) && ! region->is_humongous_continuation()) {
+	heap->marked_object_iterate_careful(region, &update_refs_cl);
+      }
+      // Bitmap is reset for every claimed region, including cset ones.
+      heap->reset_mark_bitmap_range(region->bottom(), region->end());
+      region = _regions->claim_next();
+    }
+    if (ShenandoahTracePhases && heap->cancelled_concgc()) {
+      tty->print_cr("Cancelled concurrent update references");
+    }
+  }
+};
+
+// Thread closure that makes each thread's GCLAB parsable, optionally
+// retiring it (_retire) so it cannot be used for further allocation.
+class RetireTLABClosure : public ThreadClosure {
+private:
+  bool _retire;
+
+public:
+  RetireTLABClosure(bool retire) : _retire(retire) {
+  }
+
+  void do_thread(Thread* thread) {
+    thread->gclab().make_parsable(_retire);
+  }
+};
+
+// Make the heap walkable: delegate TLAB handling to the base class, then
+// make every Java and GC thread's GCLAB parsable as well.
+void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
+  CollectedHeap::ensure_parsability(retire_tlabs);
+
+  RetireTLABClosure cl(retire_tlabs);
+  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+    cl.do_thread(thread);
+  }
+  gc_threads_do(&cl);
+}
+
+// Safepoint preparation for the update-references phase: retire labs,
+// fix the concurrent-iteration limits of all regions, update roots (with
+// from-space protection toggled off/on when verification is enabled), and
+// raise the update-references-in-progress flag.
+void ShenandoahHeap::prepare_for_update_references() {
+  ensure_parsability(true);
+
+  ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
+  regions.set_concurrent_iteration_safe_limits();
+
+  if (ShenandoahVerifyReadsToFromSpace) {
+    set_from_region_protection(false);
+
+    // We need to update the roots so that they are ok for C2 when returning from the safepoint.
+    update_roots();
+
+    set_from_region_protection(true);
+
+  } else {
+    // We need to update the roots so that they are ok for C2 when returning from the safepoint.
+    update_roots();
+  }
+
+  set_update_references_in_progress(true);
+}
+
+// Concurrent update-references phase: run ParallelUpdateRefsTask over all
+// regions with the concurrent workers, then (unless cancelled) update the
+// root set, record allocation statistics, and clear the phase flags.
+void ShenandoahHeap::update_references() {
+
+  ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
+  ParallelUpdateRefsTask task = ParallelUpdateRefsTask(&regions);
+  conc_workers()->set_active_workers(_max_conc_workers);
+  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_uprefs);
+  conc_workers()->run_task(&task);
+  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_uprefs);
+  conc_workers()->set_active_workers(_max_conc_workers);
+
+  if (! cancelled_concgc()) {
+    // Root refs are updated in a VM operation (safepoint) when running
+    // concurrently; directly otherwise (already at a safepoint).
+    VM_ShenandoahUpdateRootRefs update_roots;
+    if (ShenandoahConcurrentUpdateRefs) {
+      VMThread::execute(&update_roots);
+    } else {
+      update_roots.doit();
+    }
+
+    _allocated_last_gc = used() - _used_start_gc;
+    size_t max_allocated_gc = MAX2(_max_allocated_gc, _allocated_last_gc);
+    /*
+      tty->print_cr("prev max_allocated_gc: "SIZE_FORMAT", new max_allocated_gc: "SIZE_FORMAT", allocated_last_gc: "SIZE_FORMAT" diff %f", _max_allocated_gc, max_allocated_gc, _allocated_last_gc, ((double) max_allocated_gc/ (double) _allocated_last_gc));
+    */
+    _max_allocated_gc = max_allocated_gc;
+
+    // Update-references completed, no need to update-refs during marking.
+    set_need_update_refs(false);
+    set_need_reset_bitmaps(false);
+  }
+
+  Universe::update_heap_info_at_gc();
+
+  set_update_references_in_progress(false);
+}
+
+
+// Root closure used during evacuation: for every root slot whose target
+// lies in the collection set, evacuate the object (if not already
+// forwarded) and store the to-space address back into the slot.
+class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
+private:
+  ShenandoahHeap* _heap;
+  Thread* _thread;   // cached evacuating thread for evacuate_object()
+public:
+  ShenandoahEvacuateUpdateRootsClosure() :
+    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
+  }
+
+  void do_oop(oop* p) {
+    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
+
+    oop obj = oopDesc::load_heap_oop(p);
+    if (obj != NULL && _heap->in_cset_fast_test((HeapWord*) obj)) {
+      assert(_heap->is_marked_current(obj), err_msg("only evacuate marked objects %d %d", _heap->is_marked_current(obj), _heap->is_marked_current(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))));
+      oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
+      if (resolved == obj) {
+	resolved = _heap->evacuate_object(obj, _thread);
+      }
+      oopDesc::store_heap_oop(p, resolved);
+    }
+#ifdef ASSERT
+    else if (! oopDesc::is_null(obj)) {
+      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s", p2i(p), p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_in(obj)), BOOL_TO_STR(_heap->in_cset_fast_test(obj)), BOOL_TO_STR(_heap->is_marked_current(obj)));
+    }
+#endif
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+};
+
+// Gang task that evacuates and updates all strong roots (including CLDs
+// and code blobs) in parallel via the shared root processor.
+class ShenandoahEvacuateUpdateStrongRootsTask : public AbstractGangTask {
+  ShenandoahRootProcessor* _rp;
+public:
+
+  ShenandoahEvacuateUpdateStrongRootsTask(ShenandoahRootProcessor* rp) :
+    AbstractGangTask("Shenandoah evacuate and update strong roots"),
+    _rp(rp)
+  {
+    // Nothing else to do.
+  }
+
+  void work(uint worker_id) {
+    ShenandoahEvacuateUpdateRootsClosure cl;
+    CodeBlobToOopClosure blobsCl(&cl, false);
+    CLDToOopClosure cldCl(&cl);
+
+    _rp->process_all_roots(&cl, &cldCl, &blobsCl);
+  }
+};
+
+// Gang task that evacuates and updates weak roots (JNI weak handles and,
+// when reference processing is on, discovered references). Run with a
+// single worker -- see the comment at the call site.
+class ShenandoahEvacuateUpdateWeakRootsTask : public AbstractGangTask {
+public:
+
+  ShenandoahEvacuateUpdateWeakRootsTask() : AbstractGangTask("Shenandoah evacuate and update weak roots") {
+    // Nothing else to do.
+  }
+
+  void work(uint worker_id) {
+    ShenandoahEvacuateUpdateRootsClosure cl;
+    ShenandoahIsAliveClosure is_alive;
+    JNIHandles::weak_oops_do(&is_alive, &cl);
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    if (ShenandoahProcessReferences) {
+      heap->ref_processor()->weak_oops_do(&cl);
+    }
+  }
+};
+
+// At a safepoint, evacuate and update all roots: strong roots with the
+// full parallel worker gang, then weak roots with a single worker.
+// From-space protection is dropped for the duration when read
+// verification is enabled.
+void ShenandoahHeap::evacuate_and_update_roots() {
+
+  COMPILER2_PRESENT(DerivedPointerTable::clear());
+  
+  if (ShenandoahVerifyReadsToFromSpace) {
+    set_from_region_protection(false);
+  }
+
+  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
+  ClassLoaderDataGraph::clear_claimed_marks();
+
+  {
+    ShenandoahRootProcessor rp(this, _max_parallel_workers);
+    ShenandoahEvacuateUpdateStrongRootsTask strong_roots_task(&rp);
+    workers()->set_active_workers(_max_parallel_workers);
+    workers()->run_task(&strong_roots_task);
+  }
+
+  // We process weak roots using only 1 worker thread, multi-threaded weak roots
+  // processing is not implemented yet. We can't use the VMThread itself, because
+  // we need to grab the Heap_lock.
+  {
+    ShenandoahEvacuateUpdateWeakRootsTask weak_roots_task;
+    workers()->set_active_workers(1);
+    workers()->run_task(&weak_roots_task);
+    workers()->set_active_workers(_max_parallel_workers);
+  }
+
+  if (ShenandoahVerifyReadsToFromSpace) {
+    set_from_region_protection(true);
+  }
+
+  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+
+}
+
+
+// Drive the evacuation phase: run parallel evacuation, then -- when
+// evacuating inside a safepoint (non-concurrent mode) -- refresh the GC
+// roots so compiled code sees only to-space references on return, and
+// optionally verify the heap afterwards.
+void ShenandoahHeap::do_evacuation() {
+  assert(Thread::current()->is_VM_thread() || ShenandoahConcurrentEvacuation, "Only evacuate from VMThread unless we do concurrent evacuation");
+
+  parallel_evacuate();
+
+  if (! ShenandoahConcurrentEvacuation) {
+    // We need to make sure that after leaving the safepoint, all
+    // GC roots are up-to-date. This is an assumption built into
+    // the hotspot compilers, especially C2, that allows it to
+    // do optimizations like lifting barriers outside of a loop.
+
+    if (ShenandoahVerifyReadsToFromSpace) {
+      set_from_region_protection(false);
+
+      update_roots();
+
+      set_from_region_protection(true);
+
+    } else {
+      update_roots();
+    }
+  }
+
+  if (ShenandoahVerify && ! cancelled_concgc()) {
+    // Verification must run in the VMThread; dispatch if we are not it.
+    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
+    if (Thread::current()->is_VM_thread()) {
+      verify_after_evacuation.doit();
+    } else {
+      VMThread::execute(&verify_after_evacuation);
+    }
+  }
+
+}
+
// Evacuate all regions in the collection set using the concurrent worker gang.
// Becomes a no-op if the concurrent GC has already been cancelled.
void ShenandoahHeap::parallel_evacuate() {

  if (! cancelled_concgc()) {
    assert(Thread::current()->is_VM_thread() || ShenandoahConcurrentEvacuation, "Only evacuate from VMThread unless we do concurrent evacuation");

    if (ShenandoahGCVerbose) {
      tty->print_cr("starting parallel_evacuate");
      //    PrintHeapRegionsClosure pc1;
      //    heap_region_iterate(&pc1);
    }

    // Timing: this whole phase is accounted as concurrent evacuation.
    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (ShenandoahGCVerbose) {
      tty->print("Printing all available regions");
      print_heap_regions();
    }

    if (ShenandoahPrintCollectionSet) {
      tty->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->length());
      _collection_set->print();
      
      tty->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->length());
      _free_regions->print();

      //    if (_collection_set->length() == 0)
      //      print_heap_regions();      
    }

    // Run the evacuation with the full concurrent worker gang.
    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

    conc_workers()->set_active_workers(_max_conc_workers);
    conc_workers()->run_task(&evacuationTask);
    //workers()->set_active_workers(_max_parallel_workers);

    if (ShenandoahGCVerbose) {

      tty->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->available_regions());
      _collection_set->print();

      tty->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n", _free_regions->available_regions());
      _free_regions->print();

      tty->print_cr("finished parallel_evacuate");
      print_heap_regions();

      tty->print_cr("all regions after evacuation:");
      print_heap_regions();
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
  }
}
+
+class VerifyEvacuationClosure: public ExtendedOopClosure {
+private:
+  ShenandoahHeap*  _heap;
+  ShenandoahHeapRegion* _from_region;
+
+public:
+  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
+    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
+
+  void do_oop(oop* p)       {
+    oop heap_oop = oopDesc::load_heap_oop(p);
+    if (! oopDesc::is_null(heap_oop)) {
+      guarantee(! _from_region->is_in(heap_oop), err_msg("no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop)));
+    }
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+
+};
+
// Apply cl to all strong roots, including class loader data and code blobs.
// Must run at a safepoint; uses a single-threaded root processor.
void ShenandoahHeap::roots_iterate(ExtendedOopClosure* cl) {

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  // Adapt the oop closure for code blobs and CLDs.
  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  // Reset claim marks so every CLD is visited exactly once by this pass.
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, &cldCl, &blobsCl);
}
+
// Apply cl to all weak roots: the reference processor's weak oops (when
// reference processing is enabled) and all JNI weak handles.
void ShenandoahHeap::weak_roots_iterate(ExtendedOopClosure* cl) {
  if (ShenandoahProcessReferences) {
    ref_processor()->weak_oops_do(cl);
  }
  // Treat every JNI weak handle as alive: we only want to visit, not clear.
  ShenandoahAlwaysTrueClosure always_true;
  JNIHandles::weak_oops_do(&always_true, cl);
}
+
+void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
+
+  VerifyEvacuationClosure rootsCl(from_region);
+  roots_iterate(&rootsCl);
+
+}
+
// Shenandoah supports thread-local allocation buffers.
bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}
+
+
+size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
+  ShenandoahHeapRegion* current = get_current_region_skip_humongous();
+  if (current == NULL) 
+    return 0;
+  else if (current->free() > MinTLABSize) {
+    return current->free();
+  } else {
+    return MinTLABSize;
+  }
+}
+
// A TLAB can never span more than a single heap region.
size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}
+
// Thread closure that resizes the GCLAB of each visited thread.
class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};
+
+void ShenandoahHeap::resize_all_tlabs() {
+  CollectedHeap::resize_all_tlabs();
+
+  if (PrintTLAB && Verbose) {
+    tty->print_cr("Resizing Shenandoah GCLABs...");
+  }
+
+  ResizeGCLABClosure cl;
+  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+    cl.do_thread(thread);
+  }
+  gc_threads_do(&cl);
+
+  if (PrintTLAB && Verbose) {
+    tty->print_cr("Done resizing Shenandoah GCLABs...");
+  }
+}
+
// Thread closure that folds a thread's GCLAB statistics into the global
// totals and then resets the per-thread counters.
class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};
+
+void ShenandoahHeap::accumulate_statistics_all_gclabs() {
+
+  AccumulateStatisticsGCLABClosure cl;
+  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+    cl.do_thread(thread);
+  }
+  gc_threads_do(&cl);
+}
+
// No card-marking barriers needed for stores into freshly TLAB-allocated objects.
bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}
+
// No pre-barrier work needed for newly allocated objects.
oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}
+
// Initializing stores into new objects need no barrier.
bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}
+
// Shenandoah does not use card marking.
bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}
+
// Heap inspection (e.g. jmap -histo) is not supported yet.
bool ShenandoahHeap::supports_heap_inspection() const {
  return false;
}
+
// Largest safe single allocation, in words: one full heap region.
size_t ShenandoahHeap::unsafe_max_alloc() {
  return ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
}
+
// External GC request entry point. User-requested GCs and allocation failures
// cancel the concurrent cycle and hand a full GC to the concurrent GC thread;
// a GC-locker cause resumes evacuation that was deferred by JNI critical regions.
void ShenandoahHeap::collect(GCCause::Cause cause) {
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      if (ShenandoahTraceFullGC) {
        gclog_or_tty->print_cr("Shenandoah-full-gc: requested full GC");
      }
      cancel_concgc();
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {

    if (ShenandoahTraceFullGC) {
      gclog_or_tty->print_cr("Shenandoah-full-gc: full GC for allocation failure heap free: "SIZE_FORMAT", available: "SIZE_FORMAT, capacity() - used(), free_regions()->available());
    }
    cancel_concgc();
    // On allocation failure, be aggressive: clear soft references too.
    collector_policy()->set_should_clear_all_soft_refs(true);
      _concurrent_gc_thread->do_full_gc(cause);

  } else if (cause == GCCause::_gc_locker) {

    if (ShenandoahTraceJNICritical) {
      gclog_or_tty->print_cr("Resuming deferred evacuation after JNI critical regions");
    }

    jni_critical()->notify_jni_critical();
  }
}
+
// Intentionally a no-op: full collections are driven through collect()/the
// concurrent GC thread instead of this CollectedHeap hook.
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}
+
// Adaptive size policy is not implemented for Shenandoah.
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
  
}
+
// Accessor for the Shenandoah-specific collector policy.
ShenandoahCollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}
+
+
+HeapWord* ShenandoahHeap::block_start(const void* addr) const {
+  Space* sp = space_containing(addr);
+  if (sp != NULL) {
+    return sp->block_start(addr);
+  }
+  return NULL;
+}
+
+size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
+  Space* sp = space_containing(addr);
+  assert(sp != NULL, "block_size of address outside of heap");
+  return sp->block_size(addr);
+}
+
+bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
+  Space* sp = space_containing(addr);
+  return sp->block_is_obj(addr);
+}
+
// Not tracked yet; always reports 0 milliseconds.
jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}
+
// Make the heap parsable before verification; only safe at a safepoint
// (or when TLABs are disabled, so there is nothing to retire).
void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}
+
// Print both the parallel and the concurrent worker gangs.
void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  conc_workers()->print_worker_threads_on(st);
}
+
// Apply tcl to every GC worker thread, parallel and concurrent gangs alike.
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  conc_workers()->threads_do(tcl);
}
+
// Print collector phase timings at VM exit when -XX:+PrintGCDetails is set.
void ShenandoahHeap::print_tracing_info() const {
  if (PrintGCDetails) {
    _shenandoah_policy->print_tracing_info();
  }
}
+
+class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
+private:
+  ShenandoahHeap*  _heap;
+  VerifyOption     _vo;
+  bool             _failures;
+public:
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  ShenandoahVerifyRootsClosure(VerifyOption vo) :
+    _heap(ShenandoahHeap::heap()),
+    _vo(vo),
+    _failures(false) { }
+
+  bool failures() { return _failures; }
+
+  void do_oop(oop* p)       {
+    if (*p != NULL) {
+      oop heap_oop = oopDesc::load_heap_oop(p);
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if (!obj->is_oop()) {
+        { // Just for debugging.
+	  gclog_or_tty->print_cr("Root location "PTR_FORMAT
+				 "verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
+	  //	  obj->print_on(gclog_or_tty);
+        }
+      }
+      guarantee(obj->is_oop(), "is_oop");
+    }
+  }
+
+  void do_oop(narrowOop* p) {
+    Unimplemented();
+  }
+
+};
+
+class ShenandoahVerifyHeapClosure: public ObjectClosure {
+private:
+  ShenandoahVerifyRootsClosure _rootsCl;
+public:
+  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
+    _rootsCl(rc) {};
+
+  void do_object(oop p) {
+    _rootsCl.do_oop(&p);
+  }
+};
+
// Klass closure that forwards all oops embedded in a Klass to an oop closure.
class ShenandoahVerifyKlassClosure: public KlassClosure {
  OopClosure *_oop_closure;
 public:
  ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);
  }
};
+
+void ShenandoahHeap::verify(bool silent , VerifyOption vo) {
+  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
+
+    ShenandoahVerifyRootsClosure rootsCl(vo);
+
+    assert(Thread::current()->is_VM_thread(),
+	   "Expected to be executed serially by the VM thread at this point");
+
+    roots_iterate(&rootsCl);
+
+    bool failures = rootsCl.failures();
+    if (ShenandoahGCVerbose)
+      gclog_or_tty->print("verify failures: %s", BOOL_TO_STR(failures));
+
+    ShenandoahVerifyHeapClosure heapCl(rootsCl);
+
+    object_iterate(&heapCl);
+    // TODO: Implement rest of it.
+#ifdef ASSERT_DISABLED
+    verify_live();
+#endif
+  } else {
+    if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
+  }
+}
+
// Total space currently available for TLAB allocation: the free set.
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->available();
}
+
// Region closure that applies an object closure to every object in each region.
class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->object_iterate_interruptible(_cl, false);
    // Never abort the region iteration.
    return false;
  }
};
+
+class ShenandoahIterateUpdateClosure: public ShenandoahHeapRegionClosure {
+  ObjectClosure* _cl;
+public:
+  ShenandoahIterateUpdateClosure(ObjectClosure *cl) : _cl(cl) {}
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    if ((! r->is_in_collection_set()) && !r->is_humongous_continuation()) {
+      r->object_iterate_interruptible(_cl, false);
+    }
+    return false;
+  }
+};
+
+void ShenandoahHeap::cleanup_after_cancelconcgc() {
+  if (need_update_refs()) {
+  ShenandoahUpdateObjectsClosure update_refs_cl;  
+  ShenandoahIterateUpdateClosure blk(&update_refs_cl);
+  heap_region_iterate(&blk, false, false);
+  }
+}
+
// Region closure that iterates objects "carefully" (tolerating unparsable
// parts) using an ObjectClosureCareful.
class ShenandoahIterateObjectClosureCarefulRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosureCareful* _cl;
public:
  ShenandoahIterateObjectClosureCarefulRegionClosure(ObjectClosureCareful* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->object_iterate_careful(_cl);
    // Never abort the region iteration.
    return false;
  }
};
+
// Apply cl to every object in the heap, skipping humongous continuations
// (their objects are visited from the humongous start region).
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}
+
// Careful variant of object_iterate() for use when parts of the heap may
// not be parsable.
void ShenandoahHeap::object_iterate_careful(ObjectClosureCareful* cl) {
  ShenandoahIterateObjectClosureCarefulRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}
+
// Not implemented for Shenandoah.
void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  Unimplemented();
}
+
// Iterate all marked objects in the whole region [bottom, top).
void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl) {
  marked_object_iterate(region, cl, region->bottom(), region->top());
}
+
// Iterate marked objects only up to the region's concurrent-iteration safe
// limit, for use while the region may still be mutated.
void ShenandoahHeap::marked_object_iterate_careful(ShenandoahHeapRegion* region, ObjectClosure* cl) {
  marked_object_iterate(region, cl, region->bottom(), region->concurrent_iteration_safe_limit());
}
+
+void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl,
+					   HeapWord* addr, HeapWord* limit) {
+  addr += BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+  HeapWord* last_addr = NULL;
+  size_t last_size = 0;
+  while (addr < limit) {
+    addr = _next_mark_bit_map->getNextMarkedWordAddress(addr, limit);
+    if (addr < limit) {
+      oop obj = oop(addr);
+      assert(is_marked_current(obj), "object expected to be marked");
+      cl->do_object(obj);
+      last_addr = addr;
+      last_size = obj->size();
+      addr += obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+    } else {
+      break;
+    }
+  }
+}
+
+class ShenandoahIterateOopClosureRegionClosure : public ShenandoahHeapRegionClosure {
+  MemRegion _mr;
+  ExtendedOopClosure* _cl;
+  bool _skip_unreachable_objects;
+public:
+  ShenandoahIterateOopClosureRegionClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
+    _cl(cl), _skip_unreachable_objects(skip_unreachable_objects) {}
+  ShenandoahIterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl) 
+    :_mr(mr), _cl(cl) {}
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    r->oop_iterate_skip_unreachable(_cl, _skip_unreachable_objects);
+    return false;
+  }
+};
+
// Apply cl to all oops in the heap, optionally skipping collection-set
// ("dirty") regions and unreachable objects. Humongous continuations are
// always skipped (covered via their start region).
void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl, bool skip_dirty_regions, bool skip_unreachable_objects) {
  ShenandoahIterateOopClosureRegionClosure blk(cl, skip_unreachable_objects);
  heap_region_iterate(&blk, skip_dirty_regions, true);
}
+
+void ShenandoahHeap::oop_iterate(MemRegion mr, 
+				 ExtendedOopClosure* cl) {
+  ShenandoahIterateOopClosureRegionClosure blk(mr, cl);
+  heap_region_iterate(&blk, false, true);
+}
+
// Not implemented for Shenandoah.
void  ShenandoahHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  Unimplemented();
}
+
// Adapter: presents each heap region (which is a Space) to a SpaceClosure.
class SpaceClosureRegionClosure: public ShenandoahHeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    _cl->do_space(r);
    // Never abort the region iteration.
    return false;
  }
};
+
// Apply cl to every heap region, treated as a Space.
void  ShenandoahHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}
+
// Map an address to the heap region containing it, via the index computed by
// heap_region_index_containing() and the ordered region array.
ShenandoahHeapRegion*
ShenandoahHeap::heap_region_containing(const void* addr) const {
  uint index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions[index];
#ifdef ASSERT
  if (!(addr >= result->bottom() && addr < result->end())) {
    // Diagnostic dump before the assert below aborts.
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions[0]->bottom()), _num_regions);
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}
+
+Space*  ShenandoahHeap::space_containing(const void* oop) const {
+  Space* res = heap_region_containing(oop);
+  return res;
+}
+
// Not used by Shenandoah.
void  ShenandoahHeap::gc_prologue(bool b) {
  Unimplemented();
}
+
// Not used by Shenandoah.
void  ShenandoahHeap::gc_epilogue(bool b) {
  Unimplemented();
}
+
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
// skip_dirty_regions: do not visit regions in the collection set.
// skip_humongous_continuation: do not visit humongous continuation regions.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current  = _ordered_regions[i];
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_dirty_regions && current->is_in_collection_set()) {
      continue;
    }
    if (blk->doHeapRegion(current)) { 
      return;
    }
  }
}
+
+/**
+ * Maybe we need that at some point...
+oop* ShenandoahHeap::resolve_oop_ptr(oop* p) {
+  if (is_in(p) && heap_region_containing(p)->is_dirty()) {
+    // If the reference is in an object in from-space, we need to first
+    // find its to-space counterpart.
+    // TODO: This here is slow (linear search inside region). Make it faster.
+    oop from_space_oop = oop_containing_oop_ptr(p);
+    HeapWord* to_space_obj = (HeapWord*) oopDesc::bs()->resolve_oop(from_space_oop);
+    return (oop*) (to_space_obj + ((HeapWord*) p - ((HeapWord*) from_space_oop)));
+  } else {
+    return p;
+  }
+}
+
+oop ShenandoahHeap::oop_containing_oop_ptr(oop* p) {
+  HeapWord* from_space_ref = (HeapWord*) p;
+  ShenandoahHeapRegion* region = heap_region_containing(from_space_ref);
+  HeapWord* from_space_obj = NULL;
+  for (HeapWord* curr = region->bottom(); curr < from_space_ref; ) {
+    oop curr_obj = (oop) curr;
+    if (curr < from_space_ref && from_space_ref < (curr + curr_obj->size())) {
+      from_space_obj = curr;
+      break;
+    } else {
+      curr += curr_obj->size();
+    }
+  }
+  assert (from_space_obj != NULL, "must not happen");
+  oop from_space_oop = (oop) from_space_obj;
+  assert (from_space_oop->is_oop(), "must be oop");
+  assert(ShenandoahBarrierSet::is_brooks_ptr(oop(((HeapWord*) from_space_oop) - BrooksPointer::BROOKS_POINTER_OBJ_SIZE)), "oop must have a brooks ptr");
+  return from_space_oop;
+}
+ */
+
// Region closure that clears the per-region live-data counters, in
// preparation for a new marking cycle.
class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
  
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clearLiveData();
    // Never abort the region iteration.
    return false;
  }
};
+
+
// Set up a new concurrent marking cycle: activate SATB, retire TLABs,
// record allocation stats, clear per-region liveness, and mark the
// objects directly reachable from the roots.
void ShenandoahHeap::start_concurrent_marking() {

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    accumulate_statistics_all_tlabs();
    ensure_parsability(true);
  }

  // Record bytes allocated since the last cycle and remember the current
  // heap occupancy as the starting point of this GC.
  _shenandoah_policy->record_bytes_allocated(_bytesAllocSinceCM);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif
  
  // Reset liveness counters before marking accumulates new ones.
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);

  // print_all_refs("pre -mark");

  // oopDesc::_debug = true;

  concurrentMark()->prepare_unmarked_root_objs();

  //  print_all_refs("pre-mark2");
}
+
+
// Verification closure: checks forwarding invariants (only collection-set
// objects may be forwarded, forwardees never point back into the collection
// set), oop validity, and that referenced objects are marked.
class VerifyLivenessClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyLivenessClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      // An object is forwarded if and only if it lives in the collection set.
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (obj != oopDesc::bs()->resolve_oop(obj)),
                err_msg("forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s",
                        BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                        BOOL_TO_STR(obj != oopDesc::bs()->resolve_oop(obj)))
                );
      // Follow the forwarding pointer and check the to-space copy.
      obj = oopDesc::bs()->resolve_oop(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
      if (! sh->is_marked_current(obj)) {
        // Dump heap state before the assert below fires, for diagnosis.
        sh->print_on(tty);
      }
      assert(sh->is_marked_current(obj), err_msg("Referenced Objects should be marked obj: "PTR_FORMAT", marked: %s, is_in_heap: %s", 
                                               p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj))));
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};
+
// Verify liveness invariants over the roots and over all marked objects.
void ShenandoahHeap::verify_live() {

  VerifyLivenessClosure cl;
  roots_iterate(&cl);

  IterateMarkedObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}
+
// Post-evacuation verification closure: checks forwarding invariants and
// that each (resolved) object and its klass pointer are sane.
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      // An object is forwarded if and only if it lives in the collection set.
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (obj != oopDesc::bs()->resolve_oop(obj)),
                err_msg("forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
                        BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                        BOOL_TO_STR(obj != oopDesc::bs()->resolve_oop(obj)), obj->klass()->external_name(), BOOL_TO_STR(_sh->is_marked_current(obj)))
                );
      // Check the to-space copy.
      obj = oopDesc::bs()->resolve_oop(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};
+
// Post-update-refs verification closure: after reference updating, no live
// reference may point into the collection set or at a forwarded object.
class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterUpdateRefsClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
                err_msg("no live reference must point to from-space, is_marked: %s",
                        BOOL_TO_STR(_sh->is_marked_current(obj))));
      if (obj != oopDesc::bs()->resolve_oop(obj) && _sh->is_in(p)) {
        // Diagnostic output before the guarantee below fires.
        tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT, p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
      }
      guarantee(obj == oopDesc::bs()->resolve_oop(obj), "no live reference must point to forwarded object");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};
+
// Verify heap invariants after evacuation: size bookkeeping, then the
// forwarding invariants on roots and all currently-marked objects.
void ShenandoahHeap::verify_heap_after_evacuation() {

  verify_heap_size_consistency();

  // Make the heap walkable before iterating it.
  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}
+
// Region closure asserting that, after reference updating, no region is in
// the collection set (neither per-region flag nor fast-test table).
class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! r->is_in_collection_set(), "no region must be in collection set");
    assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
    return false;
  }
};
+
// Assert that no region remains in the collection set after update-refs.
void ShenandoahHeap::verify_regions_after_update_refs() {
  VerifyRegionsAfterUpdateRefsClosure verify_regions;
  heap_region_iterate(&verify_regions);
}
+
// Verify heap invariants after reference updating: size bookkeeping, then
// strong roots, weak roots, and all oops outside dirty regions.
void ShenandoahHeap::verify_heap_after_update_refs() {

  verify_heap_size_consistency();

  // Make the heap walkable before iterating it.
  ensure_parsability(false);

  VerifyAfterUpdateRefsClosure cl;

  roots_iterate(&cl);
  weak_roots_iterate(&cl);
  oop_iterate(&cl, true, true);

}
+
// Thin wrapper over the shared TLAB statistics accumulation.
void ShenandoahHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}
+
// Wind down concurrent marking: settle the need-update-refs flag, deactivate
// SATB, resize TLABs, and (debug builds) verify the marking result.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed normally: no pending reference updates remain.
    // (When marking WAS cancelled, the flag is deliberately left set so that
    // reference updating can still be finished later.)
    set_need_update_refs(false);
  }
  set_concurrent_mark_in_progress(false);


  if (UseTLAB) {
    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::resize_tlabs);
    accumulate_statistics_all_tlabs();
    resize_all_tlabs();
    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::resize_tlabs);
  }

  if (ShenandoahGCVerbose) {
    print_heap_regions();
  }

#ifdef ASSERT
  if (ShenandoahVerify && ! _cancelled_concgc) {
    verify_heap_after_marking();
  }

#endif
}
+
// Whether a concurrent marking cycle is currently active.
bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress;
}
+
// Flip the concurrent-marking flag and (de)activate the SATB barrier queues
// on all Java threads accordingly.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  if (ShenandoahTracePhases) {
    if (in_progress) {
      gclog_or_tty->print_cr("Shenandoah starting concurrent marking, heap used: "SIZE_FORMAT" MB", used() / M);
    } else {
      gclog_or_tty->print_cr("Shenandoah finishing concurrent marking, heap used: "SIZE_FORMAT" MB", used() / M);
    }
  }

  _concurrent_mark_in_progress = in_progress;
  // Second argument: when deactivating, expect the queues to be active.
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, ! in_progress);
}
+
// Flip the evacuation-in-progress flag, propagating it to all Java threads
// (for the evacuation barriers) and publishing it with a fence.
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  if (ShenandoahTracePhases) {
    if (ShenandoahConcurrentEvacuation) {
      if (in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting concurrent evacuation, heap used: "SIZE_FORMAT" MB", used() / M);
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing concurrent evacuation, heap used: "SIZE_FORMAT" MB", used() / M);
      }
    } else {
      if (in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting non-concurrent evacuation");
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing non-concurrent evacuation");
      }
    }
  }
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
  _evacuation_in_progress = in_progress;
  // Make the new state visible to all threads before proceeding.
  OrderAccess::fence();
}
+
// Whether an evacuation phase is currently active.
bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress;
}
+
// Whether the reference-updating phase is currently active.
bool ShenandoahHeap::is_update_references_in_progress() {
  return _update_references_in_progress;
}
+
// Flip the update-references-in-progress flag, with optional phase tracing.
void ShenandoahHeap::set_update_references_in_progress(bool update_refs_in_progress) {
  if (ShenandoahTracePhases) {
    if (ShenandoahConcurrentUpdateRefs) {
      if (update_refs_in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting concurrent reference-updating");
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing concurrent reference-updating");
      }
    } else {
      if (update_refs_in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting non-concurrent reference-updating");
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing non-concurrent reference-updating");
      }
    }
  }
  _update_references_in_progress = update_refs_in_progress;
}
+
// Collector-specific initialization for a freshly allocated object: during
// marking or evacuation, new objects are marked immediately so they are
// treated as live in the current cycle.
void ShenandoahHeap::post_allocation_collector_specific_setup(HeapWord* hw) {
  oop obj = oop(hw);

  // Assuming for now that objects can't be created already locked
  assert(! obj->has_displaced_mark(), "hopefully new objects don't have displaced mark");
  // tty->print_cr("post_allocation_collector_specific_setup:: "PTR_FORMAT, p2i(obj));

  if (_concurrent_mark_in_progress || _evacuation_in_progress) {
    mark_current_no_checks(obj);
  }
}
+
+void ShenandoahHeap::verify_copy(oop p,oop c){
+    assert(p != oopDesc::bs()->resolve_oop(p), "forwarded correctly");
+    assert(oopDesc::bs()->resolve_oop(p) == c, "verify pointer is correct");
+    if (p->klass() != c->klass()) {
+      print_heap_regions();
+    }
+    assert(p->klass() == c->klass(), err_msg("verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size()));
+    assert(p->size() == c->size(), "verify size");
+    // Object may have been locked between copy and verification
+    //    assert(p->mark() == c->mark(), "verify mark");
+    assert(c == oopDesc::bs()->resolve_oop(c), "verify only forwarded once");
+  }
+
+void ShenandoahHeap::oom_during_evacuation() {
+  // tty->print_cr("Out of memory during evacuation, cancel evacuation, schedule full GC");
+  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
+  collector_policy()->set_should_clear_all_soft_refs(true);
+  concurrent_thread()->schedule_full_gc();
+  cancel_concgc();
+
+  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
+    tty->print_cr("OOM during evacuation. Let Java thread wait until evacuation settlded..");
+    while (! conc_workers()->is_idle()) { // wait.
+      Thread::current()->_ParkEvent->park(1) ;
+    }
+  }
+
+}
+
// Copy object p into the destination buffer s. The first word of s becomes
// the new copy's brooks pointer; the object payload starts one word after.
void ShenandoahHeap::copy_object(oop p, HeapWord* s) {
  HeapWord* filler = s;
  assert(s != NULL, "allocation of brooks pointer must not fail");
  HeapWord* copy = s + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, p->size());
  // Set up the brooks pointer of the copy to point at itself.
  initialize_brooks_ptr(filler, copy);

#ifdef ASSERT
  if (ShenandoahTraceEvacuations) {
    tty->print_cr("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
  }
#endif
}
+
// Evacuate object p to to-space on behalf of the given thread and return the
// winning copy. Allocates from the thread's GCLAB when possible, copies the
// object, then installs the forwarding pointer with a CAS; if another thread
// won the race, the speculative copy is rolled back (GCLAB case) and the
// winner's copy is returned.
oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  ShenandoahHeapRegion* hr;
  size_t required;

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    // Reading p->size() touches from-space; lift the protection briefly.
    hr = heap_region_containing(p);
    {
      hr->memProtectionOff();    
      required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
      hr->memProtectionOn();    
    }
  } else {
    required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
  }
#else
    required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
#endif

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  // Don't even attempt to evacuate anything if evacuation has been cancelled.
  if (_cancelled_concgc) {
    return ShenandoahBarrierSet::resolve_oop_static(p);
  }

  // Try the thread-local GCLAB first, fall back to a shared allocation.
  bool alloc_from_gclab = true;
  thread->set_evacuating(true);
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    filler = allocate_memory(required);
    alloc_from_gclab = false;
  }
  thread->set_evacuating(false);

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  // Object payload starts after the brooks pointer word.
  HeapWord* copy = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
  
#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    // Copying reads from-space; lift the protection around the copy.
    hr->memProtectionOff();
    copy_object(p, filler);
    hr->memProtectionOn();
  } else {
    copy_object(p, filler);    
  }
#else 
    copy_object(p, filler);    
#endif

  // Race to install the forwarding pointer; only one evacuator wins.
  HeapWord* result = BrooksPointer::get(p).cas_forwardee((HeapWord*) p, copy);

  oop return_val;
  if (result == (HeapWord*) p) {
    // We won the race: our copy is the canonical one; mark it live.
    return_val = oop(copy);

    mark_current(return_val);

#ifdef ASSERT
    if (ShenandoahTraceEvacuations) {
      tty->print("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n", p2i((HeapWord*) p), p2i(copy));
    }
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), err_msg("Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy)));
#endif
  }  else {
    // Lost the race: undo our speculative allocation where possible and
    // return the copy installed by the winner.
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
#ifdef ASSERT
    if (ShenandoahTraceEvacuations) {
      tty->print_cr("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    }
#endif
    return_val = (oopDesc*) result;
  }

  return return_val;
}
+
+// Finish a TLAB allocation: 'obj' is the raw allocation start, the object
+// proper begins one Brooks-pointer word later. Initializes the Brooks pointer
+// and returns the object address.
+HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj, bool new_obj) {
+  HeapWord* result = obj + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+  initialize_brooks_ptr(obj, result, new_obj);
+  return result;
+}
+
+// Extra words every object allocation must reserve (the Brooks pointer word).
+uint ShenandoahHeap::oop_extra_words() {
+  return BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+}
+
+// Grow the heap by exactly one region. Returns true on success, false when
+// the maximum region count would be exceeded.
+bool ShenandoahHeap::grow_heap_by() {
+  int new_region_index = ensure_new_regions(1);
+  if (new_region_index != -1) {
+    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
+    // Region start address follows from the fixed region size and its index.
+    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
+    new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
+    if (ShenandoahGCVerbose) {
+      tty->print_cr("allocating new region at index: "INT32_FORMAT, new_region_index);
+      new_region->print();
+    }
+    // Publish the region in both the ordered array and the free set.
+    _ordered_regions[new_region_index] = new_region;
+    _free_regions->append(new_region);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Expand the backing storage to make room for 'new_regions' additional
+// regions. Returns the index of the first newly available region, or -1 when
+// the maximum region count would be exceeded.
+// NOTE(review): the '>=' also rejects growing to exactly _max_regions, so the
+// last region slot is never usable — confirm whether that is intentional.
+int ShenandoahHeap::ensure_new_regions(int new_regions) {
+
+  size_t num_regions = _num_regions;
+  size_t new_num_regions = num_regions + new_regions;
+  if (new_num_regions >= _max_regions) {
+    // Not enough regions left.
+    return -1;
+  }
+
+  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("expanding storage by "SIZE_FORMAT_HEX" bytes, for "INT32_FORMAT" new regions", expand_size, new_regions);
+  }
+  bool success = _storage.expand_by(expand_size);
+  assert(success, "should always be able to expand by requested size");
+
+  _num_regions = new_num_regions;
+
+  // Old region count == index of the first new region.
+  return num_regions;
+
+}
+
+#ifndef CC_INTERP
+// Emit compiled code that turns a raw allocation address in 'obj' into an
+// object address: advance past the Brooks pointer word, then store the object
+// address into the Brooks pointer slot one word before the object.
+// NOTE(review): uses x86-specific MacroAssembler ops (incrementq/movptr).
+void ShenandoahHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
+  __ incrementq(obj, BrooksPointer::BROOKS_POINTER_OBJ_SIZE * HeapWordSize);
+  __ movptr(Address(obj, -1 * HeapWordSize), obj);
+}
+#endif
+
+// Liveness predicate used for reference/string-table cleanup: an object is
+// alive iff it is non-null and marked in the current mark bitmap.
+bool  ShenandoahIsAliveClosure:: do_object_b(oop obj) { 
+
+  ShenandoahHeap* sh = ShenandoahHeap::heap();
+  if (sh->need_update_refs()) {
+    // Between evacuation and update-refs, callers may still hand us
+    // from-space references; resolve to the canonical to-space copy first.
+    obj = ShenandoahBarrierSet::resolve_oop_static(obj);
+  }
+
+#ifdef ASSERT
+  // NOTE(review): this block only declares an unused local — apparently a
+  // leftover breakpoint hook for debugging the assert below.
+  if (obj != ShenandoahBarrierSet::resolve_oop_static(obj)) {
+    ShenandoahHeap* sh = ShenandoahHeap::heap();
+  }
+#endif
+  assert(obj == ShenandoahBarrierSet::resolve_oop_static(obj), "needs to be in to-space");
+
+    HeapWord* addr = (HeapWord*) obj;
+
+    // Optional diagnostic tracing of every liveness decision.
+    if (ShenandoahTraceWeakReferences) {
+
+      if (addr != NULL) {
+	if(sh->is_in(addr)) {
+	  if (sh->is_obj_ill(obj)) {
+	    HandleMark hm;
+	    tty->print_cr("ShenandoahIsAliveClosure Found an ill object "PTR_FORMAT, p2i((HeapWord*) obj));
+	    obj->print();
+	  }
+	  else 
+	    tty->print_cr("found a healthy object "PTR_FORMAT, p2i((HeapWord*) obj));
+
+	} else {
+	  tty->print_cr("found an object outside the heap "PTR_FORMAT, p2i((HeapWord*) obj));
+	}
+      } else {
+	tty->print_cr("found a null object "PTR_FORMAT, p2i((HeapWord*) obj));
+      }
+    }
+
+    return addr != NULL && sh->is_marked_current(obj); //(!sh->is_in(addr) || !sh->is_obj_ill(obj));
+}
+
+// Set up the reference processor for concurrent marking. Multi-threaded
+// processing and discovery are sized by ConcGCThreads; discovery is
+// non-atomic, with 'isAlive' as the liveness predicate.
+void ShenandoahHeap::ref_processing_init() {
+  MemRegion mr = reserved_region();
+
+  // Concurrent Mark ref processor
+  // The commented-out variant below sized processing by ParallelGCThreads
+  // and discovery by MAX2(ParallelGCThreads, ConcGCThreads); kept for
+  // reference.
+//   _ref_processor =
+//     new ReferenceProcessor(mr,    // span
+//                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
+//                                 // mt processing
+//                            (int) ParallelGCThreads,
+//                                 // degree of mt processing
+//                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
+//                                 // mt discovery
+//                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
+//                                 // degree of mt discovery
+//                            false,
+//                                 // Reference discovery is not atomic
+// 			   &isAlive);
+//                                 // is alive closure
+//                                 // (for efficiency/performance)
+  _ref_processor =
+    new ReferenceProcessor(mr,    // span
+			   ParallelRefProcEnabled && (ConcGCThreads > 1),
+			   // mt processing
+                           (int) ConcGCThreads,
+			   // degree of mt processing
+			   (ConcGCThreads > 1),
+			   // mt discovery
+			   (int) ConcGCThreads,
+			   // degree of mt discovery
+			   false,
+			   // Reference discovery is not atomic
+ 			   &isAlive);
+  // is alive closure
+  // (for efficiency/performance)
+
+
+
+}
+
+#ifdef ASSERT
+// Debug aid: toggle memory protection on every collection-set (from-space)
+// region so stray reads into from-space trap immediately.
+void ShenandoahHeap::set_from_region_protection(bool protect) {
+  for (uint i = 0; i < _num_regions; i++) {
+    ShenandoahHeapRegion* region = _ordered_regions[i];
+    if (region != NULL && region->is_in_collection_set()) {
+      if (protect) {
+        region->memProtectionOn();
+      } else {
+        region->memProtectionOff();
+      }
+    }
+  }
+}
+#endif
+
+// Acquire the pending-list lock via the surrogate locker thread.
+void ShenandoahHeap::acquire_pending_refs_lock() {
+  _concurrent_gc_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
+}
+
+// Release the pending-list lock and notify waiters.
+void ShenandoahHeap::release_pending_refs_lock() {
+  _concurrent_gc_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
+}
+
+// Ordered array of all heap regions, indexed by region number.
+ShenandoahHeapRegion** ShenandoahHeap::heap_regions() {
+  return _ordered_regions;
+}
+
+size_t ShenandoahHeap::num_regions() {
+  return _num_regions;
+}
+
+size_t ShenandoahHeap::max_regions() {
+  return _max_regions;
+}
+
+GCTracer* ShenandoahHeap::tracer() {
+  return collector_policy()->tracer();
+}
+
+// NOTE(review): reports used bytes of the free-region set regardless of the
+// 'thread' argument — confirm this is the intended TLAB accounting.
+size_t ShenandoahHeap::tlab_used(Thread* thread) const {
+  return _free_regions->used();
+}
+
+// Request cancellation of the concurrent GC cycle. Reporting happens at most
+// once; callers that are not GC/concurrent-GC threads additionally wait until
+// the concurrent workers have gone idle.
+void ShenandoahHeap::cancel_concgc() {
+  // only report it once
+  if (!_cancelled_concgc) {
+    if (ShenandoahTracePhases) {
+      tty->print_cr("Cancelling GC");
+    }
+    _cancelled_concgc = true;
+    // Make the flag visible to concurrent workers before reporting.
+    OrderAccess::fence();
+    _shenandoah_policy->report_concgc_cancelled();
+  }
+  
+  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
+    while (! conc_workers()->is_idle()) { // wait.
+      Thread::current()->_ParkEvent->park(1) ;
+    }
+  }
+}
+
+// Whether the current concurrent GC cycle has been cancelled.
+bool ShenandoahHeap::cancelled_concgc() {
+  bool cancelled = _cancelled_concgc;
+  return cancelled;
+}
+
+// Reset the cancellation flag for the next cycle.
+void ShenandoahHeap::clear_cancelled_concgc() {
+  _cancelled_concgc = false;
+}
+
+// Worker-count accessors.
+int ShenandoahHeap::max_workers() {
+  return _max_workers;
+}
+
+int ShenandoahHeap::max_parallel_workers() {
+  return _max_parallel_workers;
+}
+int ShenandoahHeap::max_conc_workers() {
+  return _max_conc_workers;
+}
+
+// Stop concurrent GC for VM shutdown.
+void ShenandoahHeap::shutdown() {
+  // We set this early here, to let GC threads terminate before we ask the concurrent thread
+  // to terminate, which would otherwise block until all GC threads come to finish normally.
+  _cancelled_concgc = true;
+  _concurrent_gc_thread->shutdown();
+  cancel_concgc();
+}
+
+// Parallel task that unlinks dead entries from the interned-string table and
+// the symbol table, using the supplied liveness closure. Modeled on G1's
+// equivalent task; per-worker counts are accumulated with Atomic::add.
+class ShenandoahStringSymbolTableUnlinkTask : public AbstractGangTask {
+private:
+  BoolObjectClosure* _is_alive;
+  int _initial_string_table_size;
+  int _initial_symbol_table_size;
+
+  // Counters are plain ints; workers update them via Atomic::add in work().
+  bool  _process_strings;
+  int _strings_processed;
+  int _strings_removed;
+
+  bool  _process_symbols;
+  int _symbols_processed;
+  int _symbols_removed;
+
+public:
+  ShenandoahStringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
+    AbstractGangTask("String/Symbol Unlinking"),
+    _is_alive(is_alive),
+    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
+    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
+
+    _initial_string_table_size = StringTable::the_table()->table_size();
+    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
+    // Reset the parallel claim cursors so workers can claim table buckets.
+    if (process_strings) {
+      StringTable::clear_parallel_claimed_index();
+    }
+    if (process_symbols) {
+      SymbolTable::clear_parallel_claimed_index();
+    }
+  }
+
+  // Verifies that the whole table was scanned, and optionally reports
+  // statistics. NOTE(review): gated on the G1 flag
+  // G1TraceStringSymbolTableScrubbing, inherited from the G1 original.
+  ~ShenandoahStringSymbolTableUnlinkTask() {
+    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+              err_msg("claim value %d after unlink less than initial string table size %d",
+                      StringTable::parallel_claimed_index(), _initial_string_table_size));
+    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+              err_msg("claim value %d after unlink less than initial symbol table size %d",
+                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+
+    if (G1TraceStringSymbolTableScrubbing) {
+      gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                             strings_processed(), strings_removed(),
+                             symbols_processed(), symbols_removed());
+    }
+  }
+
+  // Per-worker body: claim and unlink table buckets, then fold local counts
+  // into the shared totals.
+  void work(uint worker_id) {
+    int strings_processed = 0;
+    int strings_removed = 0;
+    int symbols_processed = 0;
+    int symbols_removed = 0;
+    if (_process_strings) {
+      StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
+      Atomic::add(strings_processed, &_strings_processed);
+      Atomic::add(strings_removed, &_strings_removed);
+    }
+    if (_process_symbols) {
+      SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
+      Atomic::add(symbols_processed, &_symbols_processed);
+      Atomic::add(symbols_removed, &_symbols_removed);
+    }
+  }
+
+  size_t strings_processed() const { return (size_t)_strings_processed; }
+  size_t strings_removed()   const { return (size_t)_strings_removed; }
+
+  size_t symbols_processed() const { return (size_t)_symbols_processed; }
+  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
+};
+
+// Run the string/symbol table unlinking task on the parallel worker gang.
+void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
+
+  workers()->set_active_workers(_max_parallel_workers);
+  ShenandoahStringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
+  workers()->run_task(&shenandoah_unlink_task);
+
+  // String deduplication cleanup (G1 feature) intentionally not wired up yet.
+  //  if (G1StringDedup::is_enabled()) {
+  //    G1StringDedup::unlink(is_alive);
+  //  }
+}
+
+// An object is "ill" (dead) iff it is not marked in the current mark bitmap.
+bool ShenandoahHeap::is_obj_ill(const oop obj) const {
+  return ! is_marked_current(obj);
+}
+
+void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
+  _need_update_refs = need_update_refs;
+}
+
+void ShenandoahHeap::set_need_reset_bitmaps(bool need_reset_bitmaps) {
+  _need_reset_bitmaps = need_reset_bitmaps;
+}
+
+bool ShenandoahHeap::need_reset_bitmaps() const {
+  return _need_reset_bitmaps;
+}
+
+ShenandoahJNICritical* ShenandoahHeap::jni_critical() {
+  return _jni_critical;
+}
+
+// Return the region following 'r', skipping over humongous regions.
+// NOTE(review): recursion assumes a non-humongous region exists before the
+// end of the heap — confirm callers never pass the last region(s).
+ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
+  HeapWord* next_addr = r->bottom() + ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
+  ShenandoahHeapRegion* next = heap_region_containing(next_addr);
+  if (next->is_humongous()) {
+    return next_compaction_region(next);
+  } else {
+    return next;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,452 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
+
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+
+#include "gc/shared/cmBitMap.hpp"
+#include "gc/g1/heapRegionBounds.inline.hpp"
+
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/space.hpp"
+#include "oops/oop.hpp"
+#include "oops/markOop.hpp"
+
+
+class SpaceClosure;
+class GCTracer;
+
+class ShenandoahJNICritical;
+
+class ShenandoahJNICritical;
+
+// BoolObjectClosure that reports every object as live.
+class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
+public:
+  bool do_object_b(oop p) { return true; }
+};
+
+
+// Liveness predicate backed by the heap's current mark bitmap
+// (implementation in shenandoahHeap.cpp).
+class ShenandoahIsAliveClosure: public BoolObjectClosure {
+
+public:
+  bool do_object_b(oop obj);
+};
+
+
+// Closure applied to heap regions; iteration stops once doHeapRegion()
+// returns true, and subclasses may call incomplete() to record an aborted
+// traversal.
+class ShenandoahHeapRegionClosure : public StackObj {
+  bool _complete;
+  void incomplete() {_complete = false;}
+
+public:
+  ShenandoahHeapRegionClosure(): _complete(true) {}
+
+  // typically called on each region until it returns true;
+  virtual bool doHeapRegion(ShenandoahHeapRegion* r) = 0;
+
+  bool complete() { return _complete;}
+};
+
+// A "ShenandoahHeap" is an implementation of a java heap for HotSpot.
+// It uses a new pauseless GC algorithm based on Brooks pointers.
+// Derived from G1
+
+// 
+// CollectedHeap  
+//    SharedHeap
+//      ShenandoahHeap
+
+class ShenandoahHeap : public CollectedHeap {
+
+private:
+
+  // Singleton heap instance (see heap()).
+  static ShenandoahHeap* _pgc;
+  ShenandoahCollectorPolicy* _shenandoah_policy;
+  // Backing virtual memory for all regions.
+  VirtualSpace _storage;
+  ShenandoahHeapRegion* _first_region;
+  HeapWord* _first_region_bottom;
+  // Ordered array of regions  (name confusing with _regions)
+  ShenandoahHeapRegion** _ordered_regions;
+
+  // Sortable array of regions
+  ShenandoahHeapRegionSet* _free_regions;
+  ShenandoahHeapRegionSet* _collection_set;
+  ShenandoahHeapRegion* _currentAllocationRegion;
+  ShenandoahConcurrentMark* _scm;
+
+
+
+  ShenandoahConcurrentThread* _concurrent_gc_thread;
+
+  size_t _num_regions;
+  size_t _max_regions;
+  size_t _initialSize;
+#ifndef NDEBUG
+  uint _numAllocs;
+#endif
+  WorkGangBarrierSync barrierSync;
+  int _max_parallel_workers;
+  int _max_conc_workers;
+  int _max_workers;
+
+  // Parallel (STW) and concurrent worker gangs.
+  FlexibleWorkGang* _conc_workers;
+  FlexibleWorkGang* _workers;
+
+
+  volatile size_t _used;
+
+  // Marking bitmap; _next_mark_bit_map is the one being marked into.
+  CMBitMap _mark_bit_map;
+  CMBitMap* _next_mark_bit_map;
+
+  // Biased per-region table for fast is-in-collection-set tests
+  // (see in_cset_fast_test()).
+  bool* _in_cset_fast_test;
+  bool* _in_cset_fast_test_base;
+  uint _in_cset_fast_test_length;
+
+  bool _cancelled_concgc;
+
+  ShenandoahJNICritical* _jni_critical;
+
+public:
+  // Allocation statistics across/within concurrent-mark cycles.
+  size_t _bytesAllocSinceCM;
+  size_t _bytes_allocated_during_cm;
+  size_t _bytes_allocated_during_cm_start;
+  size_t _max_allocated_gc;
+  size_t _allocated_last_gc;
+  size_t _used_start_gc;
+
+public:
+  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
+  // Allocation entry points; sizes are in words.
+  HeapWord* allocate_from_gclab(Thread* thread, size_t size);
+  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
+  HeapWord* allocate_new_tlab(size_t word_size);
+  HeapWord* allocate_new_gclab(size_t word_size);
+private:
+  HeapWord* allocate_new_tlab(size_t word_size, bool mark);
+public:
+  HeapWord* allocate_memory(size_t word_size);
+
+  bool find_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions);
+  bool allocate_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions);
+
+  // For now we are ignoring eden.
+  inline bool should_alloc_in_eden(size_t size) { return false;}
+  void print_on(outputStream* st) const ;
+
+  ShenandoahHeap::Name kind() const {
+    return CollectedHeap::ShenandoahHeap;
+  }
+  
+  static ShenandoahHeap* heap();
+
+  ShenandoahCollectorPolicy *shenandoahPolicy() { return _shenandoah_policy;}
+
+  jint initialize();
+  static size_t conservative_max_heap_alignment() {
+    return HeapRegionBounds::max_size();
+  }
+
+  // CollectedHeap interface overrides.
+  void post_initialize();
+  size_t capacity() const;
+  size_t used() const;
+  bool is_maximal_no_gc() const;
+  size_t max_capacity() const;
+  virtual bool is_in(const void* p) const;
+  bool is_in_partial_collection(const void* p);
+  bool is_scavengable(const void* addr);
+  virtual HeapWord* mem_allocate(size_t size, bool* what);
+  HeapWord* mem_allocate_locked(size_t size, bool* what);
+  virtual size_t unsafe_max_alloc();
+  bool can_elide_tlab_store_barriers() const;
+  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
+  bool can_elide_initializing_store_barrier(oop new_obj);
+  bool card_mark_must_follow_store() const;
+  bool supports_heap_inspection() const;
+  void collect(GCCause::Cause);
+  void do_full_collection(bool clear_all_soft_refs);
+  AdaptiveSizePolicy* size_policy();
+  ShenandoahCollectorPolicy* collector_policy() const;
+
+  void ensure_parsability(bool retire_tlabs);
+
+  void add_free_region(ShenandoahHeapRegion* r) {_free_regions->append(r);}
+  void clear_free_regions() {_free_regions->clear();}
+
+  // Iteration over oops/objects/regions.
+  void oop_iterate(ExtendedOopClosure* cl, bool skip_dirty_regions,
+                   bool skip_unreachable_objects);
+  void oop_iterate(ExtendedOopClosure* cl) {
+    oop_iterate(cl, false, false);
+  }
+
+  void roots_iterate(ExtendedOopClosure* cl);
+  void weak_roots_iterate(ExtendedOopClosure* cl);
+  
+  void object_iterate(ObjectClosure* cl);
+  void object_iterate_careful(ObjectClosureCareful* cl);
+  void object_iterate_no_from_space(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl);
+
+  void marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl);
+  void marked_object_iterate_careful(ShenandoahHeapRegion* region, ObjectClosure* cl);
+private:
+  void marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl, HeapWord* start, HeapWord* limit);
+
+public:
+  HeapWord* block_start(const void* addr) const;
+  size_t block_size(const HeapWord* addr) const;
+  bool block_is_obj(const HeapWord* addr) const;
+  jlong millis_since_last_gc();
+  void prepare_for_verify();
+  void print_gc_threads_on(outputStream* st) const;
+  void gc_threads_do(ThreadClosure* tcl) const;
+  void print_tracing_info() const;
+  void verify(bool silent,  VerifyOption vo);
+  bool supports_tlab_allocation() const;
+  virtual size_t tlab_capacity(Thread *thr) const;
+  void oop_iterate(MemRegion mr, ExtendedOopClosure* ecl);
+  void object_iterate_since_last_GC(ObjectClosure* cl);
+  void space_iterate(SpaceClosure* scl);
+  virtual size_t unsafe_max_tlab_alloc(Thread *thread) const;
+  virtual size_t max_tlab_size() const;
+
+  void resize_all_tlabs();
+  void accumulate_statistics_all_tlabs();
+  void accumulate_statistics_all_gclabs();
+
+  HeapWord* tlab_post_allocation_setup(HeapWord* obj, bool new_obj);
+
+  // Extra words reserved per object (the Brooks pointer).
+  uint oop_extra_words();
+
+#ifndef CC_INTERP
+  void compile_prepare_oop(MacroAssembler* masm, Register obj = rax);
+#endif
+
+  Space* space_containing(const void* oop) const;
+  void gc_prologue(bool b);
+  void gc_epilogue(bool b);
+
+  void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions = false, bool skip_humongous_continuation = false) const;
+  ShenandoahHeapRegion* heap_region_containing(const void* addr) const;  
+  inline uint heap_region_index_containing(const void* addr) const;  
+
+/**
+ * Maybe we need that at some point...
+
+  oop* resolve_oop_ptr(oop* p);
+
+  oop oop_containing_oop_ptr(oop* p);
+
+*/
+
+  void temp();
+
+  // GC phase flags, read/written by mutator and GC threads.
+  volatile unsigned int _concurrent_mark_in_progress;
+
+  volatile unsigned int _evacuation_in_progress;
+  volatile bool _update_references_in_progress;
+  bool _need_update_refs;
+  bool _need_reset_bitmaps;
+
+  void start_concurrent_marking();
+  void stop_concurrent_marking();
+  ShenandoahConcurrentMark* concurrentMark() { return _scm;}
+  ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }
+
+  ShenandoahJNICritical* jni_critical();
+
+  size_t bump_object_age(HeapWord* start, HeapWord* end);
+
+  // Marking primitives (see shenandoahHeap.inline.hpp).
+  inline bool mark_current(oop obj) const;
+  inline bool mark_current_no_checks(oop obj) const;
+  inline bool is_marked_current(oop obj) const;
+  
+  ReferenceProcessor* _ref_processor;
+  bool is_marked_prev(oop obj) const;
+
+  bool is_obj_ill(const oop obj) const;
+
+  void reset_mark_bitmap();
+  void reset_mark_bitmap_range(HeapWord* from, HeapWord* to);
+
+  bool is_bitmap_clear();
+
+  virtual void post_allocation_collector_specific_setup(HeapWord* obj);
+
+  void mark_object_live(oop obj, bool enqueue);
+
+  // Evacuation entry points.
+  void prepare_for_concurrent_evacuation();
+  void do_evacuation();
+  void parallel_evacuate();
+
+  void initialize_brooks_ptr(HeapWord* brooks_ptr, HeapWord* object, bool new_obj = true);
+  void initialize_brooks_ptr(oop p);
+
+  inline oop maybe_update_oop_ref(oop* p);
+  void evacuate_region(ShenandoahHeapRegion* from_region, ShenandoahHeapRegion* to_region);
+  void parallel_evacuate_region(ShenandoahHeapRegion* from_region);
+  void verify_evacuated_region(ShenandoahHeapRegion* from_region);
+
+  // Debug printing helpers.
+  void print_heap_regions(outputStream* st = tty) const;
+
+  void print_all_refs(const char* prefix);
+
+  void print_heap_objects(HeapWord* start, HeapWord* end);
+  void print_heap_locations(HeapWord* start, HeapWord* end);
+  void print_heap_object(oop p);
+
+  oop  evacuate_object(oop src, Thread* thread);
+  bool is_in_collection_set(const void* p) {
+    return heap_region_containing(p)->is_in_collection_set();
+  }
+  
+  void copy_object(oop p, HeapWord* s);
+  void verify_copy(oop p, oop c);
+  //  void assign_brooks_pointer(oop p, HeapWord* filler, HeapWord* copy);
+  void verify_heap_size_consistency();
+  void verify_heap_after_marking();
+  void verify_heap_after_evacuation();
+  void verify_heap_after_update_refs();
+  void verify_regions_after_update_refs();
+
+  // Field offsets for compiled-code access.
+  static ByteSize ordered_regions_offset() { return byte_offset_of(ShenandoahHeap, _ordered_regions); }
+  static ByteSize first_region_bottom_offset() { return byte_offset_of(ShenandoahHeap, _first_region_bottom); }
+
+  // Where the first object may be placed.
+  HeapWord* start_of_heap() { return _first_region_bottom + 1;}
+  void cleanup_after_cancelconcgc();
+  void increase_used(size_t bytes);
+  void decrease_used(size_t bytes);
+  void set_used(size_t bytes);
+
+  int ensure_new_regions(int num_new_regions);
+
+  void set_evacuation_in_progress(bool in_progress);
+  bool is_evacuation_in_progress();
+
+  bool is_update_references_in_progress();
+  void set_update_references_in_progress(bool update_refs_in_progress);
+
+  inline bool need_update_refs() const;
+  void set_need_update_refs(bool update_refs);
+
+  bool need_reset_bitmaps() const;
+  void set_need_reset_bitmaps(bool need_reset_bitmaps);
+
+  ReferenceProcessor* ref_processor() { return _ref_processor;}	
+  virtual void ref_processing_init();
+  ShenandoahIsAliveClosure isAlive;
+  void evacuate_and_update_roots();
+  void prepare_for_update_references();
+
+  void update_references();
+
+  ShenandoahHeapRegionSet* free_regions();
+
+  void update_roots();
+
+  void acquire_pending_refs_lock();
+  void release_pending_refs_lock();
+
+  int max_workers();
+  int max_conc_workers();
+  int max_parallel_workers();
+  FlexibleWorkGang* conc_workers() const{ return _conc_workers;}
+  FlexibleWorkGang* workers() const{ return _workers;}
+
+  ShenandoahHeapRegion** heap_regions();
+  size_t num_regions();
+  size_t max_regions();
+
+  ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);
+
+  void recycle_dirty_regions();
+
+  // Record region r in the biased in-cset fast-test table.
+  void register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    assert(r->is_in_collection_set(), "invariant");
+    uint index = r->region_number();
+    assert(index < _in_cset_fast_test_length, "invariant");
+    assert(!_in_cset_fast_test_base[index], "invariant");
+    _in_cset_fast_test_base[index] = true;
+  }
+  // Fast path for "is obj in the collection set?" via the biased table.
+  bool in_cset_fast_test(HeapWord* obj) {
+    assert(_in_cset_fast_test != NULL, "sanity");
+    if (is_in(obj)) {
+      // no need to subtract the bottom of the heap from obj,
+      // _in_cset_fast_test is biased
+      uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
+      bool ret = _in_cset_fast_test[index];
+      // let's make sure the result is consistent with what the slower
+      // test returns
+      assert( ret || !is_in_collection_set(obj), "sanity");
+      assert(!ret ||  is_in_collection_set(obj), "sanity");
+      return ret;
+    } else {
+      return false;
+    }
+  }
+
+  static address in_cset_fast_test_addr() {
+    return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
+  }
+
+  void clear_cset_fast_test() {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    memset(_in_cset_fast_test_base, false,
+           (size_t) _in_cset_fast_test_length * sizeof(bool));
+  }
+
+  GCTracer* tracer();
+  ShenandoahHeapRegionSet* collection_set() { return _collection_set; }
+  size_t tlab_used(Thread* ignored) const;
+
+private:
+
+  bool grow_heap_by();
+
+  void verify_evacuation(ShenandoahHeapRegion* from_region);
+  void set_concurrent_mark_in_progress(bool in_progress);
+
+  void oom_during_evacuation();
+  void cancel_concgc();
+public:
+  bool cancelled_concgc();
+  void clear_cancelled_concgc();
+
+  void shutdown();
+
+  bool concurrent_mark_in_progress();
+  size_t calculateUsed();
+  size_t calculateFree();
+
+private:
+  void verify_live();
+  void verify_liveness_after_concurrent_mark();
+
+  // Allocation slow paths and region-cursor helpers.
+  HeapWord* allocate_memory_with_lock(size_t word_size);
+  HeapWord* allocate_memory_heap_lock(size_t word_size);
+  HeapWord* allocate_memory_shenandoah_lock(size_t word_size);
+  HeapWord* allocate_memory_work(size_t word_size);
+  HeapWord* allocate_large_memory(size_t word_size);
+  ShenandoahHeapRegion* check_skip_humongous(ShenandoahHeapRegion* region) const;
+  ShenandoahHeapRegion* get_next_region_skip_humongous() const;
+  ShenandoahHeapRegion* get_current_region_skip_humongous() const;
+  ShenandoahHeapRegion* check_grow_heap(ShenandoahHeapRegion* current);
+  ShenandoahHeapRegion* get_next_region();
+  ShenandoahHeapRegion* get_current_region();
+
+  void set_from_region_protection(bool protect);
+
+public:
+  // Delete entries for dead interned string and clean up unreferenced symbols
+  // in symbol table, possibly in parallel.
+  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
+  
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,102 @@
+/*
+Copyright 2015 Red Hat, Inc. and/or its affiliates.
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
+
+#include "gc/shared/cmBitMap.inline.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "runtime/atomic.inline.hpp"
+
+/*
+ * Marks the object. Returns true if the object has not been marked before and has
+ * been marked by this thread. Returns false if the object has already been marked,
+ * or if a competing thread succeeded in marking this object.
+ */
+inline bool ShenandoahHeap::mark_current(oop obj) const {
+#ifdef ASSERT
+  // Diagnostic dump when asked to mark a from-space (still-forwarded) object:
+  // print both the object's region and its forwardee's region before the
+  // assert below fires.
+  if (obj != oopDesc::bs()->resolve_oop(obj)) {
+    tty->print_cr("heap region containing obj:");
+    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
+    obj_region->print();
+    tty->print_cr("heap region containing forwardee:");
+    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->resolve_oop(obj));
+    forward_region->print();    
+  }
+#endif
+
+  assert(obj == oopDesc::bs()->resolve_oop(obj), "only mark forwarded copy of objects");
+  return mark_current_no_checks(obj);
+}
+
+// Mark without the to-space check; returns true iff this thread set the bit
+// (parMark is the atomic, parallel-safe variant).
+inline bool ShenandoahHeap::mark_current_no_checks(oop obj) const {
+  return _next_mark_bit_map->parMark((HeapWord*) obj);
+}
+
+// True iff the object is marked in the current (next) mark bitmap.
+inline bool ShenandoahHeap::is_marked_current(oop obj) const {
+  return _next_mark_bit_map->isMarked((HeapWord*) obj);
+}
+
+inline bool ShenandoahHeap::need_update_refs() const {
+  return _need_update_refs;
+}
+
+// Map a heap address to its region index: offset from the bottom of the
+// first region, shifted by the region-size shift.
+inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
+  uintptr_t region_start = ((uintptr_t) addr); // & ~(ShenandoahHeapRegion::RegionSizeBytes - 1);
+  uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
+#ifdef ASSERT
+  // Extra diagnostics before the assert below fires.
+  if (!(index < _num_regions)) {
+    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions[0]->bottom()), _num_regions, ShenandoahHeapRegion::RegionSizeBytes);
+  }
+#endif
+  assert(index < _num_regions, "heap region index must be in range");
+  return index;
+}
+
+// If the reference slot *p points at a forwarded (evacuated) object, CAS the
+// slot to the forwardee. Returns the to-space oop now logically held by the
+// slot, or NULL when the slot held null.
+oop ShenandoahHeap::maybe_update_oop_ref(oop* p) {
+
+  assert((! is_in(p)) || (! heap_region_containing(p)->is_in_collection_set()),
+         "never update refs in from-space, unless evacuation has been cancelled"); 
+
+  oop heap_oop = oopDesc::load_heap_oop(p); // read p
+  if (! oopDesc::is_null(heap_oop)) {
+
+#ifdef ASSERT
+    // Extra diagnostics before the in-heap assert below fires.
+    if (! is_in(heap_oop)) {
+      print_heap_regions();
+      tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
+      assert(is_in(heap_oop), "object must be in heap");
+    }
+#endif
+    assert(is_in(heap_oop), "only ever call this on objects in the heap");
+    assert((! (is_in(p) && heap_region_containing(p)->is_in_collection_set())), "we don't want to update references in from-space");
+    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
+    if (forwarded_oop != heap_oop) {
+      // tty->print_cr("updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT, p2i(p), p2i(heap_oop), p2i(forwarded_oop));
+      assert(forwarded_oop->is_oop(), "oop required");
+      assert(is_in(forwarded_oop), "forwardee must be in heap");
+      assert(! heap_region_containing(forwarded_oop)->is_in_collection_set(), "forwardee must not be in collection set");
+      // If this fails, another thread wrote to p before us, it will be logged in SATB and the
+      // reference be updated later.
+      oop result = (oop) Atomic::cmpxchg_ptr(forwarded_oop, p, heap_oop);
+
+      if (result == heap_oop) { // CAS successful.
+	  return forwarded_oop;
+      } else {
+	// CAS lost: return whatever value the competing writer stored.
+	return result;
+      }
+    } else {
+      // Already points at to-space; nothing to do.
+      return forwarded_oop;
+    }
+    /*
+      else {
+      tty->print_cr("not updating ref: "PTR_FORMAT, p2i(heap_oop));
+      }
+    */
+  }
+  return NULL;
+}
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,339 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include "memory/allocation.hpp"
+#include "gc/g1/heapRegionBounds.inline.hpp"
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "memory/universe.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+
+size_t ShenandoahHeapRegion::RegionSizeShift = 0;
+size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
+
+jint ShenandoahHeapRegion::initialize_heap_region(HeapWord* start, 
+						  size_t regionSizeWords, int index) { // one-time setup: space, liveness counter, cset flag, index
+
+  reserved = MemRegion((HeapWord*) start, regionSizeWords);
+  ContiguousSpace::initialize(reserved, true, false); // clear_space = true, mangle_space = false
+  liveData = 0;
+  _is_in_collection_set = false;
+  _region_number = index;
+#ifdef ASSERT
+  _mem_protection_level = 1; // Off, level 1.
+#endif
+  return JNI_OK;
+}
+
+int ShenandoahHeapRegion::region_number() { // the index this region was initialized with
+  return _region_number;
+}
+
+bool ShenandoahHeapRegion::rollback_allocation(uint size) { // undo the most recent allocation of 'size' words by retreating top
+  set_top(top() - size);
+  return true;
+}
+
+void ShenandoahHeapRegion::clearLiveData() { // reset liveness accounting to zero
+  setLiveData(0);
+}
+
+void ShenandoahHeapRegion::setLiveData(size_t s) { // atomic store so concurrent readers see a consistent value
+  Atomic::store_ptr(s, (intptr_t*) &liveData);
+}
+
+void ShenandoahHeapRegion::increase_live_data(size_t s) { // atomically accumulate newly marked live data
+  size_t new_live_data = Atomic::add(s, &liveData);
+  assert(new_live_data <= used() || is_humongous(), "can't have more live data than used");
+}
+
+size_t ShenandoahHeapRegion::getLiveData() const {
+  return liveData;
+}
+
+size_t ShenandoahHeapRegion::garbage() const { // used() minus known-live data
+  assert(used() >= getLiveData() || is_humongous(), err_msg("Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT, getLiveData(), used()));
+  size_t result = used() - getLiveData();
+  return result;
+}
+
+bool ShenandoahHeapRegion::is_in_collection_set() const {
+  return _is_in_collection_set;
+}
+
+#include <sys/mman.h>
+
+#ifdef ASSERT
+
+void ShenandoahHeapRegion::memProtectionOn() { // decrement protection level; reaching 0 mprotects the region (debug from-space verification)
+  /*
+  tty->print_cr("protect memory on region level: "INT32_FORMAT, _mem_protection_level);
+  print(tty);
+  */
+  MutexLockerEx ml(ShenandoahMemProtect_lock, true);
+  assert(_mem_protection_level >= 1, "invariant");
+
+  if (--_mem_protection_level == 0) { // protection engages only when the counter reaches zero
+    if (ShenandoahVerifyWritesToFromSpace) {
+      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
+      os::protect_memory((char*) bottom(), (end() - bottom()) * HeapWordSize, os::MEM_PROT_READ); // fix: size argument is in bytes; end()-bottom() is a word count
+    } else {
+      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
+      assert(! ShenandoahConcurrentEvacuation, "concurrent evacuation needs to be turned off for verifying from-space-reads");
+      os::protect_memory((char*) bottom(), (end() - bottom()) * HeapWordSize, os::MEM_PROT_NONE); // fix: bytes, not words
+    }
+  }
+}
+
+void ShenandoahHeapRegion::memProtectionOff() { // increment protection level; leaving 0 makes the region read-write again
+  /*
+  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
+  print(tty);
+  */
+  MutexLockerEx ml(ShenandoahMemProtect_lock, true);
+  assert(_mem_protection_level >= 0, "invariant");
+  if (_mem_protection_level++ == 0) {
+    os::protect_memory((char*) bottom(), (end() - bottom()) * HeapWordSize, os::MEM_PROT_RW); // fix: bytes, not words
+  }
+}
+
+#endif
+
+void ShenandoahHeapRegion::set_is_in_collection_set(bool b) {
+  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");
+
+  _is_in_collection_set = b;
+
+  if (b) {
+    // tty->print_cr("registering region in fast-cset");
+    // print();
+    ShenandoahHeap::heap()->register_region_with_in_cset_fast_test(this); // keep the heap's fast in-cset table in sync
+  }
+
+#ifdef ASSERT
+  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {    
+    if (b) {
+      memProtectionOn(); // entering the cset: level 1 -> 0 protects the memory
+      assert(_mem_protection_level == 0, "need to be protected here");
+    } else {
+      assert(_mem_protection_level == 0, "need to be protected here");
+      memProtectionOff(); // leaving the cset: level 0 -> 1 unprotects it again
+    }
+  }
+#endif
+}
+
+ByteSize ShenandoahHeapRegion::is_in_collection_set_offset() { // byte offset of the _is_in_collection_set flag within this class
+  return byte_offset_of(ShenandoahHeapRegion, _is_in_collection_set);
+}
+
+void ShenandoahHeapRegion::print_on(outputStream* st) const { // one-line state dump: flags, liveness, boundaries
+  st->print_cr("ShenandoahHeapRegion: "PTR_FORMAT"/"INT32_FORMAT, p2i(this), _region_number);
+
+  if (is_in_collection_set())
+    st->print("C");
+  if (is_humongous_start()) {
+    st->print("H");
+  }
+  if (is_humongous_continuation()) {
+    st->print("h");
+  }
+  //else
+    st->print(" "); // NOTE(review): the else above is commented out, so this space always prints -- confirm intended
+
+  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
+               getLiveData(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
+}
+
+
+class SkipUnreachableObjectToOopClosure: public ObjectClosure { // applies _cl to each object's fields, optionally skipping objects not marked in the current cycle
+  ExtendedOopClosure* _cl;
+  bool _skip_unreachable_objects;
+  ShenandoahHeap* _heap;
+
+public:
+  SkipUnreachableObjectToOopClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
+    _cl(cl), _skip_unreachable_objects(skip_unreachable_objects), _heap(ShenandoahHeap::heap()) {}
+  
+  void do_object(oop obj) {
+    
+    if ((! _skip_unreachable_objects) || _heap->is_marked_current(obj)) { // visit everything, or only currently-marked objects
+      if (_skip_unreachable_objects) {
+        assert(_heap->is_marked_current(obj), "obj must be live");
+      }
+      obj->oop_iterate(_cl);
+    }
+    
+  }
+};
+
+void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) { // walk all objects; may stop early when concgc is cancelled
+  HeapWord* p = bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE; // first object sits after its brooks pointer
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  while (p < top() && !(allow_cancel && heap->cancelled_concgc())) {
+    blk->do_object(oop(p));
+#ifdef ASSERT
+    if (ShenandoahVerifyReadsToFromSpace) {
+      memProtectionOff(); // temporarily unprotect so we may read the object's size
+      p += oop(p)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+      memProtectionOn();
+    } else {
+      p += oop(p)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+    }
+#else
+      p += oop(p)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE; // step over the object plus the next brooks pointer
+#endif
+  }
+}
+
+HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) { // returns the failing address, or NULL when the safe range was fully walked
+  HeapWord * limit = concurrent_iteration_safe_limit();
+  assert(limit <= top(), "sanity check");
+  for (HeapWord* p = bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE; p < limit;) {
+    size_t size = blk->do_object_careful(oop(p));
+    if (size == 0) {
+      return p;  // failed at p
+    } else {
+      p += size + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+    }
+  }
+  return NULL; // all done
+}
+
+void ShenandoahHeapRegion::oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects) { // iterate fields of (optionally only live) objects
+  SkipUnreachableObjectToOopClosure cl2(cl, skip_unreachable_objects);
+  object_iterate_interruptible(&cl2, false); // allow_cancel == false: complete walk
+}
+
+void ShenandoahHeapRegion::fill_region() { // pad the remainder of the region with a filler object preceded by its brooks pointer
+  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+  
+  if (free() > (BrooksPointer::BROOKS_POINTER_OBJ_SIZE + CollectedHeap::min_fill_size())) {
+    HeapWord* filler = allocate(BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
+    HeapWord* obj = allocate(end() - top());
+    sh->fill_with_object(obj, end() - obj);
+    sh->initialize_brooks_ptr(filler, obj);
+  } 
+}
+
+void ShenandoahHeapRegion::set_humongous_start(bool start) {
+  _humongous_start = start;
+}
+
+void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
+  _humongous_continuation = continuation;
+}
+
+bool ShenandoahHeapRegion::is_humongous() const { // true for both the first and any continuation region of a humongous object
+  return _humongous_start || _humongous_continuation;
+}
+
+bool ShenandoahHeapRegion::is_humongous_start() const {
+  return _humongous_start;
+}
+
+bool ShenandoahHeapRegion::is_humongous_continuation() const {
+  return _humongous_continuation;
+}
+
+void ShenandoahHeapRegion::do_reset() { // re-initialize the space and clear liveness/humongous state
+  Space::initialize(reserved, true, false); // clear_space = true, mangle_space = false
+  clearLiveData();
+  _humongous_start = false;
+  _humongous_continuation = false;
+}
+
+void ShenandoahHeapRegion::recycle() { // reclaim a collected region: reset and take it out of the collection set
+  do_reset();
+  set_is_in_collection_set(false);
+}
+
+void ShenandoahHeapRegion::reset() { // like recycle(), but clears the flag directly, bypassing set_is_in_collection_set() and its mem-protection bookkeeping
+  assert(_mem_protection_level == 1, "needs to be unprotected here");
+  do_reset();
+  _is_in_collection_set = false;
+}
+
+HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const { // linear scan for the start of the object covering p
+  assert(MemRegion(bottom(), end()).contains(p),
+         err_msg("p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
+                 p2i(p), p2i(bottom()), p2i(end())));
+  if (p >= top()) {
+    return top();
+  } else {
+    HeapWord* last = bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+    HeapWord* cur = last;
+    while (cur <= p) { // walk object-by-object until we step past p
+      last = cur;
+      cur += oop(cur)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+    }
+    assert(oop(last)->is_oop(),
+           err_msg(PTR_FORMAT" should be an object start", p2i(last)));
+    return last;
+  }
+}
+
+void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) { // compute and publish RegionSizeBytes/RegionSizeShift once at startup
+  uintx region_size = ShenandoahHeapRegionSize;
+  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) { // no explicit flag: derive from average heap size and target region count
+    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
+    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
+                       (uintx) HeapRegionBounds::min_size());
+  }
+
+  int region_size_log = log2_long((jlong) region_size);
+  // Recalculate the region size to make sure it's a power of
+  // 2. This means that region_size is the largest power of 2 that's
+  // <= what we've calculated so far.
+  region_size = ((uintx)1 << region_size_log);
+
+  // Now make sure that we don't go over or under our limits.
+  if (region_size < HeapRegionBounds::min_size()) {
+    region_size = HeapRegionBounds::min_size();
+  } else if (region_size > HeapRegionBounds::max_size()) {
+    region_size = HeapRegionBounds::max_size();
+  }
+
+  // And recalculate the log.
+  region_size_log = log2_long((jlong) region_size);
+
+  // Now, set up the globals.
+  guarantee(RegionSizeShift == 0, "we should only set it once");
+  RegionSizeShift = region_size_log;
+
+  guarantee(RegionSizeBytes == 0, "we should only set it once");
+  RegionSizeBytes = (size_t)region_size;
+
+  if (ShenandoahLogConfig) {
+    tty->print_cr("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
+    tty->print_cr("Region size shift: "SIZE_FORMAT, RegionSizeShift);
+    tty->print_cr("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
+    tty->print_cr("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
+  }
+}
+
+CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const { // regions chain into a sequence of compaction spaces via the heap
+  return ShenandoahHeap::heap()->next_compaction_region(this);
+}
+
+void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) { // mark-compact: compute forwarding addresses
+  scan_and_forward(this, cp);
+}
+
+void ShenandoahHeapRegion::adjust_pointers() { // mark-compact: rewrite references to forwarded objects
+  // Check first is there is any work to do.
+  if (used() == 0) {
+    return;   // Nothing to do.
+  }
+
+  scan_and_adjust_pointers(this);
+}
+
+void ShenandoahHeapRegion::compact() { // mark-compact: move objects to their forwarding addresses
+  scan_and_compact(this);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegion.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,142 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
+
+#include "gc/shared/space.hpp"
+#include "memory/universe.hpp"
+#include "utilities/sizes.hpp"
+
+class ShenandoahHeapRegion : public ContiguousSpace { // one fixed-size heap region: unit of allocation, evacuation and reclamation
+
+  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+
+private:
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    oop obj = oop(addr+1); // object starts one word past its brooks pointer
+    size_t size = obj->size() + 1; // account for the brooks pointer word as well
+    return size;
+  }
+
+    // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  inline size_t adjust_obj_size(size_t size) const {
+    return size + 1; // plus the brooks pointer word
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return oop(addr+1)->size() + 1; // object size plus its brooks pointer word
+  }
+
+  inline oop make_oop(HeapWord* addr) const {
+    return oop(addr+1); // skip the brooks pointer in front of the object
+  }
+public:
+  static size_t RegionSizeBytes;  // set once by setup_heap_region_size()
+  static size_t RegionSizeShift;  // log2 of the region size, set alongside RegionSizeBytes
+
+private:
+  int _region_number;          // index assigned at initialization
+  volatile size_t liveData;    // live data accounted in the current marking cycle; updated atomically
+  MemRegion reserved;          // the memory covered by this region
+  bool _is_in_collection_set;
+
+  bool _humongous_start;         // first region of a humongous object
+  bool _humongous_continuation;  // subsequent region of a humongous object
+
+#ifdef ASSERT
+  int _mem_protection_level;   // 0 = protected, >= 1 = unprotected (debug from-space verification)
+#endif
+
+public:
+  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
+
+  jint initialize_heap_region(HeapWord* start, size_t regionSize, int index);
+
+
+  int region_number();
+
+  // Roll back the previous allocation of an object with specified size.
+  // Returns TRUE when successful, FALSE if not successful or not supported.
+  bool rollback_allocation(uint size);
+
+  void clearLiveData();
+  void setLiveData(size_t s);
+  void increase_live_data(size_t s);
+
+  size_t getLiveData() const;
+
+  void print_on(outputStream* st) const;
+
+  size_t garbage() const; // used() minus live data
+
+  void recycle();
+  void reset();
+
+  void oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects);
+
+  void object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel);
+
+  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
+
+  HeapWord* block_start_const(const void* p) const;
+
+  // Just before GC we need to fill the current region.
+  void fill_region();
+
+  bool is_in_collection_set() const;
+
+  void set_is_in_collection_set(bool b);
+
+  void set_humongous_start(bool start);
+  void set_humongous_continuation(bool continuation);
+
+  bool is_humongous() const;
+  bool is_humongous_start() const;
+  bool is_humongous_continuation() const;
+
+#ifdef ASSERT
+  void memProtectionOn();
+  void memProtectionOff();
+#endif
+
+  static ByteSize is_in_collection_set_offset();
+  // The following are for humongous regions.  We need to save the object's mark word (see save_mark_word()/mark_word() below).
+  markOop saved_mark_word;
+  void save_mark_word(oop obj) {saved_mark_word = obj->mark();}
+  markOop mark_word() {return saved_mark_word;}
+
+  virtual CompactibleSpace* next_compaction_space() const;
+
+  // Override for scan_and_forward support.
+  void prepare_for_compaction(CompactPoint* cp);
+  void adjust_pointers();
+  void compact();
+
+  virtual oop compact_oop(HeapWord* addr) const {
+    return oop(addr + 1); // object starts past its brooks pointer
+  }
+private:
+  void do_reset();
+
+};
+
+
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,343 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "memory/resourceArea.hpp"
+#include "utilities/quickSort.hpp"
+
+ShenandoahHeapRegionSet::ShenandoahHeapRegionSet(size_t max_regions) : // empty set with capacity for max_regions entries
+  _max_regions(max_regions),
+  _regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
+  _garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
+  _free_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
+  _available(0), _used(0)
+{
+
+  _next = &_regions[0];
+  _current = NULL;
+  _next_free = &_regions[0]; // set starts empty
+}
+
+ShenandoahHeapRegionSet::ShenandoahHeapRegionSet(size_t max_regions, ShenandoahHeapRegion** regions, size_t num_regions) : // set pre-populated with a copy of 'regions'
+  _max_regions(num_regions),
+  _regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)), // NOTE(review): allocates max_regions slots but records capacity num_regions -- confirm intended
+  _garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
+  _free_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2), _available(0), _used(0) { // fix: _available/_used were uninitialized; the other ctor zeroes them and available()/used() assert on their values
+
+  // Make copy of the regions array so that we can sort without destroying the original.
+  memcpy(_regions, regions, sizeof(ShenandoahHeapRegion*) * num_regions);
+
+  _next = &_regions[0];
+  _current = NULL;
+  _next_free = &_regions[num_regions]; // set starts fully populated
+}
+
+ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() {
+  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, _regions); // frees only the array, not the regions it points to
+}
+
+int compareHeapRegionsByGarbage(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) { // sort descending by garbage; NULLs sort last
+  if (a == NULL) {
+    if (b == NULL) {
+      return 0;
+    } else {
+      return 1;
+    }
+  } else if (b == NULL) {
+    return -1;
+  }
+
+  size_t garbage_a = a->garbage();
+  size_t garbage_b = b->garbage();
+  
+  if (garbage_a > garbage_b) 
+    return -1;
+  else if (garbage_a < garbage_b)
+    return 1;
+  else return 0;
+}
+
+ShenandoahHeapRegion* ShenandoahHeapRegionSet::current() { // region most recently handed out by get_next(); advances if none handed out yet
+  ShenandoahHeapRegion** current = _current;
+  if (current == NULL) {
+    return get_next();
+  } else {
+    return *(limit_region(current)); // NOTE(review): limit_region() returns NULL when current >= _next_free, which would be dereferenced -- confirm _current cannot pass _next_free
+  }
+}
+
+size_t ShenandoahHeapRegionSet::length() { // number of regions currently in the set
+  return _next_free - _regions;
+}
+
+size_t ShenandoahHeapRegionSet::available_regions() { // remaining slot capacity of the set
+  return (_regions + _max_regions) - _next_free;
+}
+
+void ShenandoahHeapRegionSet::append(ShenandoahHeapRegion* region) { // add region at the tail; callers must hold the world still (see assert)
+  assert(_next_free < _regions + _max_regions, "need space for additional regions");
+  assert(SafepointSynchronize::is_at_safepoint() || ShenandoahHeap_lock->owned_by_self() || ! Universe::is_fully_initialized(), "only append regions to list while world is stopped");
+
+  // Grab next slot.
+  ShenandoahHeapRegion** next_free = _next_free;
+  _next_free++; // NOTE(review): plain increment, not atomic; the header comment claims concurrency-safety -- confirm
+
+  // Insert new region into slot.
+  *next_free = region;
+
+  _available += region->free();
+}
+
+void ShenandoahHeapRegionSet::clear() { // empty the set and reset cursors and accounting
+  _current = NULL;
+  _next = _regions;
+  _next_free = _regions;
+  _available = 0;
+  _used = 0;
+}
+
+ShenandoahHeapRegion* ShenandoahHeapRegionSet::claim_next() { // concurrency-safe: atomically claim the next region, or NULL when exhausted
+  ShenandoahHeapRegion** next = (ShenandoahHeapRegion**) Atomic::add_ptr(sizeof(ShenandoahHeapRegion**), &_next); // advance cursor by one slot
+  next--; // add_ptr returns the new value; the claimed slot is the previous one
+  if (next < _next_free) {
+    return *next;
+  } else {
+    return NULL;
+  }
+}
+
+ShenandoahHeapRegion* ShenandoahHeapRegionSet::get_next() { // non-atomic advance; racing callers may observe the same region (see header)
+
+  ShenandoahHeapRegion** next = _next;
+  if (next < _next_free) {
+    _current = next;
+    _next++;
+    return *next;
+  } else {
+    return NULL;
+  }
+}
+
+ShenandoahHeapRegion** ShenandoahHeapRegionSet::limit_region(ShenandoahHeapRegion** region) { // clamp to the used part of the set: NULL when past the last slot
+  if (region >= _next_free) {
+    return NULL;
+  } else {
+    return region;
+  }
+}
+
+void ShenandoahHeapRegionSet::print() { // dump all regions, marking the current (C->) and next (N->) cursors
+  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
+    if (i == _current) {
+      tty->print_cr("C->");
+    }
+    if (i == _next) {
+      tty->print_cr("N->");
+    }
+    (*i)->print();
+  }
+}
+
+void ShenandoahHeapRegionSet::choose_collection_and_free_sets(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set) { // partition this set's regions into collection and free sets
+  col_set->choose_collection_set(_regions, length());
+  free_set->choose_free_set(_regions, length());
+  //  assert(col_set->length() > 0 && free_set->length() > 0, "Better have some regions in the collection and free sets");
+
+}
+
+void ShenandoahHeapRegionSet::choose_collection_and_free_sets_min_garbage(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set, size_t min_garbage) { // as above, but stop adding cset regions once min_garbage is covered
+  col_set->choose_collection_set_min_garbage(_regions, length(), min_garbage);
+  free_set->choose_free_set(_regions, length());
+  //  assert(col_set->length() > 0 && free_set->length() > 0, "Better have some regions in the collection and free sets");
+}
+
+void ShenandoahHeapRegionSet::choose_collection_set(ShenandoahHeapRegion** regions, size_t length) { // fill this set with garbage-rich, non-humongous regions, most garbage first
+
+  clear();
+
+  assert(length <= _max_regions, "must not blow up array");
+
+  ShenandoahHeapRegion** tmp = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, length, mtGC); // scratch copy for sorting
+
+  memcpy(tmp, regions, sizeof(ShenandoahHeapRegion*) * length);
+
+  QuickSort::sort<ShenandoahHeapRegion*>(tmp, length, compareHeapRegionsByGarbage, false);
+
+  ShenandoahHeapRegion** r = tmp;
+  ShenandoahHeapRegion** end = tmp + length;
+
+  // We don't want the current allocation region in the collection set because a) it is still being allocated into and b) This is where the write barriers will allocate their copies.
+
+  while (r < end) {
+    ShenandoahHeapRegion* region = *r;
+    if (region->garbage() > _garbage_threshold && ! region->is_humongous()) {
+      //      tty->print("choose region %d with garbage = " SIZE_FORMAT " and live = " SIZE_FORMAT " and _garbage_threshold = " SIZE_FORMAT "\n",
+      //		 region->region_number(), region->garbage(), region->getLiveData(), _garbage_threshold);
+
+      assert(! region->is_humongous(), "no humongous regions in collection set");
+
+      if (region->getLiveData() == 0) {
+        // We can recycle it right away and put it in the free set.
+        ShenandoahHeap::heap()->decrease_used(region->used());
+        region->recycle();
+      } else {
+        append(region);
+        region->set_is_in_collection_set(true);
+      }
+      //    } else {
+      //      tty->print("rejected region %d with garbage = " SIZE_FORMAT " and live = " SIZE_FORMAT " and _garbage_threshold = " SIZE_FORMAT "\n",
+      //		 region->region_number(), region->garbage(), region->getLiveData(), _garbage_threshold);
+    }
+    r++;
+  }
+
+  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, tmp);
+
+}
+
+void ShenandoahHeapRegionSet::choose_collection_set_min_garbage(ShenandoahHeapRegion** regions, size_t length, size_t min_garbage) { // like choose_collection_set(), but stop once min_garbage is accumulated
+
+  clear();
+
+  assert(length <= _max_regions, "must not blow up array");
+
+  ShenandoahHeapRegion** tmp = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, length, mtGC); // scratch copy for sorting
+
+  memcpy(tmp, regions, sizeof(ShenandoahHeapRegion*) * length);
+
+  QuickSort::sort<ShenandoahHeapRegion*>(tmp, length, compareHeapRegionsByGarbage, false);
+
+  ShenandoahHeapRegion** r = tmp;
+  ShenandoahHeapRegion** end = tmp + length;
+
+  // We don't want the current allocation region in the collection set because a) it is still being allocated into and b) This is where the write barriers will allocate their copies.
+
+  size_t garbage = 0;
+  while (r < end && garbage < min_garbage) {
+    ShenandoahHeapRegion* region = *r;
+    if (region->garbage() > _garbage_threshold && ! region->is_humongous()) {
+      append(region);
+      garbage += region->garbage();
+      region->set_is_in_collection_set(true);
+    }
+    r++;
+  }
+
+  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, tmp);
+
+  /*
+  tty->print_cr("choosen region with "SIZE_FORMAT" garbage given "SIZE_FORMAT" min_garbage", garbage, min_garbage);
+  */
+}
+
+
+void ShenandoahHeapRegionSet::choose_free_set(ShenandoahHeapRegion** regions, size_t length) { // free set = every region that is neither in the cset nor humongous
+
+  clear();
+  ShenandoahHeapRegion** end = regions + length;
+
+  for (ShenandoahHeapRegion** r = regions; r < end; r++) {
+    ShenandoahHeapRegion* region = *r;
+    if ((! region->is_in_collection_set())
+        && (! region->is_humongous())) {
+      append(region);
+    }
+  }
+}  
+
+void ShenandoahHeapRegionSet::reclaim_humongous_regions() { // reclaim humongous objects that were not marked in the current cycle
+
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  for (ShenandoahHeapRegion** r = _regions; r < _next_free; r++) {
+    // We can immediately reclaim humongous objects/regions that are no longer reachable.
+    ShenandoahHeapRegion* region = *r;
+    if (region->is_humongous_start()) {
+      oop humongous_obj = oop(region->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE); // the object sits after its brooks pointer
+      if (! heap->is_marked_current(humongous_obj)) {
+        reclaim_humongous_region_at(r);
+      }
+    }
+  }
+
+}
+
+void ShenandoahHeapRegionSet::reclaim_humongous_region_at(ShenandoahHeapRegion** r) { // reset every region spanned by the dead humongous object starting at *r
+  assert((*r)->is_humongous_start(), "reclaim regions starting with the first one");
+
+  oop humongous_obj = oop((*r)->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
+  size_t size = humongous_obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE; // in words, including the brooks pointer
+  uint required_regions = (size * HeapWordSize) / ShenandoahHeapRegion::RegionSizeBytes  + 1; // NOTE(review): differs from ShenandoahHumongous::required_regions(); over-counts by one when size is an exact region multiple -- confirm against the allocation path
+
+  if (ShenandoahTraceHumongous) {
+    tty->print_cr("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
+  }
+
+  assert((*r)->getLiveData() == 0, "liveness must be zero");
+
+  for (ShenandoahHeapRegion** i = r; i < r + required_regions; i++) {
+    ShenandoahHeapRegion* region = *i;
+
+    assert(i == r ? region->is_humongous_start() : region->is_humongous_continuation(),
+           "expect correct humongous start or continuation");
+
+    if (ShenandoahTraceHumongous) {
+      region->print();
+    }
+
+    region->reset();
+    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
+  }
+}
+
+void ShenandoahHeapRegionSet::set_concurrent_iteration_safe_limits() { // snapshot each region's top as the limit for concurrent iteration
+  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
+    ShenandoahHeapRegion* region = *i;
+    region->set_concurrent_iteration_safe_limit(region->top());
+  }
+}
+
+size_t ShenandoahHeapRegionSet::garbage() { // total garbage over all regions in the set
+  size_t garbage = 0;
+  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
+    ShenandoahHeapRegion* region = *i;
+    garbage += region->garbage();
+  }
+  return garbage;
+}
+
+size_t ShenandoahHeapRegionSet::used() { // total used, recomputed by walking the regions (cf. the const overload below)
+  size_t used = 0;
+  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
+    ShenandoahHeapRegion* region = *i;
+    used += region->used();
+  }
+  return used;
+}
+
+size_t ShenandoahHeapRegionSet::live_data() { // total live data over all regions in the set
+  size_t live = 0;
+  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
+    ShenandoahHeapRegion* region = *i;
+    live += region->getLiveData();
+  }
+  return live;
+}
+
+void ShenandoahHeapRegionSet::decrease_available(size_t num_bytes) { // move num_bytes from available to used accounting
+  assert(_available >= num_bytes, "can't use more than available");
+  _available -= num_bytes;
+  _used += num_bytes;
+}
+
+size_t ShenandoahHeapRegionSet::available() const {
+  assert(ShenandoahHeap::heap()->capacity() - ShenandoahHeap::heap()->used()>= _available, "must not be > heap free");
+  return _available;
+}
+
+size_t ShenandoahHeapRegionSet::used() const { // cached counter only (cf. the non-const recomputing overload above); NOTE(review): confusing overload pair -- consider renaming
+  assert(ShenandoahHeap::heap()->used() >= _used, "must not be > heap used");
+  return _used;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,102 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
+
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+
+
+class ShenandoahHeapRegionSet : public CHeapObj<mtGC> { // fixed-capacity, ordered set of heap regions with cursor-based iteration
+private:
+  ShenandoahHeapRegion** _regions; // backing array, C-heap allocated
+  // current region to be returned from get_next()
+  ShenandoahHeapRegion** _current;
+  ShenandoahHeapRegion** _next;
+
+  // last inserted region.
+  ShenandoahHeapRegion** _next_free;
+  ShenandoahHeapRegion** _concurrent_next_free; // NOTE(review): not used by any code visible in this change -- confirm it is needed
+
+  // Maximum size of the set.
+  const size_t _max_regions;
+
+  size_t _garbage_threshold; // minimum garbage for a region to enter a collection set
+  size_t _free_threshold;
+
+  size_t _available; // bytes still allocatable in the set's regions
+  size_t _used;      // bytes accounted as used via decrease_available()
+
+  void choose_collection_set(ShenandoahHeapRegion** regions, size_t length);
+  void choose_collection_set_min_garbage(ShenandoahHeapRegion** regions, size_t length, size_t min_garbage);
+  void choose_free_set(ShenandoahHeapRegion** regions, size_t length);
+
+public:
+  ShenandoahHeapRegionSet(size_t max_regions);
+
+  ShenandoahHeapRegionSet(size_t max_regions, ShenandoahHeapRegion** regions, size_t num_regions);
+
+  ~ShenandoahHeapRegionSet();
+
+  void set_garbage_threshold(size_t minimum_garbage) { _garbage_threshold = minimum_garbage;}
+  void set_free_threshold(size_t minimum_free) { _free_threshold = minimum_free;}
+
+  /**
+   * Appends a region to the set. This is implemented to be concurrency-safe.
+   */
+  void append(ShenandoahHeapRegion* region);
+
+  void clear();
+
+  size_t length();
+  size_t used_regions() { // regions already handed out by get_next(); NOTE(review): _current may be NULL here -- confirm callers
+    return _current - _regions;
+  }
+  size_t available_regions();
+  void print();
+
+  size_t garbage();
+  size_t used();
+  size_t live_data();
+  size_t reclaimed() {return _reclaimed;} // NOTE(review): _reclaimed is never initialized or updated in this change -- confirm
+
+  /**
+   * Returns a pointer to the current region.
+   */
+   ShenandoahHeapRegion* current();
+
+  /**
+   * Gets the next region for allocation (from free-list).
+   * If multiple threads are competing, one will succeed to
+   * increment to the next region, the others will fail and return
+   * the region that the succeeding thread got.
+   */
+  ShenandoahHeapRegion* get_next();
+
+  /**
+   * Claims next region for processing. This is implemented to be concurrency-safe.
+   */
+  ShenandoahHeapRegion* claim_next();
+
+  void choose_collection_and_free_sets(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set);
+  void choose_collection_and_free_sets_min_garbage(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set, size_t min_garbage);
+
+  // Check for unreachable humongous regions and reclaim them.
+  void reclaim_humongous_regions();
+
+  void set_concurrent_iteration_safe_limits();
+
+  void decrease_available(size_t num_bytes);
+
+  size_t available() const;
+  size_t used() const;
+
+private:
+  void reclaim_humongous_region_at(ShenandoahHeapRegion** r);
+
+  ShenandoahHeapRegion** limit_region(ShenandoahHeapRegion** region); // NULL when region is past _next_free
+  size_t _reclaimed;
+
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahHumongous.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,18 @@
+
+/*
+Copyright 2015 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHUMONGOUS_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHUMONGOUS_HPP
+
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+
+class ShenandoahHumongous : public AllStatic { // helpers for humongous-object region math
+
+public:
+  static uint required_regions(size_t bytes) { // number of regions needed to cover 'bytes', rounding up
+    return (bytes + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;
+  }
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahJNICritical.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,102 @@
+/*
+Copyright 2015 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include "gc/shenandoah/shenandoahJNICritical.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+#include "gc/shared/gcLocker.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vmThread.hpp"
+
+class VM_ShenandoahJNICriticalOperation : public VM_Operation {
+private:
+  VM_Operation* _target;
+public:
+  VM_ShenandoahJNICriticalOperation(VM_Operation* target);
+  VMOp_Type type() const;
+  bool doit_prologue();
+  void doit_epilogue();
+  void doit();
+  const char* name() const;
+};
+
+ShenandoahJNICritical::ShenandoahJNICritical() : _op_waiting_for_jni_critical(NULL) {
+}
+
+/*
+ * This is called by the Java thread who leaves the last JNI critical block.
+ */
+void ShenandoahJNICritical::notify_jni_critical() {
+  assert(Thread::current()->is_Java_thread(), "call only from Java thread");
+  assert(_op_waiting_for_jni_critical != NULL, "must be waiting for jni critical notification");  
+
+  MonitorLockerEx ml(ShenandoahJNICritical_lock, true);
+
+  VMThread::execute(_op_waiting_for_jni_critical);
+  _op_waiting_for_jni_critical = NULL;
+
+  ml.notify_all();
+
+}
+
+/*
+ * This is called by the VM thread, if it determines that the task must wait
+ * for JNI critical regions to be left.
+ */
+void ShenandoahJNICritical::set_waiting_for_jni_before_gc(VM_Operation* op) {
+  assert(Thread::current()->is_VM_thread(), "call only from VM thread");
+  _op_waiting_for_jni_critical = op;
+}
+
+/**
+ * This is called by the Shenandoah concurrent thread in order
+ * to execute a VM_Operation on the VM thread, that needs to perform
+ * a JNI critical region check.
+ */
+void ShenandoahJNICritical::execute_in_vm_thread(VM_Operation* op) {
+  MonitorLockerEx ml(ShenandoahJNICritical_lock, true);
+  VM_ShenandoahJNICriticalOperation jni_op(op);
+  VMThread::execute(&jni_op);
+  while (_op_waiting_for_jni_critical != NULL) {
+    ml.wait(true);
+  }
+}
+
+
+VM_ShenandoahJNICriticalOperation::VM_ShenandoahJNICriticalOperation(VM_Operation* target)
+  : _target(target) {
+}
+
+VM_Operation::VMOp_Type VM_ShenandoahJNICriticalOperation::type() const {
+  return _target->type();
+}
+
+const char* VM_ShenandoahJNICriticalOperation::name() const {
+  return _target->name();
+}
+
+bool VM_ShenandoahJNICriticalOperation::doit_prologue() {
+  return _target->doit_prologue();
+}
+
+void VM_ShenandoahJNICriticalOperation::doit_epilogue() {
+  _target->doit_epilogue();
+}
+
+void VM_ShenandoahJNICriticalOperation::doit() {
+  if (! GC_locker::check_active_before_gc()) {
+    _target->doit();
+  } else {
+
+    if (ShenandoahTraceJNICritical) {
+      gclog_or_tty->print_cr("Deferring JNI critical op because of active JNI critical regions");
+    }
+
+    // This makes the GC background thread wait, and kick off evacuation as
+    // soon as JNI notifies us that critical regions have all been left.
+    ShenandoahHeap *sh = ShenandoahHeap::heap();
+    sh->jni_critical()->set_waiting_for_jni_before_gc(this);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahJNICritical.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHJNICRITICAL_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHJNICRITICAL_HPP
+
+#include "gc/shared/vmGCOperations.hpp"
+#include "memory/allocation.hpp"
+
+class ShenandoahJNICritical : public CHeapObj<mtGC> {
+private:
+  VM_Operation* _op_waiting_for_jni_critical;
+
+public:
+  ShenandoahJNICritical();
+  void notify_jni_critical();
+  void set_waiting_for_jni_before_gc(VM_Operation* op);
+  void execute_in_vm_thread(VM_Operation* op);
+};
+
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHJNICRITICAL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahMarkCompact.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,397 @@
+/*
+  Copyright 2014 Red Hat, Inc. and/or its affiliates.
+*/
+
+#include "code/codeCache.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "gc/shenandoah/brooksPointer.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/vm_operations_shenandoah.hpp"
+#include "gc/serial/markSweep.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/copy.hpp"
+#include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/workgroup.hpp"
+
+
+
+void ShenandoahMarkCompact::allocate_stacks() {
+  MarkSweep::_preserved_count_max = 0;
+  MarkSweep::_preserved_marks = NULL;
+  MarkSweep::_preserved_count = 0;
+}
+
+void ShenandoahMarkCompact::do_mark_compact() {
+  ShenandoahHeap* _heap = ShenandoahHeap::heap();
+
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+  IsGCActiveMark is_active;
+
+  // if concgc gets cancelled between phases the bitmap doesn't get cleared up.
+  _heap->reset_mark_bitmap();
+ 
+  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
+  assert(_heap->is_bitmap_clear(), "require cleared bitmap");
+  assert(!_heap->concurrent_mark_in_progress(), "can't do full-GC while marking is in progress");
+  assert(!_heap->is_evacuation_in_progress(), "can't do full-GC while evacuation is in progress");
+  assert(!_heap->is_update_references_in_progress(), "can't do full-GC while updating of references is in progress");
+  BarrierSet* _old_barrier_set = oopDesc::bs();
+
+  oopDesc::set_bs(new ShenandoahMarkCompactBarrierSet());
+ 
+  _heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::full_gc);
+ 
+  // We need to clear the is_in_collection_set flag in all regions.
+  ShenandoahHeapRegion** regions = _heap->heap_regions();
+  size_t num_regions = _heap->num_regions();
+  for (size_t i = 0; i < num_regions; i++) {
+    regions[i]->set_is_in_collection_set(false);
+  }
+  _heap->clear_cset_fast_test();
+
+  if (ShenandoahVerify) {
+    // Full GC should only be called between regular concurrent cycles, therefore
+    // those verifications should be valid.
+    _heap->verify_heap_after_evacuation();
+    _heap->verify_heap_after_update_refs();
+  }
+ 
+  if (ShenandoahTraceFullGC) {
+    gclog_or_tty->print_cr("Shenandoah-full-gc: start with heap used: "SIZE_FORMAT" MB", _heap->used() / M);
+    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 1: marking the heap");
+    // _heap->print_heap_regions();
+  }
+ 
+  if (UseTLAB) {
+    _heap->accumulate_statistics_all_tlabs();
+    _heap->ensure_parsability(true);
+  }
+  
+  _heap->cleanup_after_cancelconcgc();
+  
+  ReferenceProcessor* rp = _heap->ref_processor();
+ 
+  // hook up weak ref data so it can be used during Mark-Sweep
+  assert(MarkSweep::ref_processor() == NULL, "no stomping");
+  assert(rp != NULL, "should be non-NULL");
+  assert(rp == ShenandoahHeap::heap()->ref_processor(), "Precondition"); 
+  bool clear_all_softrefs = true;  //fixme
+  MarkSweep::_ref_processor = rp;
+  rp->setup_policy(clear_all_softrefs);
+
+  CodeCache::gc_prologue();
+  allocate_stacks();
+
+  // We should save the marks of the currently locked biased monitors.
+  // The marking doesn't preserve the marks of biased objects.
+  BiasedLocking::preserve_marks();
+
+  phase1_mark_heap();
+ 
+  if (ShenandoahTraceFullGC) {
+    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 2: calculating target addresses");
+  }
+  phase2_calculate_target_addresses();
+ 
+  if (ShenandoahTraceFullGC) {
+    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 3: updating references");
+  }
+
+  // Don't add any more derived pointers during phase3
+  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
+
+  phase3_update_references();
+ 
+  if (ShenandoahTraceFullGC) {
+    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 4: compacting objects");
+  }
+
+  phase4_compact_objects();
+
+ 
+  MarkSweep::restore_marks();
+  BiasedLocking::restore_marks();
+  GenMarkSweep::deallocate_stacks();
+
+  CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
+
+  // refs processing: clean slate
+  MarkSweep::_ref_processor = NULL;
+
+ 
+  if (ShenandoahVerify) {
+    _heap->verify_heap_after_evacuation();
+    _heap->verify_heap_after_update_refs();
+  }
+
+  _heap->reset_mark_bitmap();
+
+  if (UseTLAB) {
+    _heap->resize_all_tlabs();
+  }
+
+  if (ShenandoahTraceFullGC) {
+    gclog_or_tty->print_cr("Shenandoah-full-gc: finish with heap used: "SIZE_FORMAT" MB", _heap->used() / M);
+  }
+
+  _heap->_bytesAllocSinceCM = 0;
+
+  oopDesc::set_bs(_old_barrier_set); 
+
+  _heap->set_need_update_refs(false);
+
+  _heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::full_gc);
+}
+ 
+void ShenandoahMarkCompact::phase1_mark_heap() {
+  ShenandoahHeap* _heap = ShenandoahHeap::heap();
+  ReferenceProcessor* rp = _heap->ref_processor();
+
+  MarkSweep::_ref_processor = rp;
+ 
+  // Need cleared claim bits for the roots processing
+  ClassLoaderDataGraph::clear_claimed_marks();
+ 
+  MarkingCodeBlobClosure follow_code_closure(&MarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
+  {
+    ShenandoahRootProcessor rp(_heap, 1);
+    rp.process_strong_roots(&MarkSweep::follow_root_closure,
+			    &MarkSweep::follow_cld_closure,
+			    &follow_code_closure);
+  }
+ 
+ 
+  _heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::weakrefs);
+  bool clear_soft_refs = false; //fixme 
+  rp->setup_policy(clear_soft_refs);
+ 
+  const ReferenceProcessorStats& stats =
+    rp->process_discovered_references(&MarkSweep::is_alive,
+				      &MarkSweep::keep_alive,
+				      &MarkSweep::follow_stack_closure,
+				      NULL,
+				      _heap->collector_policy()->conc_timer(),
+				      _heap->tracer()->gc_id());
+ 
+  //     heap->tracer()->report_gc_reference_stats(stats);
+ 
+  _heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
+ 
+  // Unload classes and purge the SystemDictionary.
+  bool purged_class = SystemDictionary::do_unloading(&MarkSweep::is_alive);
+ 
+  // Unload nmethods.
+  CodeCache::do_unloading(&MarkSweep::is_alive, purged_class);
+ 
+  // Prune dead klasses from subklass/sibling/implementor lists.
+  Klass::clean_weak_klass_links(&MarkSweep::is_alive);
+ 
+  // Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
+  _heap->unlink_string_and_symbol_table(&MarkSweep::is_alive);
+ 
+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
+    //    Universe::heap()->prepare_for_verify();
+    _heap->prepare_for_verify();
+    // Note: we can verify only the heap here. When an object is
+    // marked, the previous value of the mark word (including
+    // identity hash values, ages, etc) is preserved, and the mark
+    // word is set to markOop::marked_value - effectively removing
+    // any hash values from the mark word. These hash values are
+    // used when verifying the dictionaries and so removing them
+    // from the mark word can make verification of the dictionaries
+    // fail. At the end of the GC, the original mark word values
+    // (including hash values) are restored to the appropriate
+    // objects.
+    if (!VerifySilently) {
+      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    }
+    //    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    _heap->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    if (!VerifySilently) {
+      gclog_or_tty->print_cr("]");
+    }
+  }
+}
+ 
+class ShenandoahPrepareForCompaction : public ShenandoahHeapRegionClosure {
+  CompactPoint _cp;
+  ShenandoahHeap* _heap;
+  bool _dead_humongous;
+
+public:
+  ShenandoahPrepareForCompaction() :
+    _heap(ShenandoahHeap::heap()),
+    _dead_humongous(false) {
+  }
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    // We need to save the contents
+    if (!r->is_humongous()) {
+      if (_cp.space == NULL) {
+	_cp.space = r;
+	_cp.threshold = _heap->start_of_heap();
+      }
+      _dead_humongous = false;
+      r->prepare_for_compaction(&_cp);
+    }  else {
+      if (r->is_humongous_start()) {
+        oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
+	if (obj->is_gc_marked()) {
+	  obj->forward_to(obj);
+	  _dead_humongous = false;
+	} else {
+	  if (_cp.space == NULL) {
+	    _cp.space = r;
+	    _cp.threshold = _heap->start_of_heap();
+	  }
+	  _dead_humongous = true;
+	  r->reset();
+	}
+      } else {
+	assert(r->is_humongous_continuation(), "expect humongous continuation");
+	if (_dead_humongous) {
+	  r->reset();
+	}
+      }
+    }
+    return false;
+  }
+};
+  
+void ShenandoahMarkCompact::phase2_calculate_target_addresses() {
+  ShenandoahPrepareForCompaction prepare;
+  ShenandoahHeap::heap()->heap_region_iterate(&prepare);
+}
+ 
+
+class ShenandoahMarkCompactAdjustPointersClosure : public ShenandoahHeapRegionClosure {
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    if (r->is_humongous()) {
+      if (r->is_humongous_start()) {
+        // We must adjust the pointers on the single H object.
+        oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
+	assert(obj->is_gc_marked(), "should be marked");
+	// point all the oops to the new location
+	MarkSweep::adjust_pointers(obj);
+      }
+    } else {
+      r->adjust_pointers();
+    }
+    return false;
+  }
+};
+
+void ShenandoahMarkCompact::phase3_update_references() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+ 
+    // Need cleared claim bits for the roots processing
+  ClassLoaderDataGraph::clear_claimed_marks();
+
+  CodeBlobToOopClosure adjust_code_closure(&MarkSweep::adjust_pointer_closure,
+					   CodeBlobToOopClosure::FixRelocations);
+
+  {
+    ShenandoahRootProcessor rp(heap, 1);
+    rp.process_all_roots(&MarkSweep::adjust_pointer_closure,
+			 &MarkSweep::adjust_cld_closure,
+			 &adjust_code_closure);
+  }
+
+  assert(MarkSweep::ref_processor() == heap->ref_processor(), "Sanity");
+
+  // Now adjust pointers in remaining weak roots.  (All of which should
+  // have been cleared if they pointed to non-surviving objects.)
+  heap->weak_roots_iterate(&MarkSweep::adjust_pointer_closure);
+
+  //  if (G1StringDedup::is_enabled()) {
+  //    G1StringDedup::oops_do(&MarkSweep::adjust_pointer_closure);
+  //  }
+
+  MarkSweep::adjust_marks();
+
+  ShenandoahMarkCompactAdjustPointersClosure apc;
+  heap->heap_region_iterate(&apc);
+}
+
+class ShenandoahCleanupObjectClosure : public ObjectClosure {
+  void  do_object(oop p) {
+    ShenandoahHeap::heap()->initialize_brooks_ptr(p);
+  }
+};
+
+class CompactObjectsClosure : public ShenandoahHeapRegionClosure {
+
+public:
+
+  CompactObjectsClosure() {
+  }
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    if (r->is_humongous()) {
+      if (r->is_humongous_start()) {
+        oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
+	assert(obj->is_gc_marked(), "expect marked humongous object");
+	obj->init_mark();
+      }
+    } else {
+      r->compact();
+    }
+
+    return false;
+  }
+
+};
+
+class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
+  size_t _live;
+  ShenandoahHeap* _heap;
+public:
+
+  ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) { 
+    _heap->clear_free_regions();
+  }
+
+  bool doHeapRegion(ShenandoahHeapRegion* r) {
+    if (r->is_humongous()) {
+      if (r->is_humongous_start()) {
+	oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
+	size_t size = obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
+      }
+      _live += ShenandoahHeapRegion::RegionSizeBytes;
+
+    } else {
+      size_t live = r->used();
+      if (live == 0) _heap->add_free_region(r);
+      r->setLiveData(live);
+      _live += live;
+    }
+
+    return false;
+  }
+  
+  size_t getLive() { return _live;}
+
+};
+
+void ShenandoahMarkCompact::phase4_compact_objects() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  CompactObjectsClosure coc;
+  heap->heap_region_iterate(&coc);
+  
+  ShenandoahCleanupObjectClosure cleanup;
+  heap->object_iterate(&cleanup);
+
+  ShenandoahPostCompactClosure post_compact;
+  heap->heap_region_iterate(&post_compact);
+
+  heap->set_used(post_compact.getLive());
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahMarkCompact.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,45 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
+
+#include "gc/serial/genMarkSweep.hpp"
+#include "gc/shared/taskqueue.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class HeapWord;
+class ShenandoahMarkCompactBarrierSet;
+
+/**
+ * This implements full-GC (e.g. when invoking System.gc()) using a
+ * mark-compact algorithm. It's implemented in four phases:
+ *
+ * 1. Mark all live objects of the heap by traversing objects starting at GC roots.
+ * 2. Calculate the new location of each live object. This is done by sequentially scanning
+ *    the heap, keeping track of a next-location-pointer, which is then written to each
+ *    object's brooks ptr field.
+ * 3. Update all references. This is implemented by another scan of the heap, and updates
+ *    all references in live objects by what's stored in the target object's brooks ptr.
+ * 3. Compact the heap by copying all live objects to their new location.
+ */
+
+class ShenandoahMarkCompact: AllStatic {
+
+public:
+
+  static void do_mark_compact();
+
+private:
+
+  static void phase1_mark_heap();
+  static void phase2_calculate_target_addresses();
+  static void phase3_update_references();
+  static void phase4_compact_objects();
+  static void finish_compaction(HeapWord* last_addr);
+
+  static void allocate_stacks();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahRootProcessor.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,118 @@
+#include "precompiled.hpp"
+
+#include "classfile/stringTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/fprofiler.hpp"
+#include "runtime/mutex.hpp"
+#include "services/management.hpp"
+
+ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers) :
+  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
+  _srs(n_workers)
+{
+}
+
+void ShenandoahRootProcessor::process_roots(OopClosure* strong_oops,
+					    OopClosure* weak_oops,
+					    CLDClosure* strong_clds,
+					    CLDClosure* weak_clds,
+					    CLDClosure* thread_stack_clds,
+					    CodeBlobClosure* strong_code) {
+  process_java_roots(strong_oops, thread_stack_clds, strong_clds, weak_clds, strong_code, 0);
+  process_vm_roots(strong_oops, weak_oops, 0);
+  
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_CodeCache_oops_do)) {
+    CodeCache::blobs_do(strong_code);
+  }
+
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_weak_oops_do)) {
+    ShenandoahAlwaysTrueClosure always_true;
+    JNIHandles::weak_oops_do(&always_true, weak_oops);
+  }
+
+  _process_strong_tasks->all_tasks_completed(n_workers());
+}
+
+void ShenandoahRootProcessor::process_strong_roots(OopClosure* oops,
+                                           CLDClosure* clds,
+                                           CodeBlobClosure* blobs) {
+
+  process_java_roots(oops, clds, clds, NULL, blobs, 0);
+  process_vm_roots(oops, NULL, 0);
+
+  _process_strong_tasks->all_tasks_completed(n_workers());
+}
+
+void ShenandoahRootProcessor::process_all_roots(OopClosure* oops,
+                                        CLDClosure* clds,
+                                        CodeBlobClosure* blobs) {
+
+  process_java_roots(oops, NULL, clds, clds, NULL, 0);
+  process_vm_roots(oops, oops, 0);
+
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_CodeCache_oops_do)) {
+    CodeCache::blobs_do(blobs);
+  }
+
+  _process_strong_tasks->all_tasks_completed(n_workers());
+}
+
+void ShenandoahRootProcessor::process_java_roots(OopClosure* strong_roots,
+                                                 CLDClosure* thread_stack_clds,
+                                                 CLDClosure* strong_clds,
+                                                 CLDClosure* weak_clds,
+                                                 CodeBlobClosure* strong_code,
+                                                 uint worker_i)
+{
+  //assert(thread_stack_clds == NULL || weak_clds == NULL, "There is overlap between those, only one may be set");
+  // Iterating over the CLDG and the Threads are done early to allow us to
+  // first process the strong CLDs and nmethods and then, after a barrier,
+  // let the thread process the weak CLDs and nmethods.
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_ClassLoaderDataGraph_oops_do)) {
+    ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
+  }
+
+  bool is_par = n_workers() > 1;
+  ResourceMark rm;
+  Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
+}
+
+void ShenandoahRootProcessor::process_vm_roots(OopClosure* strong_roots,
+                                               OopClosure* weak_roots,
+                                               uint worker_i)
+{
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Universe_oops_do)) {
+    Universe::oops_do(strong_roots);
+  }
+
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_oops_do)) {
+    JNIHandles::oops_do(strong_roots);
+  }
+  if (!_process_strong_tasks-> is_task_claimed(SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do)) {
+    ObjectSynchronizer::oops_do(strong_roots);
+  }
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_FlatProfiler_oops_do)) {
+    FlatProfiler::oops_do(strong_roots);
+  }
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Management_oops_do)) {
+    Management::oops_do(strong_roots);
+  }
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_jvmti_oops_do)) {
+    JvmtiExport::oops_do(strong_roots);
+  }
+  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_SystemDictionary_oops_do)) {
+    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+  }
+  // All threads execute the following. A specific chunk of buckets
+  // from the StringTable are the individual tasks.
+  if (weak_roots != NULL) {
+    StringTable::possibly_parallel_oops_do(weak_roots);
+  }
+}
+
+uint ShenandoahRootProcessor::n_workers() const {
+  return _srs.n_threads();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahRootProcessor.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,74 @@
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
+
+#include "gc/shared/strongRootsScope.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+
+class CLDClosure;
+class CodeBlobClosure;
+class G1CollectedHeap;
+class G1GCPhaseTimes;
+class G1ParPushHeapRSClosure;
+class Monitor;
+class OopClosure;
+class SubTasksDone;
+
+class ShenandoahRootProcessor : public StackObj {
+  SubTasksDone* _process_strong_tasks;
+  StrongRootsScope _srs;
+
+  enum Shenandoah_process_roots_tasks {
+    SHENANDOAH_RP_PS_Universe_oops_do,
+    SHENANDOAH_RP_PS_JNIHandles_oops_do,
+    SHENANDOAH_RP_PS_JNIHandles_weak_oops_do,
+    SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do,
+    SHENANDOAH_RP_PS_FlatProfiler_oops_do,
+    SHENANDOAH_RP_PS_Management_oops_do,
+    SHENANDOAH_RP_PS_SystemDictionary_oops_do,
+    SHENANDOAH_RP_PS_ClassLoaderDataGraph_oops_do,
+    SHENANDOAH_RP_PS_jvmti_oops_do,
+    SHENANDOAH_RP_PS_CodeCache_oops_do,
+    SHENANDOAH_RP_PS_filter_satb_buffers,
+    SHENANDOAH_RP_PS_refProcessor_oops_do,
+    // Leave this one last.
+    SHENANDOAH_RP_PS_NumElements
+  };
+
+  void process_java_roots(OopClosure* scan_non_heap_roots,
+                          CLDClosure* thread_stack_clds,
+                          CLDClosure* scan_strong_clds,
+                          CLDClosure* scan_weak_clds,
+                          CodeBlobClosure* scan_strong_code,
+                          uint worker_i);
+
+  void process_vm_roots(OopClosure* scan_non_heap_roots,
+                        OopClosure* scan_non_heap_weak_roots,
+                        uint worker_i);
+
+public:
+  ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers);
+
+  void process_roots(OopClosure* strong_oops,
+		     OopClosure* weak_oops,
+		     CLDClosure* strong_clds,
+		     CLDClosure* weak_clds,
+		     CLDClosure* thread_stack_clds,
+		     CodeBlobClosure* strong_code);
+
+  // Apply oops, clds and blobs to all strongly reachable roots in the system
+  void process_strong_roots(OopClosure* oops,
+                            CLDClosure* clds,
+                            CodeBlobClosure* blobs);
+
+  // Apply oops, clds and blobs to strongly and weakly reachable roots in the system
+  void process_all_roots(OopClosure* oops,
+                         CLDClosure* clds,
+                         CodeBlobClosure* blobs);
+
+  // Number of worker threads used by the root processor.
+  uint n_workers() const;
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahRuntime.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+
+#include "gc/shenandoah/shenandoahRuntime.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "oops/oop.inline.hpp"
+
+JRT_LEAF(bool, ShenandoahRuntime::compare_and_swap_object(HeapWord* addr, oopDesc* newval, oopDesc* old))
+  bool success;
+  oop expected;
+  do {
+    expected = old;
+    old = oopDesc::atomic_compare_exchange_oop(newval, addr, expected, true);
+    success  = (old == expected);
+  } while ((! success) && oopDesc::bs()->resolve_oop(old) == oopDesc::bs()->resolve_oop(expected));
+
+  return success;
+JRT_END
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/shenandoahRuntime.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,13 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
+
+#include "oops/oop.hpp"
+
+class ShenandoahRuntime : AllStatic {
+public:
+  static bool compare_and_swap_object(HeapWord* adr, oopDesc* newval, oopDesc* expected);
+};
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,244 @@
+/*
+  Copyright 2014 Red Hat, Inc. and/or its affiliates.
+*/
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/vm_operations_shenandoah.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+
+VM_Operation::VMOp_Type VM_ShenandoahInitMark::type() const {
+  return VMOp_ShenandoahInitMark;
+}
+
+const char* VM_ShenandoahInitMark::name() const {
+  return "Shenandoah Initial Marking";
+}
+
+void VM_ShenandoahInitMark::doit() {
+  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark);
+
+  if (sh->need_reset_bitmaps()) {
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
+    sh->reset_mark_bitmap();
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
+  }
+
+  assert(sh->is_bitmap_clear(), "need clear marking bitmap");
+
+  sh->set_need_reset_bitmaps(true);
+
+  if (ShenandoahGCVerbose)
+    tty->print("vm_ShenandoahInitMark\n");
+  sh->start_concurrent_marking();
+
+  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark);
+
+  if (! ShenandoahConcurrentMarking) {
+    sh->concurrentMark()->mark_from_roots();
+    VM_ShenandoahStartEvacuation finishMark;
+    finishMark.doit();
+  }
+}
+
+VM_Operation::VMOp_Type VM_ShenandoahFullGC::type() const {
+  return VMOp_ShenandoahFullGC;
+}
+
+void VM_ShenandoahFullGC::doit() {
+
+  ShenandoahMarkCompact::do_mark_compact();
+
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::resize_tlabs);
+  sh->resize_all_tlabs();
+  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::resize_tlabs);
+}
+
+const char* VM_ShenandoahFullGC::name() const {
+  return "Shenandoah Full GC";
+}
+
+
+bool VM_ShenandoahReferenceOperation::doit_prologue() {
+  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+  sh->acquire_pending_refs_lock();
+  return true;
+}
+
+void VM_ShenandoahReferenceOperation::doit_epilogue() {
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->release_pending_refs_lock();
+}
+
+void VM_ShenandoahStartEvacuation::doit() {
+
+  // We need to do the finish mark here, so that a JNI critical region
+  // can't divide it from evacuation start. It is critical that we
+  // evacuate roots right after finishing marking, so that we don't
+  // get unmarked objects in the roots.
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  if (!sh->cancelled_concgc()) {
+    if (ShenandoahGCVerbose)
+      tty->print("vm_ShenandoahFinalMark\n");
+
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark);
+    sh->concurrentMark()->finish_mark_from_roots();
+    sh->stop_concurrent_marking();
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark);
+
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::prepare_evac);
+    sh->prepare_for_concurrent_evacuation();
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::prepare_evac);
+
+    if (!sh->cancelled_concgc()){
+      sh->set_evacuation_in_progress(true);
+
+      // From here on, we need to update references.
+      sh->set_need_update_refs(true);
+
+      if (! ShenandoahConcurrentEvacuation) {
+	VM_ShenandoahEvacuation evacuation;
+	evacuation.doit();
+      } else {
+	if (!sh->cancelled_concgc()) {
+	  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_evac);
+	  sh->evacuate_and_update_roots();
+	  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_evac);
+	}
+      }
+    } else {
+      sh->free_regions()->set_concurrent_iteration_safe_limits();
+      //      sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::prepare_evac);
+      //      sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark);
+    }
+  } else {
+    sh->concurrentMark()->cancel();
+    sh->stop_concurrent_marking();
+  }    
+}
+
+VM_Operation::VMOp_Type VM_ShenandoahStartEvacuation::type() const {
+  return VMOp_ShenandoahStartEvacuation;
+}
+
+const char* VM_ShenandoahStartEvacuation::name() const {
+  return "Start shenandoah evacuation";
+}
+
+VM_Operation::VMOp_Type VM_ShenandoahVerifyHeapAfterEvacuation::type() const {
+  return VMOp_ShenandoahVerifyHeapAfterEvacuation;
+}
+
+const char* VM_ShenandoahVerifyHeapAfterEvacuation::name() const {
+  return "Shenandoah verify heap after evacuation";
+}
+
+void VM_ShenandoahVerifyHeapAfterEvacuation::doit() {
+
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->verify_heap_after_evacuation();
+
+}
+
+VM_Operation::VMOp_Type VM_ShenandoahEvacuation::type() const {
+  return VMOp_ShenandoahEvacuation;
+}
+
+const char* VM_ShenandoahEvacuation::name() const {
+  return "Shenandoah evacuation";
+}
+
+void VM_ShenandoahEvacuation::doit() {
+  if (ShenandoahGCVerbose)
+    tty->print("vm_ShenandoahEvacuation\n");
+
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->do_evacuation();
+
+  if (! ShenandoahConcurrentUpdateRefs) {
+    assert(! ShenandoahConcurrentEvacuation, "turn off concurrent evacuation");
+    sh->prepare_for_update_references();
+    sh->update_references();
+  }
+}
+/*
+  VM_Operation::VMOp_Type VM_ShenandoahVerifyHeapAfterUpdateRefs::type() const {
+  return VMOp_ShenandoahVerifyHeapAfterUpdateRefs;
+  }
+
+  const char* VM_ShenandoahVerifyHeapAfterUpdateRefs::name() const {
+  return "Shenandoah verify heap after updating references";
+  }
+
+  void VM_ShenandoahVerifyHeapAfterUpdateRefs::doit() {
+
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->verify_heap_after_update_refs();
+
+  }
+*/
+VM_Operation::VMOp_Type VM_ShenandoahUpdateRootRefs::type() const {
+  return VMOp_ShenandoahUpdateRootRefs;
+}
+
+const char* VM_ShenandoahUpdateRootRefs::name() const {
+  return "Shenandoah update root references";
+}
+
+void VM_ShenandoahUpdateRootRefs::doit() {
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  if (! sh->cancelled_concgc()) {
+
+    if (ShenandoahGCVerbose)
+      tty->print("vm_ShenandoahUpdateRootRefs\n");
+
+
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_uprefs);
+
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
+
+    sh->update_roots();
+
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);
+
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_uprefs);
+  }
+
+  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::recycle_regions);
+  sh->recycle_dirty_regions();
+  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::recycle_regions);
+
+  if (ShenandoahVerify && ! sh->cancelled_concgc()) {
+    sh->verify_heap_after_update_refs();
+    sh->verify_regions_after_update_refs();
+  }
+#ifdef ASSERT
+  if (! ShenandoahVerify) {
+    assert(sh->is_bitmap_clear(), "need cleared bitmap here");
+  }
+#endif
+
+}
+
+VM_Operation::VMOp_Type VM_ShenandoahUpdateRefs::type() const {
+  return VMOp_ShenandoahUpdateRefs;
+}
+
+const char* VM_ShenandoahUpdateRefs::name() const {
+  return "Shenandoah update references";
+}
+
+void VM_ShenandoahUpdateRefs::doit() {
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  if (!sh->cancelled_concgc()) {
+
+    if (ShenandoahGCVerbose)
+      tty->print("vm_ShenandoahUpdateRefs\n");
+    
+    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_evac);
+    sh->set_evacuation_in_progress(false);
+    sh->prepare_for_update_references();
+    assert(ShenandoahConcurrentUpdateRefs, "only do this when concurrent update references is turned on");
+    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_evac);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -0,0 +1,97 @@
+/*
+Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ */
+#ifndef SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
+#define SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
+
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shared/vmGCOperations.hpp"
+
+// VM_operations for the Shenandoah Collector.
+// For now there are two main pauses: the initial marking pause, and the final pause that finishes marking and performs evacuation.
+//    VM_ShenandoahInitMark
+//    VM_ShenandoahStartEvacuation
+
+class VM_ShenandoahInitMark: public VM_Operation {
+  
+public:
+  virtual VMOp_Type type() const;
+  virtual void doit();
+
+  virtual const char* name() const;
+};
+
+class VM_ShenandoahReferenceOperation : public VM_Operation {
+  bool doit_prologue();
+  void doit_epilogue();
+
+};
+
+class VM_ShenandoahStartEvacuation: public VM_ShenandoahReferenceOperation {
+
+ public:
+  VMOp_Type type() const;
+  void doit();
+  const char* name() const;
+
+};
+
+class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation {
+ public:
+  VMOp_Type type() const;
+  void doit();
+  const char* name() const;
+};
+
+class VM_ShenandoahVerifyHeapAfterEvacuation: public VM_Operation {
+
+ public:
+  virtual VMOp_Type type() const;
+  virtual void doit();
+
+  virtual const char* name() const;
+
+};
+
+class VM_ShenandoahEvacuation: public VM_Operation {
+
+ public:
+  virtual VMOp_Type type() const;
+  virtual void doit();
+
+  virtual const char* name() const;
+
+};
+
+/*
+class VM_ShenandoahVerifyHeapAfterUpdateRefs: public VM_Operation {
+
+ public:
+  virtual VMOp_Type type() const;
+  virtual void doit();
+
+  virtual const char* name() const;
+
+};
+*/
+class VM_ShenandoahUpdateRootRefs: public VM_Operation {
+
+ public:
+  virtual VMOp_Type type() const;
+  virtual void doit();
+
+  virtual const char* name() const;
+
+};
+
+class VM_ShenandoahUpdateRefs: public VM_Operation {
+
+ public:
+  virtual VMOp_Type type() const;
+  virtual void doit();
+
+  virtual const char* name() const;
+
+};
+
+#endif //SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
--- a/src/share/vm/gc_implementation/shenandoah/brooksPointer.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#include "memory/universe.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-
-BrooksPointer::BrooksPointer(HeapWord** hw) : _heap_word(hw) {}
-
-BrooksPointer BrooksPointer::get(oop obj) {
-  HeapWord* hw_obj = (HeapWord*) obj;
-  HeapWord* brooks_ptr = hw_obj - 1;
-  // We know that the value in that memory location is a pointer to another
-  // heapword/oop.
-  return BrooksPointer((HeapWord**) brooks_ptr);
-}
-
-void BrooksPointer::set_forwardee(oop forwardee) {
-  assert(ShenandoahHeap::heap()->is_in(forwardee), "forwardee must be valid oop in the heap");
-  *_heap_word = (HeapWord*) forwardee;
-#ifdef ASSERT
-  if (ShenandoahTraceBrooksPointers) {
-    tty->print_cr("setting_forwardee to "PTR_FORMAT" = "PTR_FORMAT, p2i((HeapWord*) forwardee), p2i(*_heap_word));
-  }
-#endif
-}
-
-HeapWord* BrooksPointer::cas_forwardee(HeapWord* old, HeapWord* forwardee) {
-  assert(ShenandoahHeap::heap()->is_in(forwardee), "forwardee must point to a heap address");
-  
-
-
-  HeapWord* o = old;
-  HeapWord* n = forwardee;
-  HeapWord* result;
-
-#ifdef ASSERT
-  if (ShenandoahTraceBrooksPointers) {
-    tty->print_cr("Attempting to CAS "PTR_FORMAT" value "PTR_FORMAT" from "PTR_FORMAT" to "PTR_FORMAT, p2i(_heap_word), p2i(*_heap_word), p2i(o), p2i(n));
-  }
-#endif
-
-#ifdef ASSERT  
-  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
-    ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-    ShenandoahHeapRegion* hr = sh->heap_region_containing(old);
-
-    {
-      hr->memProtectionOff();
-      result =  (HeapWord*) (HeapWord*) Atomic::cmpxchg_ptr(n, _heap_word, o);
-      hr->memProtectionOn();
-    }
-  } else {
-    result =  (HeapWord*) (HeapWord*) Atomic::cmpxchg_ptr(n, _heap_word, o);
-  }
-#else 
-  result =  (HeapWord*) (HeapWord*) Atomic::cmpxchg_ptr(n, _heap_word, o);
-#endif
-  
-#ifdef ASSERT
-  if (ShenandoahTraceBrooksPointers) {
-    tty->print_cr("Result of CAS from "PTR_FORMAT" to "PTR_FORMAT" was "PTR_FORMAT" read value was "PTR_FORMAT, p2i(o), p2i(n), p2i(result), p2i(*_heap_word));
-  }
-#endif
-
-  return result;
-}					 
-
-bool BrooksPointer::check_forwardee_is_in_heap(oop forwardee) {
-   return Universe::heap()->is_in(forwardee);
-}
--- a/src/share/vm/gc_implementation/shenandoah/brooksPointer.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,62 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_BROOKSPOINTER_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_BROOKSPOINTER_HPP
-
-#include "oops/oop.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-
-class BrooksPointer {
-
-public:
-  static const uint BROOKS_POINTER_OBJ_SIZE = 1;
-
-private:
-
-  HeapWord** _heap_word;
-
-  BrooksPointer(HeapWord** heap_word);
-
-public:
-
-  bool check_forwardee_is_in_heap(oop forwardee);
-  
-  inline oop get_forwardee_raw() {
-    return oop(*_heap_word);
-  }
-
-  inline oop get_forwardee() {
-    oop forwardee;
-
-#ifdef ASSERT
-    if (ShenandoahVerifyReadsToFromSpace) {
-      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-      ShenandoahHeapRegion* hr = sh->heap_region_containing(_heap_word);
-
-      {
-        hr->memProtectionOff();
-        forwardee = (oop) (*_heap_word);
-        hr->memProtectionOn();
-      }
-    } else {
-      forwardee = get_forwardee_raw();
-    }
-#else
-    forwardee = get_forwardee_raw();
-#endif
-
-    assert(check_forwardee_is_in_heap(forwardee), "forwardee must be in heap");
-    assert(forwardee->is_oop(), "forwardee must be valid oop");
-    return forwardee;
-  }
-
-  void set_forwardee(oop forwardee);
-  HeapWord* cas_forwardee(HeapWord* old, HeapWord* forwardee);
-
-  static BrooksPointer get(oop obj);
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_BROOKSPOINTER_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,685 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "memory/universe.hpp"
-#include "utilities/array.hpp"
-
-#define __ masm->
-
-class UpdateRefsForOopClosure: public ExtendedOopClosure {
-
-private:
-  ShenandoahHeap* _heap;
-public:
-  UpdateRefsForOopClosure() {
-    _heap = ShenandoahHeap::heap();
-  }
-
-  void do_oop(oop* p)       {
-    _heap->maybe_update_oop_ref(p);
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-
-};
-
-ShenandoahBarrierSet::ShenandoahBarrierSet() :
-  BarrierSet(BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet))
-{
-}
-
-void ShenandoahBarrierSet::print_on(outputStream* st) const {
-  st->print("ShenandoahBarrierSet");
-}
-
-bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
-  return bsn == BarrierSet::ShenandoahBarrierSet;
-}
-
-bool ShenandoahBarrierSet::has_read_prim_array_opt() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_read_prim_barrier() {
-  return false;
-}
-
-bool ShenandoahBarrierSet::has_read_ref_array_opt() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_read_ref_barrier() {
-  return false;
-}
-
-bool ShenandoahBarrierSet::has_read_region_opt() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_write_prim_array_opt() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_write_prim_barrier() {
-  return false;
-}
-
-bool ShenandoahBarrierSet::has_write_ref_array_opt() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_write_ref_barrier() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_write_ref_pre_barrier() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::has_write_region_opt() {
-  return true;
-}
-
-bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
-  return true;
-}
-
-void ShenandoahBarrierSet::read_prim_array(MemRegion mr) {
-  Unimplemented();
-}
-
-void ShenandoahBarrierSet::read_prim_field(HeapWord* hw, size_t s){
-  Unimplemented();
-}
-
-bool ShenandoahBarrierSet::read_prim_needs_barrier(HeapWord* hw, size_t s) {
-  return false;
-}
-
-void ShenandoahBarrierSet::read_ref_array(MemRegion mr) {
-  Unimplemented();
-}
-
-void ShenandoahBarrierSet::read_ref_field(void* v) {
-  //    tty->print_cr("read_ref_field: v = "PTR_FORMAT, v);
-  // return *v;
-}
-
-bool ShenandoahBarrierSet::read_ref_needs_barrier(void* v) {
-  Unimplemented();
-  return false;
-}
-
-void ShenandoahBarrierSet::read_region(MemRegion mr) {
-  Unimplemented();
-}
-
-void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
-  Unimplemented();
-}
-
-void ShenandoahBarrierSet::write_prim_array(MemRegion mr) {
-  Unimplemented();
-}
-
-void ShenandoahBarrierSet::write_prim_field(HeapWord* hw, size_t s , juint x, juint y) {
-  Unimplemented();
-}
-
-bool ShenandoahBarrierSet::write_prim_needs_barrier(HeapWord* hw, size_t s, juint x, juint y) {
-  Unimplemented();
-  return false;
-}
-
-bool ShenandoahBarrierSet::need_update_refs_barrier() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  return heap->is_update_references_in_progress() || (heap->concurrent_mark_in_progress() && heap->need_update_refs());
-}
-
-void ShenandoahBarrierSet::write_ref_array_work(MemRegion mr) {
-  if (! need_update_refs_barrier()) return;
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  for (HeapWord* word = mr.start(); word < mr.end(); word++) {
-    oop* oop_ptr = (oop*) word;
-    heap->maybe_update_oop_ref(oop_ptr);
-  }
-}
-
-template <class T>
-void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, int count) {
-
-#ifdef ASSERT
-    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-    if (sh->is_in(dst) && 
-	sh->heap_region_containing((HeapWord*) dst)->is_in_collection_set() &&
-        ! sh->cancelled_concgc()) {
-      tty->print_cr("dst = "PTR_FORMAT, p2i(dst));
-      sh->heap_region_containing((HeapWord*) dst)->print();
-      assert(false, "We should have fixed this earlier");   
-    }   
-#endif
-
-  if (! JavaThread::satb_mark_queue_set().is_active()) return;
-  // tty->print_cr("write_ref_array_pre_work: "PTR_FORMAT", "INT32_FORMAT, dst, count);
-  T* elem_ptr = dst;
-  for (int i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      G1SATBCardTableModRefBS::enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
-    }
-    // tty->print_cr("write_ref_array_pre_work: oop: "PTR_FORMAT, heap_oop);
-  }
-}
-
-void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
-  if (! dest_uninitialized) {
-    write_ref_array_pre_work(dst, count);
-  }
-}
-
-void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
-  if (! dest_uninitialized) {
-    write_ref_array_pre_work(dst, count);
-  }
-}
-
-template <class T>
-void ShenandoahBarrierSet::write_ref_field_pre_static(T* field, oop newVal) {
-  T heap_oop = oopDesc::load_heap_oop(field);
-
-#ifdef ASSERT
-    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-    if (sh->is_in(field) && 
-	sh->heap_region_containing((HeapWord*)field)->is_in_collection_set() &&
-        ! sh->cancelled_concgc()) {
-      tty->print_cr("field = "PTR_FORMAT, p2i(field));
-      sh->heap_region_containing((HeapWord*)field)->print();
-      assert(false, "We should have fixed this earlier");   
-    }   
-#endif
-
-  if (!oopDesc::is_null(heap_oop)) {
-    G1SATBCardTableModRefBS::enqueue(oopDesc::decode_heap_oop(heap_oop));
-    // tty->print_cr("write_ref_field_pre_static: v = "PTR_FORMAT" o = "PTR_FORMAT" old: "PTR_FORMAT, field, newVal, heap_oop);
-  }
-}
-
-template <class T>
-inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop newVal) {
-  write_ref_field_pre_static(field, newVal);
-}
-
-// These are the more general virtual versions.
-void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
-  write_ref_field_pre_static(field, new_val);
-}
-
-void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
-  write_ref_field_pre_static(field, new_val);
-}
-
-void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
-  guarantee(false, "Not needed");
-}
-
-void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
-  if (! need_update_refs_barrier()) return;
-  assert (! UseCompressedOops, "compressed oops not supported yet");
-  ShenandoahHeap::heap()->maybe_update_oop_ref((oop*) v);
-  // tty->print_cr("write_ref_field_work: v = "PTR_FORMAT" o = "PTR_FORMAT, v, o);
-}
-
-void ShenandoahBarrierSet::write_region_work(MemRegion mr) {
-
-  if (! need_update_refs_barrier()) return;
-
-  // This is called for cloning an object (see jvm.cpp) after the clone
-  // has been made. We are not interested in any 'previous value' because
-  // it would be NULL in any case. But we *are* interested in any oop*
-  // that potentially need to be updated.
-
-  // tty->print_cr("write_region_work: "PTR_FORMAT", "PTR_FORMAT, mr.start(), mr.end());
-  oop obj = oop(mr.start());
-  assert(obj->is_oop(), "must be an oop");
-  UpdateRefsForOopClosure cl;
-  obj->oop_iterate(&cl);
-}
-
-oop ShenandoahBarrierSet::resolve_oop(oop src) {
-  return ShenandoahBarrierSet::resolve_oop_static(src);
-}
-
-oop ShenandoahBarrierSet::maybe_resolve_oop(oop src) {
-  if (Universe::heap()->is_in(src)) {
-    return resolve_oop_static(src);
-  } else {
-    return src;
-  }
-}
-
-oop ShenandoahBarrierSet::resolve_and_maybe_copy_oop_work(oop src) {
-  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-  assert(src != NULL, "only evacuated non NULL oops");
-
-  if (sh->in_cset_fast_test((HeapWord*) src)) {
-    return resolve_and_maybe_copy_oop_work2(src);
-  } else {
-    return src;
-  }
-}
-
-oop ShenandoahBarrierSet::resolve_and_maybe_copy_oop_work2(oop src) {
-  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-  if (! sh->is_evacuation_in_progress()) {
-    // We may get here through a barrier that just took a safepoint that
-    // turned off evacuation. In this case, return right away.
-    return ShenandoahBarrierSet::resolve_oop_static(src);
-  }
-  assert(src != NULL, "only evacuated non NULL oops");
-  assert(sh->heap_region_containing(src)->is_in_collection_set(), "only evacuate objects in collection set");
-  assert(! sh->heap_region_containing(src)->is_humongous(), "never evacuate humongous objects");
-  // TODO: Consider passing thread from caller.
-  oop dst = sh->evacuate_object(src, Thread::current());
-#ifdef ASSERT
-    if (ShenandoahTraceEvacuations) {
-      tty->print_cr("src = "PTR_FORMAT" dst = "PTR_FORMAT" src = "PTR_FORMAT" src-2 = "PTR_FORMAT,
-                 p2i((HeapWord*) src), p2i((HeapWord*) dst), p2i((HeapWord*) src), p2i(((HeapWord*) src) - 2));
-    }
-#endif
-  assert(sh->is_in(dst), "result should be in the heap");
-  return dst;
-}
-
-oop ShenandoahBarrierSet::resolve_and_maybe_copy_oopHelper(oop src) {
-    if (src != NULL) {
-      ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-      oop tmp = resolve_oop_static(src);
-      if (! sh->is_evacuation_in_progress()) {
-        return tmp;
-      }
-      return resolve_and_maybe_copy_oop_work(src);
-    } else {
-      return NULL;
-    }
-}
-
-JRT_LEAF(oopDesc*, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_c2(oopDesc* src))
-  oop result = ((ShenandoahBarrierSet*) oopDesc::bs())->resolve_and_maybe_copy_oop_work2(oop(src));
-  // tty->print_cr("called C2 write barrier with: %p result: %p copy: %d", (oopDesc*) src, (oopDesc*) result, src != result);
-  return (oopDesc*) result;
-JRT_END
-
-IRT_LEAF(oopDesc*, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_interp(oopDesc* src))
-  oop result = ((ShenandoahBarrierSet*)oopDesc::bs())->resolve_and_maybe_copy_oop_work2(oop(src));
-  // tty->print_cr("called interpreter write barrier with: %p result: %p", src, result);
-  return (oopDesc*) result;
-IRT_END
-
-JRT_LEAF(oopDesc*, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_c1(JavaThread* thread, oopDesc* src))
-  oop result = ((ShenandoahBarrierSet*)oopDesc::bs())->resolve_and_maybe_copy_oop_work2(oop(src));
-  // tty->print_cr("called static write barrier (2) with: "PTR_FORMAT" result: "PTR_FORMAT, p2i(src), p2i((oopDesc*)(result)));
-  return (oopDesc*) result;
-JRT_END
-
-oop ShenandoahBarrierSet::resolve_and_maybe_copy_oop(oop src) {
-    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();      
-    oop result;
-    if (src != NULL && sh->is_in(src)) {
-      result = resolve_and_maybe_copy_oopHelper(src);
-      assert(sh->is_in(result), "result should be in the heap");
-    } else {
-      result = src;
-    }
-    assert(result == NULL || (sh->is_in(result) && result->is_oop()), "resolved oop must be NULL, or a valid oop in the heap");
-    return result;
-  }
-
-#ifndef CC_INTERP
-void ShenandoahBarrierSet::compile_resolve_oop_runtime(MacroAssembler* masm, Register dst) {
-
-  __ push(rscratch1);
-
-  if (dst != rax) {
-    __ push(rax);
-  }
-  if (dst != rbx) {
-    __ push(rbx);
-  }
-  if (dst != rcx) {
-    __ push(rcx);
-  }
-  if (dst != rdx) {
-    __ push(rdx);
-  }
-  if (dst != rdi) {
-    __ push(rdi);
-  }
-  if (dst != rsi) {
-    __ push(rsi);
-  }
-  if (dst != rbp) {
-    __ push(rbp);
-  }
-  if (dst != r8) {
-    __ push(r8);
-  }
-  if (dst != r9) {
-    __ push(r9);
-  }
-  if (dst != r11) {
-    __ push(r11);
-  }
-  if (dst != r12) {
-    __ push(r12);
-  }
-  if (dst != r13) {
-    __ push(r13);
-  }
-  if (dst != r14) {
-    __ push(r14);
-  }
-  if (dst != r15) {
-    __ push(r15);
-  }
-
-  __ subptr(rsp, 128);
-  __ movdbl(Address(rsp, 0), xmm0);
-  __ movdbl(Address(rsp, 8), xmm1);
-  __ movdbl(Address(rsp, 16), xmm2);
-  __ movdbl(Address(rsp, 24), xmm3);
-  __ movdbl(Address(rsp, 32), xmm4);
-  __ movdbl(Address(rsp, 40), xmm5);
-  __ movdbl(Address(rsp, 48), xmm6);
-  __ movdbl(Address(rsp, 56), xmm7);
-  __ movdbl(Address(rsp, 64), xmm8);
-  __ movdbl(Address(rsp, 72), xmm9);
-  __ movdbl(Address(rsp, 80), xmm10);
-  __ movdbl(Address(rsp, 88), xmm11);
-  __ movdbl(Address(rsp, 96), xmm12);
-  __ movdbl(Address(rsp, 104), xmm13);
-  __ movdbl(Address(rsp, 112), xmm14);
-  __ movdbl(Address(rsp, 120), xmm15);
-
-  __ mov(c_rarg1, dst);
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::resolve_oop_static), c_rarg1);
-  __ mov(rscratch1, rax);
-
-  __ movdbl(xmm0, Address(rsp, 0));
-  __ movdbl(xmm1, Address(rsp, 8));
-  __ movdbl(xmm2, Address(rsp, 16));
-  __ movdbl(xmm3, Address(rsp, 24));
-  __ movdbl(xmm4, Address(rsp, 32));
-  __ movdbl(xmm5, Address(rsp, 40));
-  __ movdbl(xmm6, Address(rsp, 48));
-  __ movdbl(xmm7, Address(rsp, 56));
-  __ movdbl(xmm8, Address(rsp, 64));
-  __ movdbl(xmm9, Address(rsp, 72));
-  __ movdbl(xmm10, Address(rsp, 80));
-  __ movdbl(xmm11, Address(rsp, 88));
-  __ movdbl(xmm12, Address(rsp, 96));
-  __ movdbl(xmm13, Address(rsp, 104));
-  __ movdbl(xmm14, Address(rsp, 112));
-  __ movdbl(xmm15, Address(rsp, 120));
-  __ addptr(rsp, 128);
-
-  if (dst != r15) {
-    __ pop(r15);
-  }
-  if (dst != r14) {
-    __ pop(r14);
-  }
-  if (dst != r13) {
-    __ pop(r13);
-  }
-  if (dst != r12) {
-    __ pop(r12);
-  }
-  if (dst != r11) {
-    __ pop(r11);
-  }
-  if (dst != r9) {
-    __ pop(r9);
-  }
-  if (dst != r8) {
-    __ pop(r8);
-  }
-  if (dst != rbp) {
-    __ pop(rbp);
-  }
-  if (dst != rsi) {
-    __ pop(rsi);
-  }
-  if (dst != rdi) {
-    __ pop(rdi);
-  }
-  if (dst != rdx) {
-    __ pop(rdx);
-  }
-  if (dst != rcx) {
-    __ pop(rcx);
-  }
-  if (dst != rbx) {
-    __ pop(rbx);
-  }
-  if (dst != rax) {
-    __ pop(rax);
-  }
-
-  __ mov(dst, rscratch1);
-
-  __ pop(rscratch1);
-}
-
-// TODO: The following should really live in an X86 specific subclass.
-void ShenandoahBarrierSet::compile_resolve_oop(MacroAssembler* masm, Register dst) {
-  if (ShenandoahReadBarrier) {
-
-    Label is_null;
-    __ testptr(dst, dst);
-    __ jcc(Assembler::zero, is_null);
-    compile_resolve_oop_not_null(masm, dst);
-    __ bind(is_null);
-  }
-}
-
-void ShenandoahBarrierSet::compile_resolve_oop_not_null(MacroAssembler* masm, Register dst) {
-  if (ShenandoahReadBarrier) {
-    if (ShenandoahVerifyReadsToFromSpace) {
-      compile_resolve_oop_runtime(masm, dst);
-      return;
-    }
-    __ movptr(dst, Address(dst, -8));
-  }
-}
-
-void ShenandoahBarrierSet::compile_resolve_oop_for_write(MacroAssembler* masm, Register dst, bool explicit_null_check, int stack_adjust, int num_state_save, ...) {
-
-  if (! ShenandoahWriteBarrier) {
-    assert(! ShenandoahConcurrentEvacuation, "Can only do this without concurrent evacuation");
-    return compile_resolve_oop(masm, dst);
-  }
-      
-  assert(dst != rscratch1, "different regs");
-  //assert(dst != rscratch2, "Need rscratch2");
-
-  Label done;
-
-  // Resolve oop first.
-  // TODO: Make this not-null-checking as soon as we have implicit null checks in c1!
-
-
-  if (explicit_null_check) {
-    __ testptr(dst, dst);
-    __ jcc(Assembler::zero, done);
-  }
-
-  Address evacuation_in_progress = Address(r15_thread, in_bytes(JavaThread::evacuation_in_progress_offset()));
-
-  __ cmpb(evacuation_in_progress, 0);
-
-  // Now check if evacuation is in progress.
-  compile_resolve_oop_not_null(masm, dst);
-
-  __ jcc(Assembler::equal, done);
-  __ push(rscratch1);
-  __ push(rscratch2);
-
-  __ movptr(rscratch1, dst);
-  __ shrptr(rscratch1, ShenandoahHeapRegion::RegionSizeShift);
-  __ movptr(rscratch2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
-  __ movbool(rscratch2, Address(rscratch2, rscratch1, Address::times_1));
-  __ testb(rscratch2, 0x1);
-
-  __ pop(rscratch2);
-  __ pop(rscratch1);
-
-  __ jcc(Assembler::zero, done);
-
-  intArray save_states = intArray(num_state_save);
-  va_list vl;
-  va_start(vl, num_state_save);
-  for (int i = 0; i < num_state_save; i++) {
-    save_states.at_put(i, va_arg(vl, int));
-  }
-  va_end(vl);
-
-  __ push(rscratch1);
-  for (int i = 0; i < num_state_save; i++) {
-    switch (save_states[i]) {
-    case noreg:
-      __ subptr(rsp, Interpreter::stackElementSize);
-      break;
-    case ss_rax:
-      __ push(rax);
-      break;
-    case ss_rbx:
-      __ push(rbx);
-      break;
-    case ss_rcx:
-      __ push(rcx);
-      break;
-    case ss_rdx:
-      __ push(rdx);
-      break;
-    case ss_rsi:
-      __ push(rsi);
-      break;
-    case ss_rdi:
-      __ push(rdi);
-      break;
-    case ss_r13:
-      __ push(r13);
-      break;
-    case ss_ftos:
-      __ subptr(rsp, wordSize);
-      __ movflt(Address(rsp, 0), xmm0);
-      break;
-    case ss_dtos:
-      __ subptr(rsp, 2 * wordSize);
-      __ movdbl(Address(rsp, 0), xmm0);
-      break;
-    case ss_c_rarg0:
-      __ push(c_rarg0);
-      break;
-    case ss_c_rarg1:
-      __ push(c_rarg1);
-      break;
-    case ss_c_rarg2:
-      __ push(c_rarg2);
-      break;
-    case ss_c_rarg3:
-      __ push(c_rarg3);
-      break;
-    case ss_c_rarg4:
-      __ push(c_rarg4);
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-  }
-
-  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::resolve_and_maybe_copy_oop_interp), dst);
-  __ mov(rscratch1, rax);
-
-  for (int i = num_state_save - 1; i >= 0; i--) {
-    switch (save_states[i]) {
-    case noreg:
-      __ addptr(rsp, Interpreter::stackElementSize);
-      break;
-    case ss_rax:
-      __ pop(rax);
-      break;
-    case ss_rbx:
-      __ pop(rbx);
-      break;
-    case ss_rcx:
-      __ pop(rcx);
-      break;
-    case ss_rdx:
-      __ pop(rdx);
-      break;
-    case ss_rsi:
-      __ pop(rsi);
-      break;
-    case ss_rdi:
-      __ pop(rdi);
-      break;
-    case ss_r13:
-      __ pop(r13);
-      break;
-    case ss_ftos:
-      __ movflt(xmm0, Address(rsp, 0));
-      __ addptr(rsp, wordSize);
-      break;
-    case ss_dtos:
-      __ movdbl(xmm0, Address(rsp, 0));
-      __ addptr(rsp, 2 * Interpreter::stackElementSize);
-      break;
-    case ss_c_rarg0:
-      __ pop(c_rarg0);
-      break;
-    case ss_c_rarg1:
-      __ pop(c_rarg1);
-      break;
-    case ss_c_rarg2:
-      __ pop(c_rarg2);
-      break;
-    case ss_c_rarg3:
-      __ pop(c_rarg3);
-      break;
-    case ss_c_rarg4:
-      __ pop(c_rarg4);
-      break;
-    default:
-      ShouldNotReachHere();
-    }
-  }
-
-  __ mov(dst, rscratch1);
-  __ pop(rscratch1);
-
-  __ bind(done);
-}
-
-/*
-void ShenandoahBarrierSet::compile_resolve_oop_for_write(MacroAssembler* masm, Register dst) {
-
-  Label is_null;
-  __ testptr(dst, dst);
-  __ jcc(Assembler::zero, is_null);
-  compile_resolve_oop_for_write_not_null(masm, dst);
-  __ bind(is_null);
-
-}
-*/
-#endif
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
-
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "gc/shared/barrierSet.hpp"
-
-class ShenandoahBarrierSet: public BarrierSet {
-private:
-
-  static inline oop get_shenandoah_forwardee_helper(oop p) {
-    assert(UseShenandoahGC, "must only be called when Shenandoah is used.");
-    assert(Universe::heap()->is_in(p), "We shouldn't be calling this on objects not in the heap");
-    oop forwardee;
-#ifdef ASSERT
-    if (ShenandoahVerifyReadsToFromSpace) {
-      ShenandoahHeap* heap = (ShenandoahHeap *) Universe::heap();
-      ShenandoahHeapRegion* region = heap->heap_region_containing(p);
-      {
-        region->memProtectionOff();
-        forwardee = oop( *((HeapWord**) ((HeapWord*) p) - 1));
-        region->memProtectionOn();
-      }
-    } else {
-      forwardee = oop( *((HeapWord**) ((HeapWord*) p) - 1));
-    }
-#else
-    forwardee = oop( *((HeapWord**) ((HeapWord*) p) - 1));
-#endif
-    return forwardee;
-  }
-
-public:
-
-  ShenandoahBarrierSet();
-
-  void print_on(outputStream* st) const;
-
-  bool is_a(BarrierSet::Name bsn);
-
-  bool has_read_prim_array_opt();
-  bool has_read_prim_barrier();
-  bool has_read_ref_array_opt();
-  bool has_read_ref_barrier();
-  bool has_read_region_opt();
-  bool has_write_prim_array_opt();
-  bool has_write_prim_barrier();
-  bool has_write_ref_array_opt();
-  bool has_write_ref_barrier();
-  bool has_write_ref_pre_barrier();
-  bool has_write_region_opt();
-  bool is_aligned(HeapWord* hw);
-  void read_prim_array(MemRegion mr);
-  void read_prim_field(HeapWord* hw, size_t s);
-  bool read_prim_needs_barrier(HeapWord* hw, size_t s);
-  void read_ref_array(MemRegion mr);
-
-  void read_ref_field(void* v);
-
-  bool read_ref_needs_barrier(void* v);
-  void read_region(MemRegion mr);
-  void resize_covered_region(MemRegion mr);
-  void write_prim_array(MemRegion mr);
-  void write_prim_field(HeapWord* hw, size_t s , juint x, juint y);
-  bool write_prim_needs_barrier(HeapWord* hw, size_t s, juint x, juint y);
-  void write_ref_array_work(MemRegion mr);
-
-  template <class T> void
-  write_ref_array_pre_work(T* dst, int count);
-
-  void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
-
-  void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
-
-
-  template <class T> static void write_ref_field_pre_static(T* field, oop newVal);
-
-  // We export this to make it available in cases where the static
-  // type of the barrier set is known.  Note that it is non-virtual.
-  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
-
-  // These are the more general virtual versions.
-  void write_ref_field_pre_work(oop* field, oop new_val);
-  void write_ref_field_pre_work(narrowOop* field, oop new_val);
-  void write_ref_field_pre_work(void* field, oop new_val);
-
-  void write_ref_field_work(void* v, oop o, bool release = false);
-  void write_region_work(MemRegion mr);
-
-  virtual oop resolve_oop(oop src);
-
-  template <class T>
-  static inline oop resolve_and_update_oop_static(T p, oop obj) {
-    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
-    if (forw != obj) {
-      obj = forw;
-      oopDesc::encode_store_heap_oop_not_null(p, obj);
-    }
-    return obj;
-  }
-
-  static inline oop resolve_oop_static_not_null(oop p) {
-    assert(p != NULL, "Must be NULL checked");
-
-    oop result = get_shenandoah_forwardee_helper(p);
-
-    if (result != NULL) {
-#ifdef ASSERT
-      if (result != p) {
-        oop second_forwarding = get_shenandoah_forwardee_helper(result);
-
-        // We should never be forwarded more than once.
-        if (result != second_forwarding) {
-          ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-          tty->print("first reference "PTR_FORMAT" is in heap region:\n", p2i((HeapWord*) p));
-          sh->heap_region_containing(p)->print();
-          tty->print("first_forwarding "PTR_FORMAT" is in heap region:\n", p2i((HeapWord*) result));
-          sh->heap_region_containing(result)->print();
-          tty->print("final reference "PTR_FORMAT" is in heap region:\n", p2i((HeapWord*) second_forwarding));
-          sh->heap_region_containing(second_forwarding)->print();
-          assert(get_shenandoah_forwardee_helper(result) == result, "Only one fowarding per customer");
-        }
-      }
-#endif
-      if (! ShenandoahVerifyReadsToFromSpace) {
-	// is_oop() would trigger a SEGFAULT when we're checking from-space-access.
-	assert(ShenandoahHeap::heap()->is_in(result) && result->is_oop(), "resolved oop must be a valid oop in the heap");
-      }
-    }
-    return result;
-  }
-
-  static inline oop resolve_oop_static(oop p) {
-    if (((HeapWord*) p) != NULL) {
-      return resolve_oop_static_not_null(p);
-    } else {
-      return p;
-    }
-  }
-
-  static inline oop resolve_oop_static_no_check(oop p) {
-    if (((HeapWord*) p) != NULL) {
-      return get_shenandoah_forwardee_helper(p);
-    } else {
-      return p;
-    }
-  }
-
-
-  virtual oop maybe_resolve_oop(oop src);
-  oop resolve_and_maybe_copy_oopHelper(oop src);
-  oop resolve_and_maybe_copy_oop_work(oop src);
-  oop resolve_and_maybe_copy_oop_work2(oop src);
-  virtual oop resolve_and_maybe_copy_oop(oop src);
-
-  static oopDesc* resolve_and_maybe_copy_oop_c2(oopDesc* src);
-  static oopDesc* resolve_and_maybe_copy_oop_interp(oopDesc* src);
-  static oopDesc* resolve_and_maybe_copy_oop_c1(JavaThread* thread, oopDesc* src);
-
-private:
-  bool need_update_refs_barrier();
-
-#ifndef CC_INTERP
-public:
-  // TODO: The following should really live in an X86 specific subclass.
-  virtual void compile_resolve_oop(MacroAssembler* masm, Register dst);
-  virtual void compile_resolve_oop_not_null(MacroAssembler* masm, Register dst);
-  void compile_resolve_oop_for_write(MacroAssembler* masm, Register dst, bool explicit_null_check, int stack_adjust, int num_save_state, ...);
-
-private:
-  void compile_resolve_oop_runtime(MacroAssembler* masm, Register dst);
-
-#endif
-};
-
-class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet {
-
-   oop resolve_oop(oop src) {
-     return src;
-   }
-   oop maybe_resolve_oop(oop src) {
-     return src;
-   }
-};
-
-#endif //SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHBARRIERSET_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,761 +0,0 @@
-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-
-class ShenandoahHeuristics : public CHeapObj<mtGC> {
-
-  NumberSeq _allocation_rate_bytes;
-  NumberSeq _reclamation_rate_bytes;
-
-  size_t _bytes_allocated_since_CM;
-  size_t _bytes_reclaimed_this_cycle;
-
-protected:
-  size_t _bytes_allocated_start_CM;
-  size_t _bytes_allocated_during_CM;
-
-public:
-
-  ShenandoahHeuristics();
-
-  void record_bytes_allocated(size_t bytes);
-  void record_bytes_reclaimed(size_t bytes);
-  void record_bytes_start_CM(size_t bytes);
-  void record_bytes_end_CM(size_t bytes);
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
-  virtual bool update_refs_early();
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set, 
-                                               ShenandoahHeapRegionSet* collection_set, 
-                                               ShenandoahHeapRegionSet* free_set) =0;
-  void print_tracing_info();
-};
-
-ShenandoahHeuristics::ShenandoahHeuristics() :
-  _bytes_allocated_since_CM(0),
-  _bytes_reclaimed_this_cycle(0),
-  _bytes_allocated_start_CM(0),
-  _bytes_allocated_during_CM(0)
-{
-  if (PrintGCDetails)
-    tty->print_cr("initializing heuristics");
-}
-
-void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
-  _timing_data[phase]._start = os::elapsedTime();
-
-  if (PrintGCTimeStamps) {
-    if (phase == init_mark)
-      _tracer->report_gc_start(GCCause::_shenandoah_init_mark, _conc_timer->gc_start());
-    else if (phase == full_gc) 
-      _tracer->report_gc_start(GCCause::_last_ditch_collection, _stw_timer->gc_start());
-
-    gclog_or_tty->gclog_stamp(_tracer->gc_id());
-    gclog_or_tty->print("[GC %s start", _phase_names[phase]);
-    ShenandoahHeap* heap = (ShenandoahHeap*) Universe::heap();
-
-    gclog_or_tty->print(" total = " SIZE_FORMAT " K, used = " SIZE_FORMAT " K free = " SIZE_FORMAT " K", heap->capacity()/ K, heap->used() /K, 
-			((heap->capacity() - heap->used())/K) );
-
-    if (heap->calculateUsed() != heap->used()) {
-      gclog_or_tty->print("calc used = " SIZE_FORMAT " K heap used = " SIZE_FORMAT " K",
-			    heap->calculateUsed() / K, heap->used() / K);
-    }
-    //    assert(heap->calculateUsed() == heap->used(), "Just checking");
-    gclog_or_tty->print_cr("]");
-  }
-}
-
-void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
-  double end = os::elapsedTime();
-  double elapsed = end - _timing_data[phase]._start;
-  _timing_data[phase]._ms.add(elapsed * 1000);
-
-  if (ShenandoahGCVerbose && PrintGCDetails) {
-    tty->print_cr("PolicyPrint: %s "SIZE_FORMAT" took %lf ms", _phase_names[phase],
-                  _timing_data[phase]._count++, elapsed * 1000);
-  }
-  if (PrintGCTimeStamps) {
-    ShenandoahHeap* heap = (ShenandoahHeap*) Universe::heap();
-    gclog_or_tty->gclog_stamp(_tracer->gc_id());
-
-    gclog_or_tty->print("[GC %s end, %lf secs", _phase_names[phase], elapsed );
-    gclog_or_tty->print(" total = " SIZE_FORMAT " K, used = " SIZE_FORMAT " K free = " SIZE_FORMAT " K", heap->capacity()/ K, heap->used() /K,
-			((heap->capacity() - heap->used())/K) );
-
-    if (heap->calculateUsed() != heap->used()) {
-      gclog_or_tty->print("calc used = " SIZE_FORMAT " K heap used = " SIZE_FORMAT " K",
-			    heap->calculateUsed() / K, heap->used() / K);
-    }
-    //    assert(heap->calculateUsed() == heap->used(), "Stashed heap used must be equal to calculated heap used");
-    gclog_or_tty->print_cr("]");
-
-    if (phase == recycle_regions) {
-      _tracer->report_gc_end(_conc_timer->gc_end(), _conc_timer->time_partitions());
-    } else if (phase == full_gc) {
-      _tracer->report_gc_end(_stw_timer->gc_end(), _stw_timer->time_partitions());
-    } else if (phase == conc_mark || phase == conc_evac || phase == conc_uprefs || phase == prepare_evac) {
-      if (_conc_gc_aborted) {
-        _tracer->report_gc_end(_conc_timer->gc_end(), _conc_timer->time_partitions());
-        clear_conc_gc_aborted();
-      }
-    } else if (phase == final_evac) {
-      ShenandoahHeap* heap = ShenandoahHeap::heap();
-      this->record_bytes_end_CM(heap->_bytesAllocSinceCM);
-    }
-  }
-}
-
-void ShenandoahCollectorPolicy::report_concgc_cancelled() {
-  if (PrintGCTimeStamps)  {
-    gclog_or_tty->print("Concurrent GC Cancelled\n");
-    set_conc_gc_aborted();
-    //    _tracer->report_gc_end(_conc_timer->gc_end(), _conc_timer->time_partitions());
-  }
-}
-
-bool ShenandoahHeuristics::update_refs_early() {
-  return ShenandoahUpdateRefsEarly;
-}
-
-void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
-  _bytes_allocated_since_CM = bytes;
-  _bytes_allocated_start_CM = bytes;
-  _allocation_rate_bytes.add(bytes);
-}
-
-void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
-  _bytes_reclaimed_this_cycle = bytes;
-  _reclamation_rate_bytes.add(bytes);
-}
-
-void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
-  _bytes_allocated_start_CM = bytes;
-}
-
-void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
-  _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
-                                                                   : bytes;
-}
-
-class AggressiveHeuristics : public ShenandoahHeuristics {
-public:
-  AggressiveHeuristics() : ShenandoahHeuristics(){
-  if (PrintGCDetails)
-    tty->print_cr("Initializing aggressive heuristics");
-  }
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
-    return true;
-  }
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                               ShenandoahHeapRegionSet* collection_set,
-                                               ShenandoahHeapRegionSet* free_set) {
-    region_set->set_garbage_threshold(8);
-    region_set->choose_collection_and_free_sets(collection_set, free_set);
-  }
-};
-
-class HalfwayHeuristics : public ShenandoahHeuristics {
-public:
-  HalfwayHeuristics() : ShenandoahHeuristics() {
-  if (PrintGCDetails)
-    tty->print_cr("Initializing halfway heuristics");
-  }
-
-  bool should_start_concurrent_mark(size_t used, size_t capacity) const {
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    size_t threshold_bytes_allocated = heap->capacity() / 4;
-    if (used * 2 > capacity && heap->_bytesAllocSinceCM > threshold_bytes_allocated)
-      return true;
-    else
-      return false;
-  }
-  void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                       ShenandoahHeapRegionSet* collection_set,
-                                       ShenandoahHeapRegionSet* free_set) {
-    region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2);
-    region_set->choose_collection_and_free_sets(collection_set, free_set);
-  }
-};
-
-// GC as little as possible
-class LazyHeuristics : public ShenandoahHeuristics {
-public:
-  LazyHeuristics() : ShenandoahHeuristics() {
-    if (PrintGCDetails) {
-      tty->print_cr("Initializing lazy heuristics");
-    }
-  }
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
-    size_t targetStartMarking = (capacity / 5) * 4;
-    if (used > targetStartMarking) {
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                               ShenandoahHeapRegionSet* collection_set,
-                                               ShenandoahHeapRegionSet* free_set) {
-    region_set->choose_collection_and_free_sets(collection_set, free_set);
-  }
-};
-
-// These are the heuristics in place when we made this class
-class StatusQuoHeuristics : public ShenandoahHeuristics {
-public:
-  StatusQuoHeuristics() : ShenandoahHeuristics() {
-    if (PrintGCDetails) {
-      tty->print_cr("Initializing status quo heuristics");
-    }
-  }
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
-    size_t targetStartMarking = capacity / 16;
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    size_t threshold_bytes_allocated = heap->capacity() / 4;
-
-    if (used > targetStartMarking
-        && heap->_bytesAllocSinceCM > threshold_bytes_allocated) {
-      // Need to check that an appropriate number of regions have
-      // been allocated since last concurrent mark too.
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                               ShenandoahHeapRegionSet* collection_set,
-                                               ShenandoahHeapRegionSet* free_set) {
-    region_set->choose_collection_and_free_sets(collection_set, free_set);
-  }
-};
-
-static uintx clamp(uintx value, uintx min, uintx max) {
-  value = MAX2(value, min);
-  value = MIN2(value, max);
-  return value;
-}
-
-static double get_percent(uintx value) {
-  double _percent = static_cast<double>(clamp(value, 0, 100));
-  return _percent / 100.;
-}
-
-class DynamicHeuristics : public ShenandoahHeuristics {
-private:
-  double _free_threshold_factor;
-  double _garbage_threshold_factor;
-  double _allocation_threshold_factor;
-
-  uintx _free_threshold;
-  uintx _garbage_threshold;
-  uintx _allocation_threshold;
-
-public:
-  DynamicHeuristics() : ShenandoahHeuristics() {
-    if (PrintGCDetails) {
-      tty->print_cr("Initializing dynamic heuristics");
-    }
-
-    _free_threshold = 0;
-    _garbage_threshold = 0;
-    _allocation_threshold = 0;
-
-    _free_threshold_factor = 0.;
-    _garbage_threshold_factor = 0.;
-    _allocation_threshold_factor = 0.;
-  }
-
-  virtual ~DynamicHeuristics() {}
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
-
-    bool shouldStartConcurrentMark = false;
-
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    size_t available = heap->free_regions()->available();
-    uintx factor = heap->need_update_refs() ? ShenandoahFreeThreshold : ShenandoahInitialFreeThreshold;
-    size_t targetStartMarking = (capacity * factor) / 100;
-
-    size_t threshold_bytes_allocated = heap->capacity() * _allocation_threshold_factor;
-    if (available < targetStartMarking &&
-        heap->_bytesAllocSinceCM > threshold_bytes_allocated)
-    {
-      // Need to check that an appropriate number of regions have
-      // been allocated since last concurrent mark too.
-      shouldStartConcurrentMark = true;
-    }
-
-    if (shouldStartConcurrentMark && ShenandoahTracePhases) {
-      tty->print_cr("Start GC at available: "SIZE_FORMAT", factor: "UINTX_FORMAT", update-refs: %s", available, factor, BOOL_TO_STR(heap->need_update_refs()));
-    }
-    return shouldStartConcurrentMark;
-  }
-
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                               ShenandoahHeapRegionSet* collection_set,
-                                               ShenandoahHeapRegionSet* free_set)
-  {
-    region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes * _garbage_threshold_factor);
-    region_set->choose_collection_and_free_sets(collection_set, free_set);
-  }
-
-  void set_free_threshold(uintx free_threshold) {
-    this->_free_threshold_factor = get_percent(free_threshold);
-    this->_free_threshold = free_threshold;
-  }
-
-  void set_garbage_threshold(uintx garbage_threshold) {
-    this->_garbage_threshold_factor = get_percent(garbage_threshold);
-    this->_garbage_threshold = garbage_threshold;
-  }
-
-  void set_allocation_threshold(uintx allocationThreshold) {
-    this->_allocation_threshold_factor = get_percent(allocationThreshold);
-    this->_allocation_threshold = allocationThreshold;
-  }
-
-  uintx get_allocation_threshold() {
-    return this->_allocation_threshold;
-  }
-
-  uintx get_garbage_threshold() {
-    return this->_garbage_threshold;
-  }
-
-  uintx get_free_threshold() {
-    return this->_free_threshold;
-  }
-};
-
-
-class AdaptiveHeuristics : public ShenandoahHeuristics {
-private:
-  size_t _max_live_data;
-  double _used_threshold_factor;
-  double _garbage_threshold_factor;
-  double _allocation_threshold_factor;
-
-  uintx _used_threshold;
-  uintx _garbage_threshold;
-  uintx _allocation_threshold;
-
-public:
-  AdaptiveHeuristics() : ShenandoahHeuristics() {
-    if (PrintGCDetails) {
-      tty->print_cr("Initializing dynamic heuristics");
-    }
-
-    _max_live_data = 0;
-
-    _used_threshold = 0;
-    _garbage_threshold = 0;
-    _allocation_threshold = 0;
-
-    _used_threshold_factor = 0.;
-    _garbage_threshold_factor = 0.1;
-    _allocation_threshold_factor = 0.;
-  }
-
-  virtual ~AdaptiveHeuristics() {}
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
-
-    ShenandoahHeap* _heap = ShenandoahHeap::heap();
-    bool shouldStartConcurrentMark = false;
-
-    size_t max_live_data = _max_live_data;
-    if (max_live_data == 0) {
-      max_live_data = capacity * 0.2; // Very generous initial value.
-    } else {
-      max_live_data *= 1.3; // Add some wiggle room.
-    }
-    size_t max_cycle_allocated = _heap->_max_allocated_gc;
-    if (max_cycle_allocated == 0) {
-      max_cycle_allocated = capacity * 0.3; // Very generous.
-    } else {
-      max_cycle_allocated *= 1.3; // Add 20% wiggle room. Should be enough.
-    }
-    size_t threshold = _heap->capacity() - max_cycle_allocated - max_live_data;
-    if (used > threshold)
-    {
-      shouldStartConcurrentMark = true;
-    }
-
-    return shouldStartConcurrentMark;
-  }
-
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                               ShenandoahHeapRegionSet* collection_set,
-                                               ShenandoahHeapRegionSet* free_set)
-  {
-    size_t bytes_alloc = ShenandoahHeap::heap()->_bytesAllocSinceCM;
-    size_t min_garbage =  bytes_alloc/* * 1.1*/;
-    region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes * _garbage_threshold_factor);
-    region_set->choose_collection_and_free_sets_min_garbage(collection_set, free_set, min_garbage);
-    /*
-    tty->print_cr("garbage to be collected: "SIZE_FORMAT, collection_set->garbage());
-    tty->print_cr("objects to be evacuated: "SIZE_FORMAT, collection_set->live_data());
-    */
-    _max_live_data = MAX2(_max_live_data, collection_set->live_data());
-  }
-
-  void set_used_threshold(uintx used_threshold) {
-    this->_used_threshold_factor = get_percent(used_threshold);
-    this->_used_threshold = used_threshold;
-  }
-
-  void set_garbage_threshold(uintx garbage_threshold) {
-    this->_garbage_threshold_factor = get_percent(garbage_threshold);
-    this->_garbage_threshold = garbage_threshold;
-  }
-
-  void set_allocation_threshold(uintx allocationThreshold) {
-    this->_allocation_threshold_factor = get_percent(allocationThreshold);
-    this->_allocation_threshold = allocationThreshold;
-  }
-
-  uintx get_allocation_threshold() {
-    return this->_allocation_threshold;
-  }
-
-  uintx get_garbage_threshold() {
-    return this->_garbage_threshold;
-  }
-
-  uintx get_used_threshold() {
-    return this->_used_threshold;
-  }
-};
-
-class NewAdaptiveHeuristics : public ShenandoahHeuristics {
-private:
-  size_t _max_live_data;
-  double _target_heap_occupancy_factor;
-  double _allocation_threshold_factor;
-  size_t _last_bytesAllocSinceCM;
-
-  uintx _target_heap_occupancy;
-  uintx _allocation_threshold;
-
-public:
-  NewAdaptiveHeuristics() : ShenandoahHeuristics()
-  {
-    if (PrintGCDetails) {
-      tty->print_cr("Initializing newadaptive heuristics");
-    }
-    _max_live_data = 0;
-    _allocation_threshold = 0;
-    _target_heap_occupancy_factor = 0.;
-    _allocation_threshold_factor = 0.;
-    _last_bytesAllocSinceCM = 0;
-  }
-
-  virtual ~NewAdaptiveHeuristics() {}
-
-  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const
-  {
-      if (this->_bytes_allocated_during_CM > 0) {
-          // Not the first concurrent mark.
-          // _bytes_allocated_during_CM
-          ShenandoahHeap *heap = ShenandoahHeap::heap();
-          size_t threshold_bytes_allocated = heap->capacity() / 4;
-          size_t targetStartMarking = (size_t) capacity * this->_target_heap_occupancy_factor;
-          return (used > targetStartMarking) && (this->_bytes_allocated_during_CM > threshold_bytes_allocated);
-      } else {
-          // First concurrent mark.
-          size_t targetStartMarking = capacity / 2;
-          ShenandoahHeap *heap = ShenandoahHeap::heap();
-          size_t threshold_bytes_allocated = heap->capacity() / 4;
-
-          // Need to check that an appropriate number of regions have
-          // been allocated since last concurrent mark too.
-          return (used > targetStartMarking) && (heap->_bytesAllocSinceCM > threshold_bytes_allocated);
-      }
-  }
-
-  virtual void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set,
-                                               ShenandoahHeapRegionSet* collection_set,
-                                               ShenandoahHeapRegionSet* free_set)
-  {
-    ShenandoahHeap *_heap = ShenandoahHeap::heap();
-    this->_last_bytesAllocSinceCM = ShenandoahHeap::heap()->_bytesAllocSinceCM;
-    if (this->_last_bytesAllocSinceCM > 0) {
-      size_t min_garbage = this->_last_bytesAllocSinceCM;
-      region_set->choose_collection_and_free_sets_min_garbage(collection_set, free_set, min_garbage);
-    } else {
-      region_set->set_garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2);
-      region_set->choose_collection_and_free_sets(collection_set, free_set);
-    }
-    this->_max_live_data = MAX2(this->_max_live_data, collection_set->live_data());
-  }
-
-  void set_target_heap_occupancy(uintx target_heap_occupancy) {
-    this->_target_heap_occupancy_factor = get_percent(target_heap_occupancy);
-    this->_target_heap_occupancy = target_heap_occupancy;
-  }
-
-  void set_allocation_threshold(uintx allocationThreshold) {
-    this->_allocation_threshold_factor = get_percent(allocationThreshold);
-    this->_allocation_threshold = allocationThreshold;
-  }
-
-  uintx get_allocation_threshold() {
-    return this->_allocation_threshold;
-  }
-
-  uintx get_target_heap_occupancy() {
-    return this->_target_heap_occupancy;
-  }
-};
-
-
-static DynamicHeuristics *configureDynamicHeuristics() {
-  DynamicHeuristics *heuristics = new DynamicHeuristics();
-
-  heuristics->set_garbage_threshold(ShenandoahGarbageThreshold);
-  heuristics->set_allocation_threshold(ShenandoahAllocationThreshold);
-  heuristics->set_free_threshold(ShenandoahFreeThreshold);
-  if (ShenandoahLogConfig) {
-    tty->print_cr("Shenandoah dynamic heuristics thresholds: allocation "SIZE_FORMAT", used "SIZE_FORMAT", garbage "SIZE_FORMAT,
-                  heuristics->get_allocation_threshold(),
-                  heuristics->get_free_threshold(),
-                  heuristics->get_garbage_threshold());
-  }
-  return heuristics;
-}
-
-
-static NewAdaptiveHeuristics* configureNewAdaptiveHeuristics() {
-  NewAdaptiveHeuristics* heuristics = new NewAdaptiveHeuristics();
-
-  heuristics->set_target_heap_occupancy(ShenandoahTargetHeapOccupancy);
-  if (ShenandoahLogConfig) {
-    tty->print_cr( "Shenandoah newadaptive heuristics target heap occupancy: "SIZE_FORMAT,
-                   heuristics->get_target_heap_occupancy() );
-  }
-  return heuristics;
-}
-
-
-ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() {
-
-  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), initial_heap_byte_size());
-
-  initialize_all();
-
-  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
-  _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
-  _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
-  _user_requested_gcs = 0;
-  _allocation_failure_gcs = 0;
-  _conc_gc_aborted = false;
-
-  _phase_names[init_mark] = "InitMark";
-  _phase_names[final_mark] = "FinalMark";
-  _phase_names[rescan_roots] = "RescanRoots";
-  _phase_names[drain_satb] = "DrainSATB";
-  _phase_names[drain_queues] = "DrainQueues";
-  _phase_names[weakrefs] = "WeakRefs";
-  _phase_names[prepare_evac] = "PrepareEvac";
-  _phase_names[init_evac] = "InitEvac";
-  _phase_names[final_evac] = "FinalEvacuation";
-  _phase_names[final_uprefs] = "FinalUpdateRefs";
-
-  _phase_names[update_roots] = "UpdateRoots";
-  _phase_names[recycle_regions] = "RecycleRegions";
-  _phase_names[reset_bitmaps] = "ResetBitmaps";
-  _phase_names[resize_tlabs] = "ResizeTLABs";
-
-  _phase_names[full_gc] = "FullGC";
-  _phase_names[conc_mark] = "ConcurrentMark";
-  _phase_names[conc_evac] = "ConcurrentEvacuation";
-  _phase_names[conc_uprefs] = "ConcurrentUpdateReferences";
-
-  if (ShenandoahGCHeuristics != NULL) {
-    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: aggressive");
-      }
-      _heuristics = new AggressiveHeuristics();
-    } else if (strcmp(ShenandoahGCHeuristics, "statusquo") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: statusquo");
-      }
-      _heuristics = new StatusQuoHeuristics();
-    } else if (strcmp(ShenandoahGCHeuristics, "halfway") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: halfway");
-      }
-      _heuristics = new HalfwayHeuristics();
-    } else if (strcmp(ShenandoahGCHeuristics, "lazy") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: lazy");
-      }
-      _heuristics = new LazyHeuristics();
-    } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: dynamic");
-      }
-      _heuristics = configureDynamicHeuristics();
-    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: adaptive");
-      }
-      _heuristics = new AdaptiveHeuristics();
-    } else if (strcmp(ShenandoahGCHeuristics, "newadaptive") == 0) {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: newadaptive");
-      }
-      _heuristics = configureNewAdaptiveHeuristics();
-    } else {
-      fatal("Unknown -XX:ShenandoahGCHeuristics option");
-    }
-  } else {
-      if (ShenandoahLogConfig) {
-        tty->print_cr("Shenandoah heuristics: statusquo (default)");
-      }
-    _heuristics = new StatusQuoHeuristics();
-  }
-
-}
-
-ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
-  return this;
-}
-
-ShenandoahCollectorPolicy::Name ShenandoahCollectorPolicy::kind() {
-  return CollectorPolicy::ShenandoahCollectorPolicyKind;
-}
-
-BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
-  return BarrierSet::ShenandoahBarrierSet;
-}
-
-HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
-                                                       bool is_tlab,
-                                                       bool* gc_overhead_limit_was_exceeded) {
-  guarantee(false, "Not using this policy feature yet.");
-  return NULL;
-}
-
-HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
-  guarantee(false, "Not using this policy feature yet.");
-  return NULL;
-}
-
-void ShenandoahCollectorPolicy::initialize_alignments() {
-  
-  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
-  _space_alignment = ShenandoahHeapRegion::RegionSizeBytes;
-  _heap_alignment = ShenandoahHeapRegion::RegionSizeBytes;
-}
-
-void ShenandoahCollectorPolicy::post_heap_initialize() {
-  // Nothing to do here (yet).
-}
-
-void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
-  _heuristics->record_bytes_allocated(bytes);
-}
-
-void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
-  _heuristics->record_bytes_start_CM(bytes);
-}
-
-void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
-  _heuristics->record_bytes_end_CM(bytes);
-}
-
-void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
-  _heuristics->record_bytes_reclaimed(bytes);
-}
-
-void ShenandoahCollectorPolicy::record_user_requested_gc() {
-  _user_requested_gcs++;
-}
-
-void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
-  _allocation_failure_gcs++;
-}
-
-bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
-							     size_t capacity) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  return _heuristics->should_start_concurrent_mark(used, capacity);
-}
-
-bool ShenandoahCollectorPolicy::update_refs_early() {
-  return _heuristics->update_refs_early();
-}
-
-void ShenandoahCollectorPolicy::choose_collection_and_free_sets(
-			     ShenandoahHeapRegionSet* region_set, 
-			     ShenandoahHeapRegionSet* collection_set,
-                             ShenandoahHeapRegionSet* free_set) {
-  _heuristics->choose_collection_and_free_sets(region_set, collection_set, free_set);
-}
-
-void ShenandoahCollectorPolicy::print_tracing_info() {
-  print_summary_sd("Initial Mark Pauses", 0, &(_timing_data[init_mark]._ms));
-  print_summary_sd("Final Mark Pauses", 0, &(_timing_data[final_mark]._ms));
-
-  print_summary_sd("Rescan Roots", 2, &(_timing_data[rescan_roots]._ms));
-  print_summary_sd("Drain SATB", 2, &(_timing_data[drain_satb]._ms));
-  print_summary_sd("Drain Queues", 2, &(_timing_data[drain_queues]._ms));
-  if (ShenandoahProcessReferences) {
-    print_summary_sd("Weak References", 2, &(_timing_data[weakrefs]._ms));
-  }
-  print_summary_sd("Prepare Evacuation", 2, &(_timing_data[prepare_evac]._ms));
-  print_summary_sd("Initial Evacuation", 2, &(_timing_data[init_evac]._ms));
-
-  print_summary_sd("Final Evacuation Pauses", 0, &(_timing_data[final_evac]._ms));
-  print_summary_sd("Final Update Refs Pauses", 0, &(_timing_data[final_uprefs]._ms));
-  print_summary_sd("Update roots", 2, &(_timing_data[update_roots]._ms));
-  print_summary_sd("Recycle regions", 2, &(_timing_data[recycle_regions]._ms));
-  print_summary_sd("Reset bitmaps", 2, &(_timing_data[reset_bitmaps]._ms));
-  print_summary_sd("Resize TLABs", 2, &(_timing_data[resize_tlabs]._ms));
-  gclog_or_tty->print_cr(" ");
-  print_summary_sd("Concurrent Marking Times", 0, &(_timing_data[conc_mark]._ms));
-  print_summary_sd("Concurrent Evacuation Times", 0, &(_timing_data[conc_evac]._ms));
-  print_summary_sd("Concurrent Update References Times", 0, &(_timing_data[conc_uprefs]._ms));
-  print_summary_sd("Full GC Times", 0, &(_timing_data[full_gc]._ms));
-
-  gclog_or_tty->print_cr("User requested GCs: "SIZE_FORMAT, _user_requested_gcs);
-  gclog_or_tty->print_cr("Allocation failure GCs: "SIZE_FORMAT, _allocation_failure_gcs);
-
-  gclog_or_tty->print_cr(" ");
-  double total_sum = _timing_data[init_mark]._ms.sum() +
-    _timing_data[final_mark]._ms.sum() +
-    _timing_data[final_evac]._ms.sum() +
-    _timing_data[final_uprefs]._ms.sum();
-  double total_avg = (_timing_data[init_mark]._ms.avg() +
-                      _timing_data[final_mark]._ms.avg() +
-                      _timing_data[final_evac]._ms.avg() +
-                      _timing_data[final_uprefs]._ms.avg()) / 4.0;
-  double total_max = MAX2(
-                          MAX2(
-                               MAX2(_timing_data[init_mark]._ms.maximum(),
-                                    _timing_data[final_mark]._ms.maximum()),
-                               _timing_data[final_evac]._ms.maximum()),
-                          _timing_data[final_uprefs]._ms.maximum());
-
-  gclog_or_tty->print_cr("%-27s = %8.2lf s, avg = %8.2lf ms, max = %8.2lf ms",
-                         "Total", total_sum / 1000.0, total_avg, total_max);
-
-}
-
-void ShenandoahCollectorPolicy::print_summary_sd(const char* str, uint indent, const NumberSeq* seq)  {
-  double sum = seq->sum();
-  for (uint i = 0; i < indent; i++) gclog_or_tty->print(" ");
-  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
-                         str, sum / 1000.0, seq->avg());
-  for (uint i = 0; i < indent; i++) gclog_or_tty->print(" ");
-  gclog_or_tty->print_cr("%s = "INT32_FORMAT_W(5)", std dev = %8.2lf ms, max = %8.2lf ms)",
-                         "(num", seq->num(), seq->sd(), seq->maximum());
-}
-
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-/*
-  Copyright 2014 Red Hat, Inc. and/or its affiliates.
-*/
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAH_COLLECTOR_POLICY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAH_COLLECTOR_POLICY_HPP
-
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "runtime/arguments.hpp"
-#include "utilities/numberSeq.hpp"
-
-
-class ShenandoahHeap;
-class ShenandoahHeuristics;
-
-class ShenandoahCollectorPolicy: public CollectorPolicy {
-
-public:
-  enum TimingPhase {
-    init_mark,
-    final_mark,
-    rescan_roots,
-    drain_satb,
-    drain_queues,
-    weakrefs,
-    prepare_evac,
-    init_evac,
-
-    final_evac,
-    final_uprefs,
-    update_roots,
-    recycle_regions,
-    reset_bitmaps,
-    resize_tlabs,
-    full_gc,
-    conc_mark,
-    conc_evac,
-    conc_uprefs,
-
-    _num_phases
-  };
-
-private:
-  struct TimingData {
-    NumberSeq _ms;
-    double _start;
-    size_t _count;
-  };
-
-private:
-  TimingData _timing_data[_num_phases];
-  const char* _phase_names[_num_phases];
-
-  size_t _user_requested_gcs;
-  size_t _allocation_failure_gcs;
-
-  ShenandoahHeap* _pgc;
-  ShenandoahHeuristics* _heuristics;
-  ShenandoahTracer* _tracer;
-  STWGCTimer* _stw_timer;
-  ConcurrentGCTimer* _conc_timer;
-  
-  bool _conc_gc_aborted;
-
-public:
-  ShenandoahCollectorPolicy();
-
-  virtual ShenandoahCollectorPolicy* as_pgc_policy();
-
-  virtual ShenandoahCollectorPolicy::Name kind();
-
-  BarrierSet::Name barrier_set_name();
-
-  HeapWord* mem_allocate_work(size_t size,
-			      bool is_tlab,
-			      bool* gc_overhead_limit_was_exceeded);
-
-  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
-
-  void initialize_alignments();
-
-  void post_heap_initialize();
-
-  void record_phase_start(TimingPhase phase);
-  void record_phase_end(TimingPhase phase);
-  void report_concgc_cancelled();
-
-  void record_user_requested_gc();
-  void record_allocation_failure_gc();
-
-  void record_bytes_allocated(size_t bytes);
-  void record_bytes_reclaimed(size_t bytes);
-  void record_bytes_start_CM(size_t bytes);
-  void record_bytes_end_CM(size_t bytes);
-  bool should_start_concurrent_mark(size_t used, size_t capacity);
-  void choose_collection_and_free_sets(ShenandoahHeapRegionSet* region_set, 
-                                       ShenandoahHeapRegionSet* collection_set,
-                                       ShenandoahHeapRegionSet* free_set);
-
-  bool update_refs_early();
-
-  void print_tracing_info();
-
-  GCTimer* conc_timer(){return _conc_timer;}
-  GCTimer* stw_timer() {return _stw_timer;}
-  ShenandoahTracer* tracer() {return _tracer;}
-
-  void set_conc_gc_aborted() { _conc_gc_aborted = true;}
-  void clear_conc_gc_aborted() {_conc_gc_aborted = false;}
-
-private:
-  void print_summary_sd(const char* str, uint indent, const NumberSeq* seq);
-};
-
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAH_COLLECTOR_POLICY_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,742 +0,0 @@
-/*
-  Copyright 2014 Red Hat, Inc. and/or its affiliates.
-*/
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "classfile/stringTable.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "code/codeCache.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "memory/iterator.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-
-// Mark the object and add it to the queue to be scanned
-ShenandoahMarkObjsClosure::ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, bool update_refs) :
-  _heap((ShenandoahHeap*)(Universe::heap())),
-  _mark_refs(ShenandoahMarkRefsClosure(q, update_refs)),
-  _live_data(NEW_C_HEAP_ARRAY(size_t, _heap->max_regions(), mtGC))
-{
-  Copy::zero_to_bytes(_live_data, _heap->max_regions() * sizeof(size_t));
-}
-
-ShenandoahMarkObjsClosure::~ShenandoahMarkObjsClosure() {
-  // Merge liveness data back into actual regions.
-
-  // We need to lock the heap here, to avoid race with growing of heap.
-  MutexLockerEx ml(ShenandoahHeap_lock, true);
-  ShenandoahHeapRegion** regions = _heap->heap_regions();
-  for (uint i = 0; i < _heap->num_regions(); i++) {
-    regions[i]->increase_live_data(_live_data[i]);
-  }
-  FREE_C_HEAP_ARRAY(size_t, _live_data);
-}
-
-ShenandoahMarkRefsClosure::ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, bool update_refs) :
-  MetadataAwareOopClosure(((ShenandoahHeap *) Universe::heap())->ref_processor()),
-  _queue(q),
-  _heap((ShenandoahHeap*) Universe::heap()),
-  _scm(_heap->concurrentMark()),
-  _update_refs(update_refs)
-{
-}
-
-void ShenandoahMarkRefsClosure::do_oop(narrowOop* p) {
-  Unimplemented();
-}
-
-
-// Walks over all the objects in the generation updating any
-// references to from space.
-
-class CLDMarkAliveClosure : public CLDClosure {
-private:
-  CLDClosure* _cl;
-public:
-  CLDMarkAliveClosure(CLDClosure* cl) : _cl(cl) {
-  }
-  void do_cld(ClassLoaderData* cld) {
-    ShenandoahIsAliveClosure is_alive;
-    if (cld->is_alive(&is_alive)) {
-      _cl->do_cld(cld);
-    }
-  }
-};
-
-class ShenandoahMarkRootsTask : public AbstractGangTask {
-private:
-  ShenandoahRootProcessor* _rp;
-  bool _update_refs;
-public:
-  ShenandoahMarkRootsTask(ShenandoahRootProcessor* rp, bool update_refs) :
-    AbstractGangTask("Shenandoah update roots task"), _update_refs(update_refs),
-    _rp(rp) {
-  }
-
-  void work(uint worker_id) {
-    // tty->print_cr("start mark roots worker: "INT32_FORMAT, worker_id);
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    SCMObjToScanQueue* q = heap->concurrentMark()->get_queue(worker_id);
-    ShenandoahMarkRefsClosure cl(q, _update_refs);
-
-    CodeBlobToOopClosure blobsCl(&cl, true);
-    CLDToOopClosure cldCl(&cl);
-
-    ResourceMark m;
-    if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
-      _rp->process_strong_roots(&cl, &cldCl, &blobsCl);
-    } else {
-      _rp->process_all_roots(&cl, &cldCl, &blobsCl);
-    }
-    // tty->print_cr("finish mark roots worker: "INT32_FORMAT, worker_id);
-  }
-};
-
-class SCMConcurrentMarkingTask : public AbstractGangTask {
-private:
-  ShenandoahConcurrentMark* _cm;
-  ParallelTaskTerminator* _terminator;
-  int _seed;
-  bool _update_refs;
-
-public:
-  SCMConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
-    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _seed(17) {
-  }
-
-      
-  void work(uint worker_id) {
-
-    SCMObjToScanQueue* q = _cm->get_queue(worker_id);
-    ShenandoahMarkObjsClosure cl(q, _update_refs);
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    while (true) {
-      if (heap->cancelled_concgc() ||
-	  (!_cm->try_queue(q, &cl) &&
-	   !_cm->try_draining_an_satb_buffer(worker_id) &&
-	   !_cm->try_to_steal(worker_id, &cl, &_seed))
-	  ) {
-	if (_terminator->offer_termination()) break;
-      }
-    }
-    if (ShenandoahTracePhases && heap->cancelled_concgc()) {
-      tty->print_cr("Cancelled concurrent marking");
-    }
-  }
-};
-
-void ShenandoahConcurrentMark::prepare_unmarked_root_objs() {
-
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  bool update_refs = heap->need_update_refs();
-
-  if (update_refs) {
-    COMPILER2_PRESENT(DerivedPointerTable::clear());
-  }
-
-  prepare_unmarked_root_objs_no_derived_ptrs(update_refs);
-
-  if (update_refs) {
-    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-  }
-
-}
-
-void ShenandoahConcurrentMark::prepare_unmarked_root_objs_no_derived_ptrs(bool update_refs) {
-  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
-
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  if (ShenandoahParallelRootScan) {
-
-    ClassLoaderDataGraph::clear_claimed_marks();
-    heap->conc_workers()->set_active_workers(_max_conc_worker_id);
-    ShenandoahRootProcessor root_proc(heap, _max_conc_worker_id);
-    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
-    ShenandoahMarkRootsTask mark_roots(&root_proc, update_refs);
-    heap->conc_workers()->run_task(&mark_roots);
-
-    // Mark through any class loaders that have been found alive.
-    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
-    CLDToOopClosure cldCl(&cl);
-    CLDMarkAliveClosure cld_keep_alive(&cldCl);
-    ClassLoaderDataGraph::roots_cld_do(NULL, &cld_keep_alive);
-
-  } else {
-    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
-    heap->roots_iterate(&cl);
-  }
-
-  if (!(ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark)) {
-    ShenandoahMarkRefsClosure cl(get_queue(0), update_refs);
-    heap->weak_roots_iterate(&cl);
-  }
-
-  // tty->print_cr("all root marker threads done");
-}
-
-
-void ShenandoahConcurrentMark::initialize() {
-  _max_conc_worker_id = MAX2((uint) ConcGCThreads, 1U);
-  _task_queues = new SCMObjToScanQueueSet((int) _max_conc_worker_id);
-
-  for (uint i = 0; i < _max_conc_worker_id; ++i) {
-    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
-    task_queue->initialize();
-    _task_queues->register_queue(i, task_queue);
-  }
-  JavaThread::satb_mark_queue_set().set_buffer_size(1014 /* G1SATBBufferSize */);
-}
-
-void ShenandoahConcurrentMark::mark_from_roots() {
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("STOPPING THE WORLD: before marking");
-    tty->print_cr("Starting markFromRoots");
-  }
-
-  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
-
-  bool update_refs = sh->need_update_refs();
-
-  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::conc_mark);
-  ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
-
-  if (ShenandoahProcessReferences) {
-    ReferenceProcessor* rp = sh->ref_processor();
-    // enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_no_refs*/);
-    rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
-  }
-  
-  SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
-  sh->conc_workers()->set_active_workers(_max_conc_worker_id);
-  sh->conc_workers()->run_task(&markingTask);
-
-  if (ShenandoahGCVerbose) {
-    tty->print("total workers = %u finished workers = %u\n", 
-	       sh->conc_workers()->started_workers(), 
-	       sh->conc_workers()->finished_workers());
-    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
-    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
-  }
-
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("Finishing markFromRoots");
-    tty->print_cr("RESUMING THE WORLD: after marking");
-  }
-
-  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
-}
-
-class FinishDrainSATBBuffersTask : public AbstractGangTask {
-private:
-  ShenandoahConcurrentMark* _cm;
-  ParallelTaskTerminator* _terminator;
-public:
-  FinishDrainSATBBuffersTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator) :
-    AbstractGangTask("Finish draining SATB buffers"), _cm(cm), _terminator(terminator) {
-  }
-
-  void work(uint worker_id) {
-    _cm->drain_satb_buffers(worker_id, true);
-  }
-};
-
-class ShenandoahUpdateAliveRefs : public OopClosure {
-private:
-  ShenandoahHeap* _heap;
-public:
-  ShenandoahUpdateAliveRefs() : _heap(ShenandoahHeap::heap()) {
-  }
-  virtual void do_oop(oop* p) {
-    _heap->maybe_update_oop_ref(p);
-  }
-
-  virtual void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-};
-
-void ShenandoahConcurrentMark::finish_mark_from_roots() {
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("Starting finishMarkFromRoots");
-  }
-
-  IsGCActiveMark is_active;
-
-  ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
-
-  // Trace any (new) unmarked root references.
-  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::rescan_roots);
-  prepare_unmarked_root_objs();
-  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::rescan_roots);
-  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_satb);
-  {
-    StrongRootsScope scope(_max_conc_worker_id);
-    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
-    // drain_satb_buffers(0, true);
-    FinishDrainSATBBuffersTask drain_satb_buffers(this, &terminator);
-    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
-    sh->conc_workers()->run_task(&drain_satb_buffers);
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_satb);
-  }
-  
-  // Finally mark everything else we've got in our queues during the previous steps.
-  {
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_queues);
-    ParallelTaskTerminator terminator(_max_conc_worker_id, _task_queues);
-    SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, sh->need_update_refs());
-    sh->conc_workers()->set_active_workers(_max_conc_worker_id);
-    sh->conc_workers()->run_task(&markingTask);
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_queues);
-  }
-
-#ifdef ASSERT
-  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
-    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
-  }
-#endif
-
-  // When we're done marking everything, we process weak references.
-  if (ShenandoahProcessReferences) {
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::weakrefs);
-    weak_refs_work();
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
-  }
-
-#ifdef ASSERT
-  for (int i = 0; i < (int) _max_conc_worker_id; i++) {
-    assert(_task_queues->queue(i)->is_empty(), "Should be empty");
-  }
-#endif
-
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("Finishing finishMarkFromRoots");
-#ifdef SLOWDEBUG
-    for (int i = 0; i <(int)_max_conc_worker_id; i++) {
-      tty->print("Queue: "INT32_FORMAT":", i);
-      _task_queues->queue(i)->stats.print(tty, 10);
-      tty->cr();
-      _task_queues->queue(i)->stats.verify();
-    }
-#endif
-  }
-
-  // We still need to update (without marking) alive refs in JNI handles.
-  if (ShenandoahProcessReferences && ClassUnloadingWithConcurrentMark) {
-    ShenandoahUpdateAliveRefs cl;
-    ShenandoahIsAliveClosure is_alive;
-    JNIHandles::weak_oops_do(&is_alive, &cl);
-  }
-
-#ifdef ASSERT
-  verify_roots();
-
-  if (ShenandoahDumpHeapAfterConcurrentMark) {
-    sh->ensure_parsability(false);
-    sh->print_all_refs("post-mark");
-  }
-#endif
-}
-
-#ifdef ASSERT
-void ShenandoahVerifyRootsClosure1::do_oop(oop* p) {
-  oop obj = oopDesc::load_heap_oop(p);
-  if (! oopDesc::is_null(obj)) {
-    guarantee(ShenandoahHeap::heap()->is_marked_current(obj), "oop must be marked");
-    guarantee(obj == ShenandoahBarrierSet::resolve_oop_static_not_null(obj), "oop must not be forwarded");
-  }
-}
-
-void ShenandoahConcurrentMark::verify_roots() {
-  ShenandoahVerifyRootsClosure1 cl;
-  CodeBlobToOopClosure blobsCl(&cl, true);
-  CLDToOopClosure cldCl(&cl);
-  ClassLoaderDataGraph::clear_claimed_marks();
-  ShenandoahRootProcessor rp(ShenandoahHeap::heap(), 1);
-  rp.process_roots(&cl, &cl, &cldCl, &cldCl, &cldCl, &blobsCl);
-}
-#endif
-
-class ShenandoahSATBBufferClosure : public SATBBufferClosure {
-private:
-  SCMObjToScanQueue* _queue;
-
-public:
-  ShenandoahSATBBufferClosure(SCMObjToScanQueue* q) :
-    _queue(q)
-  {
-  }
-
-  void do_buffer(void** buffer, size_t size) {
-    // tty->print_cr("draining one satb buffer");
-    for (size_t i = 0; i < size; ++i) {
-      void* entry = buffer[i];
-      oop obj = oop(entry);
-      // tty->print_cr("satb buffer entry: "PTR_FORMAT, p2i((HeapWord*) obj));
-      if (!oopDesc::is_null(obj)) {
-	obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
-	bool pushed = _queue->push(obj);
-	assert(pushed, "overflow queue should always succeed pushing");
-      }
-    }
-  }
-};
-
-class ShenandoahSATBThreadsClosure : public ThreadClosure {
-  ShenandoahSATBBufferClosure* _satb_cl;
-  int _thread_parity;
-
- public:
-  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
-    _satb_cl(satb_cl),
-    _thread_parity(Threads::thread_claim_parity()) {}
-
-  void do_thread(Thread* thread) {
-    if (thread->is_Java_thread()) {
-      if (thread->claim_oops_do(true, _thread_parity)) {
-        JavaThread* jt = (JavaThread*)thread;
-        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
-      }
-    } else if (thread->is_VM_thread()) {
-      if (thread->claim_oops_do(true, _thread_parity)) {
-        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
-      }
-    }
-  }
-};
-
-void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
-
-  // tty->print_cr("start draining SATB buffers");
-
-  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-  SCMObjToScanQueue* q = get_queue(worker_id);
-  ShenandoahSATBBufferClosure cl(q);
-
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
-
-  if (remark) {
-    ShenandoahSATBThreadsClosure tc(&cl);
-    Threads::threads_do(&tc);
-  }
-
-  // tty->print_cr("end draining SATB buffers");
-
-}
-
-bool ShenandoahConcurrentMark::drain_one_satb_buffer(uint worker_id) {
-
-  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-  SCMObjToScanQueue* q = get_queue(worker_id);
-  ShenandoahSATBBufferClosure cl(q);
-
-  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-  bool result = satb_mq_set.apply_closure_to_completed_buffer(&cl);
-  return result;
-}
-
-#if TASKQUEUE_STATS
-void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
-  st->print_raw_cr("GC Task Stats");
-  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
-  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
-}
-
-void ShenandoahConcurrentMark::print_taskqueue_stats(outputStream* const st) const {
-  print_taskqueue_stats_hdr(st);
-  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-  TaskQueueStats totals;
-  const int n = sh->max_conc_workers();
-  for (int i = 0; i < n; ++i) {
-    st->print(INT32_FORMAT_W(3), i); 
-    _task_queues->queue(i)->stats.print(st);
-    st->print("\n");
-    totals += _task_queues->queue(i)->stats;
-  }
-  st->print_raw("tot "); totals.print(st); st->cr();
-  DEBUG_ONLY(totals.verify());
-
-}
-
-void ShenandoahConcurrentMark::print_push_only_taskqueue_stats(outputStream* const st) const {
-  print_taskqueue_stats_hdr(st);
-  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-  TaskQueueStats totals;
-  const int n = sh->max_conc_workers();
-  for (int i = 0; i < n; ++i) {
-    st->print(INT32_FORMAT_W(3), i); 
-    _task_queues->queue(i)->stats.print(st);
-    st->print("\n");
-    totals += _task_queues->queue(i)->stats;
-  }
-  st->print_raw("tot "); totals.print(st); st->cr();
-
-  DEBUG_ONLY(totals.verify_only_pushes());
-}
-
-void ShenandoahConcurrentMark::reset_taskqueue_stats() {
-  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-  const int n = sh->max_conc_workers();
-  for (int i = 0; i < n; ++i) {
-    _task_queues->queue(i)->stats.reset();
-  }
-}
-#endif // TASKQUEUE_STATS
-
-// Weak Reference Closures
-class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
-  ShenandoahHeap* _sh;
-  ShenandoahConcurrentMark* _scm;
-  uint _worker_id;
-  int _seed;
-
-public:
-  ShenandoahCMDrainMarkingStackClosure(uint worker_id): _worker_id(worker_id), _seed(17) {
-    _sh = (ShenandoahHeap*) Universe::heap();
-    _scm = _sh->concurrentMark();
-  }
-
-      
-  void do_void() {
-
-    SCMObjToScanQueue* q = _scm->get_queue(_worker_id);
-    ShenandoahMarkObjsClosure cl(q, _sh->need_update_refs());
-    while (true) {
-      if (!_scm->try_queue(q, &cl) &&
-	  !_scm->try_draining_an_satb_buffer(_worker_id) &&
-	  !_scm->try_to_steal(_worker_id, &cl, &_seed)) {
-	break;
-      }
-    }
-  }
-};
-
-
-class ShenandoahCMKeepAliveAndDrainClosure: public OopClosure {
-  SCMObjToScanQueue* _queue;
-  ShenandoahHeap* _sh;
-  ShenandoahConcurrentMark* _scm;
-
-  size_t _ref_count;
-
-public:
-  ShenandoahCMKeepAliveAndDrainClosure(SCMObjToScanQueue* q) :
-    _queue(q) {
-    _sh = (ShenandoahHeap*) Universe::heap();
-    _scm = _sh->concurrentMark();
-    _ref_count = 0;
-  }
-
-  virtual void do_oop(oop* p){ do_oop_work(p);}
-  virtual void do_oop(narrowOop* p) {  
-    assert(false, "narrowOops Aren't implemented");
-  }
-
-
-  void do_oop_work(oop* p) {  
-
-    oop obj;
-    if (_sh->need_update_refs()) {
-      obj = _sh->maybe_update_oop_ref(p);
-    } else {
-      obj = oopDesc::load_heap_oop(p);
-    }
-
-    assert(obj == oopDesc::bs()->resolve_oop(obj), "only get updated oops in weak ref processing");
-
-    if (obj != NULL) {
-      if (Verbose && ShenandoahTraceWeakReferences) {
-	gclog_or_tty->print_cr("\twe're looking at location "
-			       "*"PTR_FORMAT" = "PTR_FORMAT,
-			       p2i(p), p2i((void*) obj));
-	obj->print();
-      }
-      bool pushed = _queue->push(obj);
-      assert(pushed, "overflow queue should always succeed pushing");
-
-      _ref_count++;
-    }    
-  }
-
-  size_t ref_count() { return _ref_count; }
-
-};
-
-class ShenandoahRefProcTaskProxy : public AbstractGangTask {
-
-private:
-  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
-
-public:
-
-  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task) :
-    AbstractGangTask("Process reference objects in parallel"),
-    _proc_task(proc_task) {
-  }
-
-  void work(uint worker_id) {
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    ShenandoahIsAliveClosure is_alive;
-    ShenandoahCMKeepAliveAndDrainClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
-    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id);
-    _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
-  }
-};
-
-class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
-
-private:
-  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
-
-public:
-
-  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
-    AbstractGangTask("Enqueue reference objects in parallel"),
-    _enqueue_task(enqueue_task) {
-  }
-
-  void work(uint worker_id) {
-    _enqueue_task.work(worker_id);
-  }
-};
-
-class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
-
-private:
-  WorkGang* _workers;
-
-public:
-
-  ShenandoahRefProcTaskExecutor() : _workers(ShenandoahHeap::heap()->conc_workers()) {
-  }
-
-  // Executes a task using worker threads.
-  void execute(ProcessTask& task) {
-    ShenandoahRefProcTaskProxy proc_task_proxy(task);
-    _workers->run_task(&proc_task_proxy);
-  }
-
-  void execute(EnqueueTask& task) {
-    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
-    _workers->run_task(&enqueue_task_proxy);
-  }
-};
-
-
-void ShenandoahConcurrentMark::weak_refs_work() {
-   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-   ReferenceProcessor* rp = sh->ref_processor();
-
-   // Setup collector policy for softref cleaning.
-   bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
-   if (ShenandoahTraceWeakReferences) {
-     tty->print_cr("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
-   }
-   rp->setup_policy(clear_soft_refs);
-
-   uint serial_worker_id = 0;
-   ShenandoahIsAliveClosure is_alive;
-   ShenandoahCMKeepAliveAndDrainClosure keep_alive(get_queue(serial_worker_id));
-   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id);
-   ShenandoahRefProcTaskExecutor par_task_executor;
-   bool processing_is_mt = true;
-   AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
-
-   if (ShenandoahTraceWeakReferences) {
-     gclog_or_tty->print_cr("start processing references");
-   }
-
-   rp->process_discovered_references(&is_alive, &keep_alive, 
-				     &complete_gc, &par_task_executor, 
-				     ShenandoahHeap::heap()->collector_policy()->conc_timer(),
-                                     ShenandoahHeap::heap()->tracer()->gc_id());
-   
-   if (ShenandoahTraceWeakReferences) {
-     gclog_or_tty->print_cr("finished processing references, processed "SIZE_FORMAT" refs", keep_alive.ref_count());
-     gclog_or_tty->print_cr("start enqueuing references");
-   }
-
-   rp->enqueue_discovered_references(executor);
-
-   if (ShenandoahTraceWeakReferences) {
-     gclog_or_tty->print_cr("finished enqueueing references");
-   }
-
-   rp->verify_no_references_recorded();
-   assert(!rp->discovery_enabled(), "Post condition");
-
-   if (ClassUnloadingWithConcurrentMark) {
-     // Unload classes and purge SystemDictionary.
-     bool purged_class = SystemDictionary::do_unloading(&is_alive);
-     // Unload nmethods.
-     CodeCache::do_unloading(&is_alive, purged_class);
-     // Prune dead klasses from subklass/sibling/implementor lists.
-     Klass::clean_weak_klass_links(&is_alive);
-     // Delete entries from dead interned strings.
-     StringTable::unlink(&is_alive);
-     // Clean up unreferenced symbols in symbol table.
-     SymbolTable::unlink();
-
-     ClassLoaderDataGraph::purge();
-   }
-}
-
-void ShenandoahConcurrentMark::cancel() {
-  ShenandoahHeap* sh = ShenandoahHeap::heap();
-
-  // Cancel weak-ref discovery.
-  if (ShenandoahProcessReferences) {
-    ReferenceProcessor* rp = sh->ref_processor();
-    rp->abandon_partial_discovery();
-    rp->disable_discovery();
-  }
-
-  // Clean up marking stacks.
-  SCMObjToScanQueueSet* queues = task_queues();
-  for (uint i = 0; i < _max_conc_worker_id; ++i) {
-    SCMObjToScanQueue* task_queue = queues->queue(i);
-    task_queue->set_empty();
-    task_queue->overflow_stack()->clear();
-  }
-
-  // Cancel SATB buffers.
-  JavaThread::satb_mark_queue_set().abandon_partial_marking();
-}
-
-SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
-  worker_id = worker_id % _max_conc_worker_id;
-  return _task_queues->queue(worker_id);
-}
-
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
-
-#include "gc/shared/taskqueue.hpp"
-#include "gc/shared/workgroup.hpp"
-
-typedef OverflowTaskQueue<oop, mtGC> OopOverflowTaskQueue;
-typedef Padded<OopOverflowTaskQueue> SCMObjToScanQueue;
-typedef GenericTaskQueueSet<SCMObjToScanQueue, mtGC> SCMObjToScanQueueSet;
-
-class ShenandoahConcurrentMark;
-
-#ifdef ASSERT
-class ShenandoahVerifyRootsClosure1 : public OopClosure {
-  void do_oop(oop* p);
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-};
-#endif
-
-class ShenandoahMarkRefsClosure : public MetadataAwareOopClosure {
-  SCMObjToScanQueue* _queue;
-  ShenandoahHeap* _heap;
-  bool _update_refs;
-  ShenandoahConcurrentMark* _scm;
-
-public: 
-  ShenandoahMarkRefsClosure(SCMObjToScanQueue* q, bool update_refs);
-
-  void do_oop(narrowOop* p);
-
-  inline void do_oop(oop* p);
-
-};
-
-class ShenandoahMarkObjsClosure : public ObjectClosure {
-  ShenandoahHeap* _heap;
-  size_t* _live_data;
-  ShenandoahMarkRefsClosure _mark_refs;
-
-public: 
-  ShenandoahMarkObjsClosure(SCMObjToScanQueue* q, bool update_refs);
-
-  ~ShenandoahMarkObjsClosure();
-
-  inline void do_object(oop obj);
-};  
-
-class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
-
-private:
-  // The per-worker-thread work queues
-  SCMObjToScanQueueSet* _task_queues;
-
-  bool                    _aborted;       
-  uint _max_conc_worker_id;
-  ParallelTaskTerminator* _terminator;
-
-public:
-  // We need to do this later when the heap is already created.
-  void initialize();
-
-  void mark_from_roots();
-
-  // Prepares unmarked root objects by marking them and putting
-  // them into the marking task queue.
-  void prepare_unmarked_root_objs();
-  void prepare_unmarked_root_objs_no_derived_ptrs(bool update_refs);
-
-  void finish_mark_from_roots();
-  // Those are only needed public because they're called from closures.
-
-  SCMObjToScanQueue* get_queue(uint worker_id);
-  inline bool try_queue(SCMObjToScanQueue* q, ShenandoahMarkObjsClosure* cl);
-  inline bool try_to_steal(uint worker_id, ShenandoahMarkObjsClosure* cl, int *seed);
-  inline bool try_draining_an_satb_buffer(uint worker_id);
-  void drain_satb_buffers(uint worker_id, bool remark = false);
-  SCMObjToScanQueueSet* task_queues() { return _task_queues;}
-  uint max_conc_worker_id() { return _max_conc_worker_id; }
-
-  void cancel();
-
-private:
-
-#ifdef ASSERT
-  void verify_roots();
-#endif
-
-  bool drain_one_satb_buffer(uint worker_id);
-  void weak_refs_work();
-
-  ParallelTaskTerminator* terminator() { return _terminator;}
-
-#if TASKQUEUE_STATS
-  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
-  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
-  void print_push_only_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
-  void reset_taskqueue_stats();
-#endif // TASKQUEUE_STATS
-
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
-  Copyright 2015 Red Hat, Inc. and/or its affiliates.
-*/
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
-
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp"
-#include "memory/iterator.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
-
-void ShenandoahMarkRefsClosure::do_oop(oop* p) {
-  // We piggy-back reference updating to the marking tasks.
-#ifdef ASSERT
-  oop* old = p;
-#endif
-  oop obj;
-  if (_update_refs) {
-    obj = _heap->maybe_update_oop_ref(p);
-  } else {
-    obj = oopDesc::load_heap_oop(p);
-  }
-  assert(obj == ShenandoahBarrierSet::resolve_oop_static(obj), "need to-space object here");
-
-#ifdef ASSERT
-  if (ShenandoahTraceUpdates) {
-    if (p != old) 
-      tty->print_cr("Update "PTR_FORMAT" => "PTR_FORMAT"  to "PTR_FORMAT" => "PTR_FORMAT, p2i(p), p2i((HeapWord*) *p), p2i(old), p2i((HeapWord*) *old));
-    else
-      tty->print_cr("Not updating "PTR_FORMAT" => "PTR_FORMAT"  to "PTR_FORMAT" => "PTR_FORMAT, p2i(p), p2i((HeapWord*) *p), p2i(old), p2i((HeapWord*) *old));
-  }
-#endif
-
-  // NOTE: We used to assert the following here. This does not always work because
-  // a concurrent Java thread could change the the field after we updated it.
-  // oop obj = oopDesc::load_heap_oop(p);
-  // assert(oopDesc::bs()->resolve_oop(obj) == *p, "we just updated the referrer");
-  // assert(obj == NULL || ! _heap->heap_region_containing(obj)->is_dirty(), "must not point to dirty region");
-
-  //  ShenandoahExtendedMarkObjsClosure cl(_heap->ref_processor(), _worker_id);
-  //  ShenandoahMarkObjsClosure mocl(cl, _worker_id);
-
-  if (obj != NULL) {
-    if (_update_refs) {
-      Prefetch::write(obj, 128);
-    } else {
-      Prefetch::read(obj, 128);
-    }
-
-#ifdef ASSERT
-    uint region_idx  = _heap->heap_region_index_containing(obj);
-    ShenandoahHeapRegion* r = _heap->heap_regions()[region_idx];
-    assert(r->bottom() < (HeapWord*) obj && r->top() > (HeapWord*) obj, "object must be in region");
-#endif
-
-    bool pushed = _queue->push(obj);
-    assert(pushed, "overflow queue should always succeed pushing");
-  }
-}
-
-void ShenandoahMarkObjsClosure::do_object(oop obj) {
-
-  assert(obj != NULL, "expect non-null object");
-
-  assert(obj == ShenandoahBarrierSet::resolve_oop_static_not_null(obj), "expect forwarded obj in queue");
-
-#ifdef ASSERT
-  if (_heap->heap_region_containing(obj)->is_in_collection_set()) {
-    tty->print_cr("trying to mark obj: "PTR_FORMAT" (%s) in dirty region: ", p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_marked_current(obj)));
-    //      _heap->heap_region_containing(obj)->print();
-    //      _heap->print_heap_regions();
-  }
-#endif
-  assert(_heap->cancelled_concgc()
-	 || ! _heap->heap_region_containing(obj)->is_in_collection_set(),
-	 "we don't want to mark objects in from-space");
-  assert(_heap->is_in(obj), "referenced objects must be in the heap. No?");
-  if (_heap->mark_current(obj)) {
-#ifdef ASSERT
-    if (ShenandoahTraceConcurrentMarking) {
-      tty->print_cr("marked obj: "PTR_FORMAT, p2i((HeapWord*) obj));
-    }
-#endif
-
-    // Calculate liveness of heap region containing object.
-    uint region_idx  = _heap->heap_region_index_containing(obj);
-#ifdef ASSERT
-    ShenandoahHeapRegion* r = _heap->heap_regions()[region_idx];
-    assert(r->bottom() < (HeapWord*) obj && r->top() > (HeapWord*) obj, "object must be in region");
-#endif
-    _live_data[region_idx] += (obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE) * HeapWordSize;
-    obj->oop_iterate(&_mark_refs);
-  }
-
-#ifdef ASSERT
-  else {
-    if (ShenandoahTraceConcurrentMarking) {
-      tty->print_cr("failed to mark obj (already marked): "PTR_FORMAT, p2i((HeapWord*) obj));
-    }
-    assert(_heap->is_marked_current(obj), "make sure object is marked");
-  }
-#endif
-}
-
-inline bool ShenandoahConcurrentMark::try_queue(SCMObjToScanQueue* q, ShenandoahMarkObjsClosure* cl) {
-  oop obj;
-  if (q->pop_local(obj)) {
-    assert(obj != NULL, "Can't mark null");
-    cl->do_object(obj);
-    return true;
-  } else if (q->pop_overflow(obj)) {
-    cl->do_object(obj);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-inline bool ShenandoahConcurrentMark::try_to_steal(uint worker_id, ShenandoahMarkObjsClosure* cl, int *seed) {
-  oop obj;
-  if (task_queues()->steal(worker_id, seed, obj)) {
-    cl->do_object(obj);
-    return true;
-  } else 
-    return false;
-}
-
-inline bool ShenandoahConcurrentMark:: try_draining_an_satb_buffer(uint worker_id) {
-  return drain_one_satb_buffer(worker_id);
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,201 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#include "gc_implementation/shenandoah/shenandoahConcurrentThread.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "gc_implementation/shenandoah/shenandoahJNICritical.hpp"
-#include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
-#include "memory/iterator.hpp"
-#include "memory/universe.hpp"
-#include "runtime/vmThread.hpp"
-
-SurrogateLockerThread* ShenandoahConcurrentThread::_slt = NULL;
-
-ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
-  ConcurrentGCThread(),
-  _epoch(0),
-  _concurrent_mark_started(false),
-  _concurrent_mark_in_progress(false),
-  _do_full_gc(false),
-  _concurrent_mark_aborted(false)
-{
-  create_and_start();
-}
-
-ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
-  // This is here so that super is called.
-}
-
-void ShenandoahConcurrentThread::run() {
-  initialize_in_thread();
-
-  wait_for_universe_init();
-
-  // Wait until we have the surrogate locker thread in place.
-  {
-    MutexLockerEx x(CGC_lock, true);
-    while(_slt == NULL && !_should_terminate) {
-      CGC_lock->wait(true, 200);
-    }
-  }
-
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-
-  while (!_should_terminate) {
-    if (_do_full_gc) {
-      {
-        if (_full_gc_cause == GCCause::_allocation_failure) {
-          heap->shenandoahPolicy()->record_allocation_failure_gc();
-        } else {
-          heap->shenandoahPolicy()->record_user_requested_gc();
-        }
-
-	VM_ShenandoahFullGC full_gc;
-	heap->jni_critical()->execute_in_vm_thread(&full_gc);
-      }
-      MonitorLockerEx ml(ShenandoahFullGC_lock);
-      _do_full_gc = false;
-      ml.notify_all();
-    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(),
-							       heap->capacity())) 
-      {
-
-	if (ShenandoahGCVerbose) 
-	  tty->print("Capacity = "SIZE_FORMAT" Used = "SIZE_FORMAT"  doing initMark\n", heap->capacity(), heap->used());
- 
-	if (ShenandoahGCVerbose) tty->print("Starting a mark");
-
-	VM_ShenandoahInitMark initMark;
-	VMThread::execute(&initMark);
-
-        if (ShenandoahConcurrentMarking) {
-          ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
-
-	  VM_ShenandoahStartEvacuation finishMark;
-	  heap->jni_critical()->execute_in_vm_thread(&finishMark);
-	}
-
-        if (cm_has_aborted()) {
-          clear_cm_aborted();
-          assert(heap->is_bitmap_clear(), "need to continue with clear mark bitmap");
-          assert(! heap->concurrent_mark_in_progress(), "concurrent mark must have terminated");
-          continue;
-        }
-        if (! _should_terminate) {
-          // If we're not concurrently evacuating, evacuation is done
-          // from VM_ShenandoahFinishMark within the VMThread above.
-          if (ShenandoahConcurrentEvacuation) {
-            VM_ShenandoahEvacuation evacuation;
-            evacuation.doit();
-          }
-        }
-
-        if (heap->shenandoahPolicy()->update_refs_early() && ! _should_terminate && ! heap->cancelled_concgc()) {
-          if (ShenandoahConcurrentUpdateRefs) {
-            VM_ShenandoahUpdateRefs update_refs;
-            VMThread::execute(&update_refs);
-            heap->update_references();
-          }
-        } else {
-	  if (heap->is_evacuation_in_progress()) {
-	    heap->set_evacuation_in_progress(false);
-	  }
-	}
-
-      } else {
-      Thread::current()->_ParkEvent->park(10) ;
-      // yield();
-    }
-    heap->clear_cancelled_concgc();
-  }
-}
-
-void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
-
-  assert(Thread::current()->is_Java_thread(), "expect Java thread here");
-
-  MonitorLockerEx ml(ShenandoahFullGC_lock);
-  schedule_full_gc();
-  _full_gc_cause = cause;
-  while (_do_full_gc) {
-    ml.wait();
-  }
-  assert(_do_full_gc == false, "expect full GC to have completed");
-}
-
-void ShenandoahConcurrentThread::schedule_full_gc() {
-  _do_full_gc = true;
-  OrderAccess::fence();
-}
-
-void ShenandoahConcurrentThread::print() const {
-  print_on(tty);
-}
-
-void ShenandoahConcurrentThread::print_on(outputStream* st) const {
-  st->print("Shenandoah Concurrent Thread");
-  Thread::print_on(st);
-  st->cr();
-}
-
-void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
-  assert(false, "Wake up in the GC thread that never sleeps :-)");
-}
-
-void ShenandoahConcurrentThread::set_cm_started() {
-    assert(!_concurrent_mark_in_progress, "cycle in progress"); 
-    _concurrent_mark_started = true;  
-}
-  
-void ShenandoahConcurrentThread::clear_cm_started() { 
-    assert(_concurrent_mark_in_progress, "must be starting a cycle"); 
-    _concurrent_mark_started = false; 
-}
-
-bool ShenandoahConcurrentThread::cm_started() {
-  return _concurrent_mark_started;
-}
-
-void ShenandoahConcurrentThread::set_cm_in_progress() { 
-  assert(_concurrent_mark_started, "must be starting a cycle"); 
-  _concurrent_mark_in_progress = true;  
-}
-
-void ShenandoahConcurrentThread::clear_cm_in_progress() { 
-  assert(!_concurrent_mark_started, "must not be starting a new cycle"); 
-  _concurrent_mark_in_progress = false; 
-}
-
-bool ShenandoahConcurrentThread::cm_in_progress() { 
-  return _concurrent_mark_in_progress;  
-}
-
-void ShenandoahConcurrentThread::start() {
-  create_and_start();
-}
-
-void ShenandoahConcurrentThread::yield() {
-  _sts.yield();
-}
-
-void ShenandoahConcurrentThread::safepoint_synchronize() {
-  assert(UseShenandoahGC, "just checking");
-  _sts.synchronize();
-}
-
-void ShenandoahConcurrentThread::safepoint_desynchronize() {
-  assert(UseShenandoahGC, "just checking");
-  _sts.desynchronize();
-}
-
-void ShenandoahConcurrentThread::makeSurrogateLockerThread(TRAPS) {
-  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
-  assert(THREAD->is_Java_thread(), "must be a Java thread");
-  assert(_slt == NULL, "SLT already created");
-  _slt = SurrogateLockerThread::make(THREAD);
-}
-
-void ShenandoahConcurrentThread::shutdown() {
-  _should_terminate = true;
-}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
-
-#include "gc/shared/concurrentGCThread.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "memory/resourceArea.hpp"
-
-// For now we just want to have a concurrent marking thread. 
-// Once we have that working we will build a concurrent evacuation thread.
-
-class ShenandoahConcurrentThread: public ConcurrentGCThread {
-  friend class VMStructs;
-
- public:
-  virtual void run();
-
- private:
-  volatile bool                    _concurrent_mark_started;
-  volatile bool                    _concurrent_mark_in_progress;
-  volatile bool                    _concurrent_mark_aborted;
-
-  int _epoch;
-
-  static SurrogateLockerThread* _slt;
-  static SuspendibleThreadSet _sts;
-
-  bool _do_full_gc;
-  GCCause::Cause _full_gc_cause;
-
-  void sleepBeforeNextCycle();
-
- public:
-  // Constructor
-  ShenandoahConcurrentThread();
-  ~ShenandoahConcurrentThread();
-
-  static void makeSurrogateLockerThread(TRAPS);
-  static SurrogateLockerThread* slt() { return _slt; }
-
-  // Printing
-  void print_on(outputStream* st) const;
-  void print() const;
-
-  void set_cm_started();
-  void clear_cm_started();
-  bool cm_started();
-
-  void set_cm_in_progress();
-  void clear_cm_in_progress();
-  bool cm_in_progress();
-
-  void cm_abort() { _concurrent_mark_aborted = true;}
-  bool cm_has_aborted() { return _concurrent_mark_aborted;}
-  void clear_cm_aborted() { _concurrent_mark_aborted = false;}
-
-  void do_full_gc(GCCause::Cause cause);
-
-  void schedule_full_gc();
-
-  // This flag returns true from the moment a marking cycle is
-  // initiated (during the initial-mark pause when started() is set)
-  // to the moment when the cycle completes (just after the next
-  // marking bitmap has been cleared and in_progress() is
-  // cleared). While this flag is true we will not start another cycle
-  // so that cycles do not overlap. We cannot use just in_progress()
-  // as the CM thread might take some time to wake up before noticing
-  // that started() is set and set in_progress().
-  bool during_cycle()      { return cm_started() || cm_in_progress(); }
-
-  char* name() const { return (char*)"ShenandoahConcurrentThread";}
-  void start();
-  void yield();
-
-  static void safepoint_synchronize();
-  static void safepoint_desynchronize();
-
-  void shutdown();
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2856 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-
-#include "classfile/symbolTable.hpp"
-#include "classfile/stringTable.hpp"
-
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/cmBitMap.inline.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahHumongous.hpp"
-#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahJNICritical.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "memory/iterator.hpp"
-#include "memory/oopFactory.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
-#include "memory/universe.hpp"
-#include "utilities/copy.hpp"
-#include "gc/shared/vmGCOperations.hpp"
-#include "runtime/atomic.inline.hpp"
-
-#define __ masm->
-
-ShenandoahHeap* ShenandoahHeap::_pgc = NULL;
-
-void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
-  HeapWord* cur = NULL;
-  for (cur = start; cur < end; cur++) {
-    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
-  }
-}
-
-void ShenandoahHeap::print_heap_objects(HeapWord* start, HeapWord* end) {
-  HeapWord* cur = NULL;
-  for (cur = start; cur < end; cur = cur + oop(cur)->size()) {
-    oop(cur)->print();
-    print_heap_locations(cur, cur + oop(cur)->size());
-  }
-}
-
-void ShenandoahHeap::print_heap_object(oop p) {
-  HeapWord* hw = (HeapWord*) p;
-  print_heap_locations(hw-1, hw+1+p->size());
-}
-
-
-class PrintHeapRegionsClosure : public
-   ShenandoahHeapRegionClosure {
-private:
-  outputStream* _st;
-public:
-  PrintHeapRegionsClosure() : _st(tty) {}
-  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    r->print_on(_st);
-    return false;
-  }
-};
-
-class PrintHeapObjectsClosure : public ShenandoahHeapRegionClosure {
-public:
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    tty->print_cr("Region "INT32_FORMAT" top = "PTR_FORMAT" used = "SIZE_FORMAT_HEX" free = "SIZE_FORMAT_HEX, 
-	       r->region_number(), p2i(r->top()), r->used(), r->free());
-    
-    ShenandoahHeap::heap()->print_heap_objects(r->bottom(), r->top());
-    return false;
-  }
-};
-
-jint ShenandoahHeap::initialize() {
-  CollectedHeap::pre_initialize();
-
-  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
-  size_t max_byte_size = collector_policy()->max_heap_byte_size();
-  if (ShenandoahGCVerbose) 
-    tty->print_cr("init_byte_size = "SIZE_FORMAT","SIZE_FORMAT_HEX"  max_byte_size = "INT64_FORMAT","SIZE_FORMAT_HEX, 
-	     init_byte_size, init_byte_size, max_byte_size, max_byte_size);
-
-  Universe::check_alignment(max_byte_size,  
-			    ShenandoahHeapRegion::RegionSizeBytes, 
-			    "shenandoah heap");
-  Universe::check_alignment(init_byte_size, 
-			    ShenandoahHeapRegion::RegionSizeBytes, 
-			    "shenandoah heap");
-
-  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
-						 Arguments::conservative_max_heap_alignment());
-  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
-
-  set_barrier_set(new ShenandoahBarrierSet());
-  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
-  _storage.initialize(pgc_rs, init_byte_size);
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("Calling initialize on reserved space base = "PTR_FORMAT" end = "PTR_FORMAT, 
-	       p2i(pgc_rs.base()), p2i(pgc_rs.base() + pgc_rs.size()));
-  }
-
-  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
-  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
-  _ordered_regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _max_regions, mtGC); 
-  for (size_t i = 0; i < _max_regions; i++) {
-    _ordered_regions[i] = NULL;
-  }
-
-  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
-  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
-  assert(init_byte_size == _initialSize, "tautology");
-  _free_regions = new ShenandoahHeapRegionSet(_max_regions);
-  _collection_set = new ShenandoahHeapRegionSet(_max_regions);
-
-  for (size_t i = 0; i < _num_regions; i++) {
-    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
-    current->initialize_heap_region((HeapWord*) pgc_rs.base() + 
-				    regionSizeWords * i, regionSizeWords, i);
-    _free_regions->append(current);
-    _ordered_regions[i] = current;
-  }
-  _first_region = _ordered_regions[0];
-  _first_region_bottom = _first_region->bottom();
-  assert((((size_t) _first_region_bottom) & (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0, err_msg("misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom)));
-
-  _numAllocs = 0;
-
-  if (ShenandoahGCVerbose) {
-    tty->print("All Regions\n");
-    print_heap_regions();
-    tty->print("Free Regions\n");
-    _free_regions->print();
-  }
-
-  // The call below uses stuff (the SATB* things) that are in G1, but probably
-  // belong into a shared location.
-  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
-                                               SATB_Q_FL_lock,
-                                               20 /*G1SATBProcessCompletedThreshold */,
-                                               Shared_SATB_Q_lock);
-
-  // Reserve space for prev and next bitmap.
-  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
-  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
-
-  ReservedSpace bitmap(ReservedSpace::allocation_align_size_up(bitmap_size));
-  os::commit_memory_or_exit(bitmap.base(), bitmap.size(), false, err_msg("couldn't allocate mark bitmap"));
-  MemRegion bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
-  _mark_bit_map.initialize(heap_region, bitmap_region);
-
-  _next_mark_bit_map = &_mark_bit_map;
-  reset_mark_bitmap();
-
-  // Initialize fast collection set test structure.
-  _in_cset_fast_test_length = _max_regions;
-  _in_cset_fast_test_base =
-                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
-  _in_cset_fast_test = _in_cset_fast_test_base -
-               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
-  clear_cset_fast_test();
-
-  _concurrent_gc_thread = new ShenandoahConcurrentThread();
-  return JNI_OK;
-}
-
-ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : 
-  CollectedHeap(),
-  _shenandoah_policy(policy), 
-  _concurrent_mark_in_progress(false),
-  _evacuation_in_progress(false),
-  _update_references_in_progress(false),
-  _free_regions(NULL),
-  _collection_set(NULL),
-  _bytesAllocSinceCM(0),
-  _bytes_allocated_during_cm(0),
-  _max_allocated_gc(0),
-  _allocated_last_gc(0),
-  _used_start_gc(0),
-  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
-  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
-  _ref_processor(NULL),
-  _in_cset_fast_test(NULL),
-  _in_cset_fast_test_base(NULL),
-  _mark_bit_map(),
-  _cancelled_concgc(false),
-  _need_update_refs(false),
-  _need_reset_bitmaps(false),
-  _jni_critical(new ShenandoahJNICritical())
-
-{
-  if (ShenandoahLogConfig) {
-    tty->print_cr("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
-    tty->print_cr("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
-    tty->print_cr("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
-  }
-  _pgc = this;
-  _scm = new ShenandoahConcurrentMark();
-  _used = 0;
-  // This is odd.  They are concurrent gc threads, but they are also task threads.  
-  // Framework doesn't allow both.
-  _workers = new FlexibleWorkGang("Concurrent GC Threads", ParallelGCThreads,
-                            /* are_GC_task_threads */true,
-                            /* are_ConcurrentGC_threads */false);
-  _conc_workers = new FlexibleWorkGang("Concurrent GC Threads", ConcGCThreads,
-                            /* are_GC_task_threads */true,
-                            /* are_ConcurrentGC_threads */false);
-  if ((_workers == NULL) || (_conc_workers == NULL)) {
-    vm_exit_during_initialization("Failed necessary allocation.");
-  } else {
-    _workers->initialize_workers();
-    _conc_workers->initialize_workers();
-  }
-}
-
-void ShenandoahHeap::reset_mark_bitmap() {
-  _next_mark_bit_map->clearAll();
-}
-
-void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
-  _next_mark_bit_map->clearRange(MemRegion(from, to));
-}
-
-class BitmapClearClosure : public BitMapClosure {
-private:
-  CMBitMap* _bm;
-
-public:
-
-  BitmapClearClosure(CMBitMap* bm) : _bm(bm) {
-  }
-
-  bool do_bit(BitMap::idx_t offset) {
-    HeapWord* hw = _bm->offsetToHeapWord(offset);
-    bool is_marked = _bm->isMarked(hw);
-    return ! is_marked;
-  }
-};
-
-bool ShenandoahHeap::is_bitmap_clear() {
-  
-  BitmapClearClosure bitmap_clear_cl(_next_mark_bit_map);
-  return _next_mark_bit_map->iterate(&bitmap_clear_cl);
-}
-
-void ShenandoahHeap::print_on(outputStream* st) const {
-  st->print("Shenandoah Heap");
-  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity()/ K, used() /K);
-  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
-  if (_concurrent_mark_in_progress) {
-    st->print("marking ");
-  }
-  if (_evacuation_in_progress) {
-    st->print("evacuating ");
-  }
-  if (_update_references_in_progress) {
-    st->print("updating-refs ");
-  }
-  if (_cancelled_concgc) {
-    st->print("cancelled ");
-  }
-  st->print("\n");
-
-  if (Verbose) {
-    print_heap_regions(st);
-  }
-}
-
-class InitGCLABClosure : public ThreadClosure {
-public:
-  void do_thread(Thread* thread) {
-    thread->gclab().initialize(true);
-  }
-};
-
-void ShenandoahHeap::post_initialize() {
-
-  {
-    MutexLockerEx ml(Threads_lock);
-    InitGCLABClosure init_gclabs;
-    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
-      init_gclabs.do_thread(thread);
-    }
-    gc_threads_do(&init_gclabs);
-  }
-  _scm->initialize();
-
-  if (ShenandoahProcessReferences) {
-    ref_processing_init();
-  }
-  _max_workers = MAX(_max_parallel_workers, _max_conc_workers);
-}
-
-class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
-  size_t sum;
-public:
-
-  CalculateUsedRegionClosure() {
-    sum = 0;
-  }
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    sum = sum + r->used();
-    return false;
-  }
-
-  size_t getResult() { return sum;}
-};
-
-size_t ShenandoahHeap::calculateUsed() {
-  CalculateUsedRegionClosure cl;
-  heap_region_iterate(&cl);
-  return cl.getResult();
-}
-
-size_t ShenandoahHeap::calculateFree() {
-  return capacity() - calculateUsed();
-}
-
-void ShenandoahHeap::verify_heap_size_consistency() {
-  
-  assert(calculateUsed() == used(),
-         err_msg("heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed()));
-}
-
-size_t ShenandoahHeap::used() const {
-  return _used;
-}
-
-void ShenandoahHeap::increase_used(size_t bytes) {
-  _used += bytes;
-  // Atomic::add_ptr(bytes, &_used);
-}
-
-void ShenandoahHeap::set_used(size_t bytes) {
-  _used = bytes;
-}
-
-void ShenandoahHeap::decrease_used(size_t bytes) {
-  assert(_used >= bytes, "never decrease heap size by more than we've left");
-  _used -= bytes;
-  
-  // Atomic::add_ptr(-bytes, &_used);
-}
-
-size_t ShenandoahHeap::capacity() const {
-  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
-
-}
-
-bool ShenandoahHeap::is_maximal_no_gc() const {
-  Unimplemented();
-  return true;
-}
-
-size_t ShenandoahHeap::max_capacity() const {
-  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
-}
-
-class IsInRegionClosure : public ShenandoahHeapRegionClosure {
-  const void* _p;
-  bool _result;
-public:
-
-  IsInRegionClosure(const void* p) {
-    _p = p;
-    _result = false;
-  }
-  
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    if (r->is_in(_p)) {
-      _result = true;
-      return true;
-    }
-    return false;
-  }
-
-  bool result() { return _result;}
-};
-
-bool ShenandoahHeap::is_in(const void* p) const {
-  //  IsInRegionClosure isIn(p);
-  //  heap_region_iterate(&isIn);
-  //  bool result = isIn.result();
-  
-  //  return isIn.result();
-  HeapWord* first_region_bottom = _first_region->bottom();
-  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
-  return p > _first_region_bottom && p < last_region_end;
-}
-
-bool ShenandoahHeap::is_in_partial_collection(const void* p ) {
-  Unimplemented();
-  return false;
-}  
-
-bool  ShenandoahHeap::is_scavengable(const void* p) {
-  //  nyi();
-  //  return false;
-  return true;
-}
-
-HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
-  HeapWord* obj = thread->gclab().allocate(size);
-  if (obj != NULL) {
-    return obj;
-  }
-  // Otherwise...
-  return allocate_from_gclab_slow(thread, size);
-}
-
-HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
-  // Retain tlab and allocate object in shared space if
-  // the amount free in the tlab is too large to discard.
-  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
-    thread->gclab().record_slow_allocation(size);
-    return NULL;
-  }
-
-  // Discard gclab and allocate a new one.
-  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
-  size_t new_gclab_size = thread->gclab().compute_size(size);
-
-  thread->gclab().clear_before_allocation();
-
-  if (new_gclab_size == 0) {
-    return NULL;
-  }
-
-  // Allocate a new GCLAB...
-  HeapWord* obj = allocate_new_gclab(new_gclab_size);
-  if (obj == NULL) {
-    return NULL;
-  }
-
-  if (ZeroTLAB) {
-    // ..and clear it.
-    Copy::zero_to_words(obj, new_gclab_size);
-  } else {
-    // ...and zap just allocated object.
-#ifdef ASSERT
-    // Skip mangling the space corresponding to the object header to
-    // ensure that the returned space is not considered parsable by
-    // any concurrent GC thread.
-    size_t hdr_size = oopDesc::header_size();
-    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
-#endif // ASSERT
-  }
-  thread->gclab().fill(obj, obj + size, new_gclab_size);
-  return obj;
-}
-
-HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
-  return allocate_new_tlab(word_size, true);
-}
-
-HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
-  return allocate_new_tlab(word_size, false);
-}
-
-HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool mark) {
-  HeapWord* result = allocate_memory(word_size);
-
-  if (result != NULL) {
-    if (mark && (_concurrent_mark_in_progress || _evacuation_in_progress)) {
-      // We mark the whole tlab here, this way we avoid marking every single
-      // allocated object. We mark it from the 2nd word, because the 1st word is always
-      // the brooks ptr of the first object, and it confuses the fast marked-iterator
-      // if we mark that.
-      _next_mark_bit_map->parMarkRange(MemRegion(result + BrooksPointer::BROOKS_POINTER_OBJ_SIZE,
-						 word_size - BrooksPointer::BROOKS_POINTER_OBJ_SIZE));
-    }
-    assert(! heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region");
-    _bytesAllocSinceCM += word_size * HeapWordSize;
-
-#ifdef ASSERT
-    if (ShenandoahTraceTLabs)
-      tty->print_cr("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
-#endif
-
-  }
-  return result;
-}
-
-ShenandoahHeap* ShenandoahHeap::heap() {
-  assert(_pgc != NULL, "Unitialized access to ShenandoahHeap::heap()");
-  assert(_pgc->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
-  return _pgc;
-}
-
-class VM_ShenandoahVerifyHeap: public VM_GC_Operation {
-public:
-  VM_ShenandoahVerifyHeap(unsigned int gc_count_before,
-                   unsigned int full_gc_count_before,
-                   GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { }
-  virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
-  virtual void doit() {
-    if (ShenandoahGCVerbose)
-      tty->print_cr("verifying heap");
-     Universe::heap()->ensure_parsability(false);
-     Universe::verify();
-  }
-  virtual const char* name() const {
-    return "Shenandoah verify trigger";
-  }
-};
-
-class FindEmptyRegionClosure: public ShenandoahHeapRegionClosure {
-  ShenandoahHeapRegion* _result;
-  size_t _required_size;
-public:
-
-  FindEmptyRegionClosure(size_t required_size) : _required_size(required_size) {
-    _result = NULL;
-  }
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    if ((! r->is_in_collection_set()) && r->free() >= _required_size) {
-      _result = r;
-      return true;
-    }
-    return false;
-  }
-  ShenandoahHeapRegion* result() { return _result;}
-
-};
-
-HeapWord* ShenandoahHeap::allocate_memory(size_t word_size) {
-  HeapWord* result = NULL;
-  result = allocate_memory_with_lock(word_size);
-
-  if (result == NULL && ! Thread::current()->is_evacuating()) { // Allocation failed, try full-GC, then retry allocation.
-    // tty->print_cr("failed to allocate "SIZE_FORMAT " bytes, free regions:", word_size * HeapWordSize);
-    // _free_regions->print();
-    collect(GCCause::_allocation_failure);
-    result = allocate_memory_with_lock(word_size);
-  }
-
-  return result;
-}
-
-HeapWord* ShenandoahHeap::allocate_memory_with_lock(size_t word_size) {
-  return allocate_memory_shenandoah_lock(word_size);
-}
-
-HeapWord* ShenandoahHeap::allocate_memory_heap_lock(size_t word_size) {
-  ShouldNotReachHere();
-  MutexLocker ml(Heap_lock);
-  return allocate_memory_work(word_size);
-}
-
-HeapWord* ShenandoahHeap::allocate_memory_shenandoah_lock(size_t word_size) {
-  MutexLockerEx ml(ShenandoahHeap_lock, true);
-  return allocate_memory_work(word_size);
-}
-
-ShenandoahHeapRegion* ShenandoahHeap::check_skip_humongous(ShenandoahHeapRegion* region) const {
-  while (region != NULL && region->is_humongous()) {
-    region = _free_regions->get_next();
-  }
-  return region;
-}
-
-ShenandoahHeapRegion* ShenandoahHeap::get_next_region_skip_humongous() const {
-  ShenandoahHeapRegion* next = _free_regions->get_next();
-  return check_skip_humongous(next);
-}
-
-ShenandoahHeapRegion* ShenandoahHeap::get_current_region_skip_humongous() const {
-  ShenandoahHeapRegion* current = _free_regions->current();
-  return check_skip_humongous(current);
-}
-
-
-ShenandoahHeapRegion* ShenandoahHeap::check_grow_heap(ShenandoahHeapRegion* current) {
-  if (current == NULL) {
-    if (grow_heap_by()) {
-      current = _free_regions->get_next();
-      assert(current != NULL, "After successfully growing the heap we should have a region");
-      assert(! current->is_humongous(), "new region must not be humongous");
-    } else {
-      current = NULL; // No more room to make a new region. OOM.
-    }
-  }
-  return current;
-}
-
-ShenandoahHeapRegion* ShenandoahHeap::get_current_region() {
-  ShenandoahHeapRegion* current = get_current_region_skip_humongous();
-  return check_grow_heap(current);
-}
-
-ShenandoahHeapRegion* ShenandoahHeap::get_next_region() {
-  ShenandoahHeapRegion* current = get_next_region_skip_humongous();
-  return check_grow_heap(current);
-}
-
-
-HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
-
-  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
-    assert(! Thread::current()->is_evacuating(), "no humongous allocation for evacuating thread");
-    return allocate_large_memory(word_size);
-  }
-
-  ShenandoahHeapRegion* my_current_region = get_current_region();
-  if (my_current_region == NULL) {
-    return NULL; // No more room to make a new region. OOM.
-  }
-  assert(my_current_region != NULL, "should have a region at this point");
-
-#ifdef ASSERT
-  if (my_current_region->is_in_collection_set()) {
-    print_heap_regions();
-  }
-#endif
-  assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
-  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
-
-  HeapWord* result;
-
-  result = my_current_region->par_allocate(word_size);
-  while (result == NULL && my_current_region != NULL) {
-    // 2nd attempt. Try next region.
-    size_t remaining = my_current_region->free();
-    my_current_region = get_next_region();
-    if (my_current_region == NULL) {
-      return NULL; // No more room to make a new region. OOM.
-    }
-    _free_regions->decrease_available(remaining);
-    assert(my_current_region != NULL, "should have a region at this point");
-    assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
-    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
-    result = my_current_region->par_allocate(word_size);
-  }
-
-  if (result != NULL) {
-    my_current_region->increase_live_data(word_size * HeapWordSize);
-    increase_used(word_size * HeapWordSize);
-    _free_regions->decrease_available(word_size * HeapWordSize);
-  }
-  return result;
-}
-
-HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
-  if (ShenandoahTraceHumongous) {
-    gclog_or_tty->print_cr("allocating humongous object of size: "SIZE_FORMAT" KB", (words * HeapWordSize) / K);
-  }
-
-  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
-
-  assert(required_regions <= _max_regions, "sanity check");
-
-  HeapWord* result;
-  ShenandoahHeapRegion* free_regions[required_regions];
-
-  bool success = find_contiguous_free_regions(required_regions, free_regions);
-  if (! success) {
-    success = allocate_contiguous_free_regions(required_regions, free_regions);
-  }
-  if (! success) {
-    result = NULL; // Throw OOM, we cannot allocate the huge object.
-  } else {
-    // Initialize huge object flags in the regions.
-    size_t live = words * HeapWordSize;
-    free_regions[0]->set_humongous_start(true);
-    free_regions[0]->increase_live_data(live);
-
-    for (uint i = 0; i < required_regions; i++) {
-      if (i == 0) {
-        free_regions[0]->set_humongous_start(true);
-      } else {
-        free_regions[i]->set_humongous_continuation(true);
-      }
-      free_regions[i]->set_top(free_regions[i]->end());
-      increase_used(ShenandoahHeapRegion::RegionSizeBytes);
-    }
-    _free_regions->decrease_available(ShenandoahHeapRegion::RegionSizeBytes * required_regions);
-    result = free_regions[0]->bottom();
-  }
-  return result;
-}
-
-bool ShenandoahHeap::find_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions) {
-  if (ShenandoahTraceHumongous) {
-    gclog_or_tty->print_cr("trying to find "UINT32_FORMAT" contiguous free regions", num_free_regions);
-  }
-  uint free_regions_index = 0;
-  for (uint regions_index = 0; regions_index < _num_regions; regions_index++) {
-    // Claim a free region.
-    ShenandoahHeapRegion* region = _ordered_regions[regions_index];
-    bool free = false;
-    if (region != NULL) {
-      if (region->free() == ShenandoahHeapRegion::RegionSizeBytes) {
-        assert(! region->is_humongous(), "don't reuse occupied humongous regions");
-        free = true;
-      }
-    }
-    if (! free) {
-      // Not contiguous, reset search
-      free_regions_index = 0;
-      continue;
-    }
-    assert(free_regions_index < num_free_regions, "array bounds");
-    free_regions[free_regions_index] = region;
-    free_regions_index++;
-
-    if (free_regions_index == num_free_regions) {
-      if (ShenandoahTraceHumongous) {
-        gclog_or_tty->print_cr("found "UINT32_FORMAT" contiguous free regions:", num_free_regions);
-        for (uint i = 0; i < num_free_regions; i++) {
-          gclog_or_tty->print(UINT32_FORMAT": " , i);
-          free_regions[i]->print_on(gclog_or_tty);
-        }
-      }
-      return true;
-    }
-
-  }
-  if (ShenandoahTraceHumongous) {
-    gclog_or_tty->print_cr("failed to find "UINT32_FORMAT" free regions", num_free_regions);
-  }
-  return false;
-}
-
-bool ShenandoahHeap::allocate_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions) {
-  // We need to be smart here to avoid interleaved allocation of regions when concurrently
-  // allocating for large objects. We get the new index into regions array using CAS, where can
-  // subsequently safely allocate new regions.
-  int new_regions_index = ensure_new_regions(num_free_regions);
-  if (new_regions_index == -1) {
-    return false;
-  }
-
-  int last_new_region = new_regions_index + num_free_regions;
-
-  // Now we can allocate new regions at the found index without being scared that
-  // other threads allocate in the same contiguous region.
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("allocate contiguous regions:");
-  }
-  for (int i = new_regions_index; i < last_new_region; i++) {
-    ShenandoahHeapRegion* region = new ShenandoahHeapRegion();
-    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * i;
-    region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, i);
-    _ordered_regions[i] = region;
-    uint index = i - new_regions_index;
-    assert(index < num_free_regions, "array bounds");
-    free_regions[index] = region;
-
-    if (ShenandoahGCVerbose) {
-      region->print();
-    }
-  }
-  return true;
-}
-
-HeapWord* ShenandoahHeap::mem_allocate_locked(size_t size,
-					      bool* gc_overhead_limit_was_exceeded) {
-
-  // This was used for allocation while holding the Heap_lock.
-  // HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size);
-
-  HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size);
-  HeapWord* result = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  if (filler != NULL) {
-    initialize_brooks_ptr(filler, result);
-    _bytesAllocSinceCM += size * HeapWordSize;
-#ifdef ASSERT
-    if (ShenandoahTraceAllocations) {
-      if (*gc_overhead_limit_was_exceeded)
-	tty->print("gc_overhead_limit_was_exceeded");
-      tty->print_cr("mem_allocate_locked object of size "SIZE_FORMAT" uat addr "PTR_FORMAT, size, p2i(result));
-    }
-#endif
-
-    assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targetted region");
-    if (_concurrent_mark_in_progress || _evacuation_in_progress) {
-      mark_current_no_checks(oop(result));
-    }
-
-    return result;
-  } else {
-    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytesAllocSinceCM);
-    {
-      MutexLockerEx ml(ShenandoahHeap_lock, true);
-      print_heap_regions();
-      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->length());
-      _free_regions->print();
-    }
-    assert(false, "Out of memory");
-    return NULL;
-  }
-}
-
-class PrintOopContents: public OopClosure {
-public:
-  void do_oop(oop* o) {
-    oop obj = *o;
-    tty->print_cr("References oop "PTR_FORMAT, p2i((HeapWord*) obj));
-    obj->print();
-  }
-
-  void do_oop(narrowOop* o) {
-    assert(false, "narrowOops aren't implemented");
-  }
-};
-
-HeapWord*  ShenandoahHeap::mem_allocate(size_t size, 
-					bool*  gc_overhead_limit_was_exceeded) {
-
-#ifdef ASSERT
-  if (ShenandoahVerify && _numAllocs > 1000000) {
-    _numAllocs = 0;
-  //   VM_ShenandoahVerifyHeap op(0, 0, GCCause::_allocation_failure);
-  //   if (Thread::current()->is_VM_thread()) {
-  //     op.doit();
-  //   } else {
-  //     // ...and get the VM thread to execute it.
-  //     VMThread::execute(&op);
-  //   }
-  }
-  _numAllocs++;
-#endif
-
-  // MutexLockerEx ml(ShenandoahHeap_lock, true);
-  HeapWord* result = mem_allocate_locked(size, gc_overhead_limit_was_exceeded);
-  return result;
-}
-
-class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
-private:
-  ShenandoahHeap* _heap;
-  Thread* _thread;
-  public:
-  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
-    _heap(heap), _thread(Thread::current()) { 
-  }
-
-  void do_object(oop p) {
-
-#ifdef ASSERT
-    if (ShenandoahTraceEvacuations) {
-      tty->print_cr("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT, p2i((HeapWord*) p));
-    }
-#endif
-
-    if (_heap->is_marked_current(p) && p == ShenandoahBarrierSet::resolve_oop_static_not_null(p)) {
-      _heap->evacuate_object(p, _thread);
-    }
-  }
-};
-      
-//fixme
-void ShenandoahHeap::initialize_brooks_ptr(HeapWord* filler, HeapWord* obj, bool new_obj) {
-  BrooksPointer brooks_ptr = BrooksPointer::get(oop(obj));
-  brooks_ptr.set_forwardee(oop(obj));
-}
-
-void ShenandoahHeap::initialize_brooks_ptr(oop p) {
-  BrooksPointer brooks_ptr = BrooksPointer::get(p);
-  brooks_ptr.set_forwardee(p);
-}
-
-class VerifyEvacuatedObjectClosure : public ObjectClosure {
-
-public:
-  
-  void do_object(oop p) {
-    if (ShenandoahHeap::heap()->is_marked_current(p)) {
-      oop p_prime = oopDesc::bs()->resolve_oop(p);
-      assert(p != p_prime, "Should point to evacuated copy");
-#ifdef ASSERT
-      if (p->klass() != p_prime->klass()) {
-	tty->print_cr("copy has different class than original:");
-	p->klass()->print_on(tty);
-	p_prime->klass()->print_on(tty);
-      }
-#endif
-      assert(p->klass() == p_prime->klass(), err_msg("Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime)));
-      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
-      assert(p->size() == p_prime->size(), "Should be the same size");
-      assert(p_prime == oopDesc::bs()->resolve_oop(p_prime), "One forward once");
-    }
-  }
-};
-
-void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
-  if (ShenandoahGCVerbose) {
-    tty->print("Verifying From Region\n");
-    from_region->print();
-  }
-
-  VerifyEvacuatedObjectClosure verify_evacuation;
-  from_region->object_iterate_interruptible(&verify_evacuation, false);
-}
-
-void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
-
-  assert(from_region->getLiveData() > 0, "all-garbage regions are reclaimed earlier");
-
-  ParallelEvacuateRegionObjectClosure evacuate_region(this);
-  
-#ifdef ASSERT
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("parallel_evacuate_region starting from_region "INT32_FORMAT": free_regions = "SIZE_FORMAT,  from_region->region_number(), _free_regions->available_regions());
-  }
-#endif
-
-  marked_object_iterate(from_region, &evacuate_region);
-
-#ifdef ASSERT
-  if (ShenandoahVerify && ! cancelled_concgc()) {
-    verify_evacuated_region(from_region);
-  }
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("parallel_evacuate_region after from_region = "INT32_FORMAT": free_regions = "SIZE_FORMAT, from_region->region_number(), _free_regions->available_regions());
-  }
-#endif
-}
-
-class ParallelEvacuationTask : public AbstractGangTask {
-private:
-  ShenandoahHeap* _sh;
-  ShenandoahHeapRegionSet* _cs;
-  
-public:  
-  ParallelEvacuationTask(ShenandoahHeap* sh, 
-			 ShenandoahHeapRegionSet* cs) :
-    AbstractGangTask("Parallel Evacuation Task"), 
-    _cs(cs),
-    _sh(sh) {}
-  
-  void work(uint worker_id) {
-
-    ShenandoahHeapRegion* from_hr = _cs->claim_next();
-
-    while (from_hr != NULL) {
-      if (ShenandoahGCVerbose) {
-     	tty->print_cr("Thread "INT32_FORMAT" claimed Heap Region "INT32_FORMAT,
-     		   worker_id,
-     		   from_hr->region_number());
-	from_hr->print();
-      }
-
-      assert(from_hr->getLiveData() > 0, "all-garbage regions are reclaimed early");
-      _sh->parallel_evacuate_region(from_hr);
-
-      if (_sh->cancelled_concgc()) {
-	if (ShenandoahTracePhases) {
-	  tty->print_cr("Cancelled concurrent evacuation");
-	}
-        break;
-      }
-      from_hr = _cs->claim_next();
-    }
-
-    Thread::current()->gclab().make_parsable(true);
-  }
-};
-
-class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
-private:
-  ShenandoahHeap* _heap;
-  size_t _bytes_reclaimed;
-public:
-  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()) {}
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-
-    // If evacuation has been cancelled, we can't recycle regions, we only
-    // clear their collection-set status.
-    if (_heap->cancelled_concgc()) {
-      r->set_is_in_collection_set(false);
-      return false;
-    }
-
-    if (r->is_in_collection_set()) {
-      //      tty->print_cr("recycling region "INT32_FORMAT":", r->region_number());
-      //      r->print_on(tty);
-      //      tty->print_cr(" ");
-      _heap->decrease_used(r->used());
-      _bytes_reclaimed += r->used();
-      r->recycle();
-      _heap->free_regions()->append(r);
-    }
-
-    return false;
-  }
-  size_t bytes_reclaimed() { return _bytes_reclaimed;}
-  void clear_bytes_reclaimed() {_bytes_reclaimed = 0;}
-};
-
-void ShenandoahHeap::recycle_dirty_regions() {
-  RecycleDirtyRegionsClosure cl;
-  cl.clear_bytes_reclaimed();
-
-  heap_region_iterate(&cl);
-
-  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
-  clear_cset_fast_test();
-}
-
-ShenandoahHeapRegionSet* ShenandoahHeap::free_regions() {
-  return _free_regions;
-}
-
-void ShenandoahHeap::print_heap_regions(outputStream* st) const {
-  PrintHeapRegionsClosure pc1(st);
-  heap_region_iterate(&pc1);
-}
-
-class PrintAllRefsOopClosure: public ExtendedOopClosure {
-private:
-  int _index;
-  const char* _prefix;
-
-public:
-  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}
-
-  void do_oop(oop* p)       {
-    oop o = *p;
-    if (o != NULL) {
-      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
-	tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass()));
-      } else {
-	//        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set()));
-	tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o));
-      }
-    } else {
-      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
-    }
-    _index++;
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-
-};
-
-class PrintAllRefsObjectClosure : public ObjectClosure {
-  const char* _prefix;
-
-public:
-  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}
-
-  void do_object(oop p) {
-    if (ShenandoahHeap::heap()->is_in(p)) {
-	tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass()));
-	PrintAllRefsOopClosure cl(_prefix);
-	p->oop_iterate(&cl);
-      }
-  }
-};
-
-void ShenandoahHeap::print_all_refs(const char* prefix) {
-  tty->print_cr("printing all references in the heap");
-  tty->print_cr("root references:");
-
-  ensure_parsability(false);
-
-  PrintAllRefsOopClosure cl(prefix);
-  roots_iterate(&cl);
-
-  tty->print_cr("heap references:");
-  PrintAllRefsObjectClosure cl2(prefix);
-  object_iterate(&cl2);
-}
-
-class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
-private:
-  ShenandoahHeap*  _heap;
-
-public:
-  VerifyAfterMarkingOopClosure() :
-    _heap(ShenandoahHeap::heap()) { }
-
-  void do_oop(oop* p)       {
-    oop o = *p;
-    if (o != NULL) {
-      if (! _heap->is_marked_current(o)) {
-	_heap->print_heap_regions();
-	_heap->print_all_refs("post-mark");
-	tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s", 
-		      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_current(o)));
-	_heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());
-
-        tty->print_cr("oop class: %s", o->klass()->internal_name());
-	if (_heap->is_in(p)) {
-	  oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
-	  tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
-	  referrer->print();
-	  _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
-	}
-        tty->print_cr("heap region containing object:");
-	_heap->heap_region_containing(o)->print();
-        tty->print_cr("heap region containing referrer:");
-	_heap->heap_region_containing(p)->print();
-        tty->print_cr("heap region containing forwardee:");
-	_heap->heap_region_containing(oopDesc::bs()->resolve_oop(o))->print();
-      }
-      assert(o->is_oop(), "oop must be an oop");
-      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
-      if (! (o == oopDesc::bs()->resolve_oop(o))) {
-        tty->print_cr("oops has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o),  BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->resolve_oop(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->resolve_oop(o))->is_in_collection_set()));
-        tty->print_cr("oop class: %s", o->klass()->internal_name());
-      }
-      assert(o == oopDesc::bs()->resolve_oop(o), "oops must not be forwarded");
-      assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
-      assert(_heap->is_marked_current(o), "live oops must be marked current");
-    }
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-
-};
-
-class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
-private:
-  ShenandoahHeap* _heap;
-  ExtendedOopClosure* _cl;
-public:
-  IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
-    _heap(ShenandoahHeap::heap()), _cl(cl) {};
-
-  void do_object(oop p) {
-    if (_heap->is_marked_current(p)) {
-      p->oop_iterate(_cl);
-    }
-  }
-
-};
-
-class IterateMarkedObjectsClosure: public ObjectClosure {
-private:
-  ShenandoahHeap* _heap;
-  ExtendedOopClosure* _cl;
-public:
-  IterateMarkedObjectsClosure(ExtendedOopClosure* cl) :
-    _heap(ShenandoahHeap::heap()), _cl(cl) {};
-
-  void do_object(oop p) {
-    if (_heap->is_marked_current(p)) {
-      p->oop_iterate(_cl);
-    }
-  }
-
-};
-
-void ShenandoahHeap::verify_heap_after_marking() {
-
-  verify_heap_size_consistency();
-
-  if (ShenandoahGCVerbose) {
-    tty->print("verifying heap after marking\n");
-  }
-  ensure_parsability(false);
-  VerifyAfterMarkingOopClosure cl;
-  roots_iterate(&cl);
-
-  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
-  object_iterate(&marked_oops);
-}
-
-void ShenandoahHeap::prepare_for_concurrent_evacuation() {
-  if (!cancelled_concgc()) {
-
-    recycle_dirty_regions();
-
-      ensure_parsability(true);
-
-      // NOTE: This needs to be done during a stop the world pause, because
-      // putting regions into the collection set concurrently with Java threads
-      // will create a race. In particular, acmp could fail because when we
-      // resolve the first operand, the containing region might not yet be in
-      // the collection set, and thus return the original oop. When the 2nd
-      // operand gets resolved, the region could be in the collection set
-      // and the oop gets evacuated. If both operands have originally been
-      // the same, we get false negatives.
-      ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
-      regions.reclaim_humongous_regions();
-      _collection_set->clear();
-      _free_regions->clear();
-      _shenandoah_policy->choose_collection_and_free_sets(&regions, _collection_set, _free_regions);
-
-      if (PrintGCTimeStamps) {
-	gclog_or_tty->print("Collection set used = " SIZE_FORMAT " K live = " SIZE_FORMAT " K reclaimable = " SIZE_FORMAT " K\n",
-			    _collection_set->used() / K, _collection_set->live_data() / K, _collection_set->garbage() / K);
-      }
-
-      if (_collection_set->length() == 0)
-	cancel_concgc();
-  
-      _bytesAllocSinceCM = 0;
-
-      Universe::update_heap_info_at_gc();
-    }
-}
-    
-
-class ShenandoahUpdateRootsClosure: public ExtendedOopClosure {
-
-  void do_oop(oop* p)       {
-    ShenandoahHeap::heap()->maybe_update_oop_ref(p);
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-};
-
-void ShenandoahHeap::update_roots() {
-
-  COMPILER2_PRESENT(DerivedPointerTable::clear());
-
-  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
-
-  ShenandoahUpdateRootsClosure cl;
-  CodeBlobToOopClosure blobsCl(&cl, false);
-  CLDToOopClosure cldCl(&cl);
-
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  {
-    ShenandoahRootProcessor rp(this, 1);
-    rp.process_all_roots(&cl, &cldCl, &blobsCl);
-    ShenandoahIsAliveClosure is_alive;
-    JNIHandles::weak_oops_do(&is_alive, &cl);
-  }
-
-  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-}
-
-class ShenandoahUpdateObjectsClosure : public ObjectClosure {
-  ShenandoahHeap* _heap;
-
-public:
-  ShenandoahUpdateObjectsClosure() :
-    _heap(ShenandoahHeap::heap()) {
-  }
-
-  void do_object(oop p) {
-    ShenandoahUpdateRootsClosure refs_cl;
-    assert(ShenandoahHeap::heap()->is_in(p), "only update objects in heap (where else?)");
-
-    if (_heap->is_marked_current(p)) {
-      p->oop_iterate(&refs_cl);
-    }
-  }
-
-};
-
-class ParallelUpdateRefsTask : public AbstractGangTask {
-private:
-  ShenandoahHeapRegionSet* _regions;
-
-public:
-  ParallelUpdateRefsTask(ShenandoahHeapRegionSet* regions) :
-    AbstractGangTask("Parallel Update References Task"), 
-  _regions(regions) {
-  }
-
-  void work(uint worker_id) {
-    ShenandoahUpdateObjectsClosure update_refs_cl;
-    ShenandoahHeapRegion* region = _regions->claim_next();
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    while (region != NULL && ! heap->cancelled_concgc()) {
-      if ((! region->is_in_collection_set()) && ! region->is_humongous_continuation()) {
-	heap->marked_object_iterate_careful(region, &update_refs_cl);
-      }
-      heap->reset_mark_bitmap_range(region->bottom(), region->end());
-      region = _regions->claim_next();
-    }
-    if (ShenandoahTracePhases && heap->cancelled_concgc()) {
-      tty->print_cr("Cancelled concurrent update references");
-    }
-  }
-};
-
-class RetireTLABClosure : public ThreadClosure {
-private:
-  bool _retire;
-
-public:
-  RetireTLABClosure(bool retire) : _retire(retire) {
-  }
-
-  void do_thread(Thread* thread) {
-    thread->gclab().make_parsable(_retire);
-  }
-};
-
-void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
-  CollectedHeap::ensure_parsability(retire_tlabs);
-
-  RetireTLABClosure cl(retire_tlabs);
-  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
-    cl.do_thread(thread);
-  }
-  gc_threads_do(&cl);
-}
-
-void ShenandoahHeap::prepare_for_update_references() {
-  ensure_parsability(true);
-
-  ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
-  regions.set_concurrent_iteration_safe_limits();
-
-  if (ShenandoahVerifyReadsToFromSpace) {
-    set_from_region_protection(false);
-
-    // We need to update the roots so that they are ok for C2 when returning from the safepoint.
-    update_roots();
-
-    set_from_region_protection(true);
-
-  } else {
-    // We need to update the roots so that they are ok for C2 when returning from the safepoint.
-    update_roots();
-  }
-
-  set_update_references_in_progress(true);
-}
-
-void ShenandoahHeap::update_references() {
-
-  ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
-  ParallelUpdateRefsTask task = ParallelUpdateRefsTask(&regions);
-  conc_workers()->set_active_workers(_max_conc_workers);
-  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_uprefs);
-  conc_workers()->run_task(&task);
-  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_uprefs);
-  conc_workers()->set_active_workers(_max_conc_workers);
-
-  if (! cancelled_concgc()) {
-    VM_ShenandoahUpdateRootRefs update_roots;
-    if (ShenandoahConcurrentUpdateRefs) {
-      VMThread::execute(&update_roots);
-    } else {
-      update_roots.doit();
-    }
-
-    _allocated_last_gc = used() - _used_start_gc;
-    size_t max_allocated_gc = MAX2(_max_allocated_gc, _allocated_last_gc);
-    /*
-      tty->print_cr("prev max_allocated_gc: "SIZE_FORMAT", new max_allocated_gc: "SIZE_FORMAT", allocated_last_gc: "SIZE_FORMAT" diff %f", _max_allocated_gc, max_allocated_gc, _allocated_last_gc, ((double) max_allocated_gc/ (double) _allocated_last_gc));
-    */
-    _max_allocated_gc = max_allocated_gc;
-
-    // Update-references completed, no need to update-refs during marking.
-    set_need_update_refs(false);
-    set_need_reset_bitmaps(false);
-  }
-
-  Universe::update_heap_info_at_gc();
-
-  set_update_references_in_progress(false);
-}
-
-
-class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
-private:
-  ShenandoahHeap* _heap;
-  Thread* _thread;
-public:
-  ShenandoahEvacuateUpdateRootsClosure() :
-    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
-  }
-
-  void do_oop(oop* p) {
-    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
-
-    oop obj = oopDesc::load_heap_oop(p);
-    if (obj != NULL && _heap->in_cset_fast_test((HeapWord*) obj)) {
-      assert(_heap->is_marked_current(obj), err_msg("only evacuate marked objects %d %d", _heap->is_marked_current(obj), _heap->is_marked_current(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))));
-      oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
-      if (resolved == obj) {
-	resolved = _heap->evacuate_object(obj, _thread);
-      }
-      oopDesc::store_heap_oop(p, resolved);
-    }
-#ifdef ASSERT
-    else if (! oopDesc::is_null(obj)) {
-      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s", p2i(p), p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_in(obj)), BOOL_TO_STR(_heap->in_cset_fast_test(obj)), BOOL_TO_STR(_heap->is_marked_current(obj)));
-    }
-#endif
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-};
-
-class ShenandoahEvacuateUpdateStrongRootsTask : public AbstractGangTask {
-  ShenandoahRootProcessor* _rp;
-public:
-
-  ShenandoahEvacuateUpdateStrongRootsTask(ShenandoahRootProcessor* rp) :
-    AbstractGangTask("Shenandoah evacuate and update strong roots"),
-    _rp(rp)
-  {
-    // Nothing else to do.
-  }
-
-  void work(uint worker_id) {
-    ShenandoahEvacuateUpdateRootsClosure cl;
-    CodeBlobToOopClosure blobsCl(&cl, false);
-    CLDToOopClosure cldCl(&cl);
-
-    _rp->process_all_roots(&cl, &cldCl, &blobsCl);
-  }
-};
-
-class ShenandoahEvacuateUpdateWeakRootsTask : public AbstractGangTask {
-public:
-
-  ShenandoahEvacuateUpdateWeakRootsTask() : AbstractGangTask("Shenandoah evacuate and update weak roots") {
-    // Nothing else to do.
-  }
-
-  void work(uint worker_id) {
-    ShenandoahEvacuateUpdateRootsClosure cl;
-    ShenandoahIsAliveClosure is_alive;
-    JNIHandles::weak_oops_do(&is_alive, &cl);
-
-    ShenandoahHeap* heap = ShenandoahHeap::heap();
-    if (ShenandoahProcessReferences) {
-      heap->ref_processor()->weak_oops_do(&cl);
-    }
-  }
-};
-
-void ShenandoahHeap::evacuate_and_update_roots() {
-
-  COMPILER2_PRESENT(DerivedPointerTable::clear());
-  
-  if (ShenandoahVerifyReadsToFromSpace) {
-    set_from_region_protection(false);
-  }
-
-  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  {
-    ShenandoahRootProcessor rp(this, _max_parallel_workers);
-    ShenandoahEvacuateUpdateStrongRootsTask strong_roots_task(&rp);
-    workers()->set_active_workers(_max_parallel_workers);
-    workers()->run_task(&strong_roots_task);
-  }
-
-  // We process weak roots using only 1 worker thread, multi-threaded weak roots
-  // processing is not implemented yet. We can't use the VMThread itself, because
-  // we need to grab the Heap_lock.
-  {
-    ShenandoahEvacuateUpdateWeakRootsTask weak_roots_task;
-    workers()->set_active_workers(1);
-    workers()->run_task(&weak_roots_task);
-    workers()->set_active_workers(_max_parallel_workers);
-  }
-
-  if (ShenandoahVerifyReadsToFromSpace) {
-    set_from_region_protection(true);
-  }
-
-  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-
-}
-
-
-void ShenandoahHeap::do_evacuation() {
-  assert(Thread::current()->is_VM_thread() || ShenandoahConcurrentEvacuation, "Only evacuate from VMThread unless we do concurrent evacuation");
-
-  parallel_evacuate();
-
-  if (! ShenandoahConcurrentEvacuation) {
-    // We need to make sure that after leaving the safepoint, all
-    // GC roots are up-to-date. This is an assumption built into
-    // the hotspot compilers, especially C2, that allows it to
-    // do optimizations like lifting barriers outside of a loop.
-
-    if (ShenandoahVerifyReadsToFromSpace) {
-      set_from_region_protection(false);
-
-      update_roots();
-
-      set_from_region_protection(true);
-
-    } else {
-      update_roots();
-    }
-  }
-
-  if (ShenandoahVerify && ! cancelled_concgc()) {
-    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
-    if (Thread::current()->is_VM_thread()) {
-      verify_after_evacuation.doit();
-    } else {
-      VMThread::execute(&verify_after_evacuation);
-    }
-  }
-
-}
-
-void ShenandoahHeap::parallel_evacuate() {
-
-  if (! cancelled_concgc()) {
-    assert(Thread::current()->is_VM_thread() || ShenandoahConcurrentEvacuation, "Only evacuate from VMThread unless we do concurrent evacuation");
-
-    if (ShenandoahGCVerbose) {
-      tty->print_cr("starting parallel_evacuate");
-      //    PrintHeapRegionsClosure pc1;
-      //    heap_region_iterate(&pc1);
-    }
-
-    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);
-
-    if (ShenandoahGCVerbose) {
-      tty->print("Printing all available regions");
-      print_heap_regions();
-    }
-
-    if (ShenandoahPrintCollectionSet) {
-      tty->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->length());
-      _collection_set->print();
-      
-      tty->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->length());
-      _free_regions->print();
-
-      //    if (_collection_set->length() == 0)
-      //      print_heap_regions();      
-    }
-
-    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);
-
-    conc_workers()->set_active_workers(_max_conc_workers);
-    conc_workers()->run_task(&evacuationTask);
-    //workers()->set_active_workers(_max_parallel_workers);
-
-    if (ShenandoahGCVerbose) {
-
-      tty->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->available_regions());
-      _collection_set->print();
-
-      tty->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n", _free_regions->available_regions());
-      _free_regions->print();
-
-      tty->print_cr("finished parallel_evacuate");
-      print_heap_regions();
-
-      tty->print_cr("all regions after evacuation:");
-      print_heap_regions();
-    }
-
-    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
-  }
-}
-
-class VerifyEvacuationClosure: public ExtendedOopClosure {
-private:
-  ShenandoahHeap*  _heap;
-  ShenandoahHeapRegion* _from_region;
-
-public:
-  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
-    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
-
-  void do_oop(oop* p)       {
-    oop heap_oop = oopDesc::load_heap_oop(p);
-    if (! oopDesc::is_null(heap_oop)) {
-      guarantee(! _from_region->is_in(heap_oop), err_msg("no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop)));
-    }
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-
-};
-
-void ShenandoahHeap::roots_iterate(ExtendedOopClosure* cl) {
-
-  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
-
-  CodeBlobToOopClosure blobsCl(cl, false);
-  CLDToOopClosure cldCl(cl);
-
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  ShenandoahRootProcessor rp(this, 1);
-  rp.process_all_roots(cl, &cldCl, &blobsCl);
-}
-
-void ShenandoahHeap::weak_roots_iterate(ExtendedOopClosure* cl) {
-  if (ShenandoahProcessReferences) {
-    ref_processor()->weak_oops_do(cl);
-  }
-  ShenandoahAlwaysTrueClosure always_true;
-  JNIHandles::weak_oops_do(&always_true, cl);
-}
-
-void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
-
-  VerifyEvacuationClosure rootsCl(from_region);
-  roots_iterate(&rootsCl);
-
-}
-
-bool ShenandoahHeap::supports_tlab_allocation() const {
-  return true;
-}
-
-
-size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
-  ShenandoahHeapRegion* current = get_current_region_skip_humongous();
-  if (current == NULL) 
-    return 0;
-  else if (current->free() > MinTLABSize) {
-    return current->free();
-  } else {
-    return MinTLABSize;
-  }
-}
-
-size_t ShenandoahHeap::max_tlab_size() const {
-  return ShenandoahHeapRegion::RegionSizeBytes;
-}
-
-class ResizeGCLABClosure : public ThreadClosure {
-public:
-  void do_thread(Thread* thread) {
-    thread->gclab().resize();
-  }
-};
-
-void ShenandoahHeap::resize_all_tlabs() {
-  CollectedHeap::resize_all_tlabs();
-
-  if (PrintTLAB && Verbose) {
-    tty->print_cr("Resizing Shenandoah GCLABs...");
-  }
-
-  ResizeGCLABClosure cl;
-  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
-    cl.do_thread(thread);
-  }
-  gc_threads_do(&cl);
-
-  if (PrintTLAB && Verbose) {
-    tty->print_cr("Done resizing Shenandoah GCLABs...");
-  }
-}
-
-class AccumulateStatisticsGCLABClosure : public ThreadClosure {
-public:
-  void do_thread(Thread* thread) {
-    thread->gclab().accumulate_statistics();
-    thread->gclab().initialize_statistics();
-  }
-};
-
-void ShenandoahHeap::accumulate_statistics_all_gclabs() {
-
-  AccumulateStatisticsGCLABClosure cl;
-  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
-    cl.do_thread(thread);
-  }
-  gc_threads_do(&cl);
-}
-
-bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
-  return true;
-}
-
-oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
-  // Overridden to do nothing.
-  return new_obj;
-}
-
-bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
-  return true;
-}
-
-bool ShenandoahHeap::card_mark_must_follow_store() const {
-  return false;
-}
-
-bool ShenandoahHeap::supports_heap_inspection() const {
-  return false;
-}
-
-size_t ShenandoahHeap::unsafe_max_alloc() {
-  return ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
-}
-
-void ShenandoahHeap::collect(GCCause::Cause cause) {
-  if (GCCause::is_user_requested_gc(cause)) {
-    if (! DisableExplicitGC) {
-      if (ShenandoahTraceFullGC) {
-        gclog_or_tty->print_cr("Shenandoah-full-gc: requested full GC");
-      }
-      cancel_concgc();
-      _concurrent_gc_thread->do_full_gc(cause);
-    }
-  } else if (cause == GCCause::_allocation_failure) {
-
-    if (ShenandoahTraceFullGC) {
-      gclog_or_tty->print_cr("Shenandoah-full-gc: full GC for allocation failure heap free: "SIZE_FORMAT", available: "SIZE_FORMAT, capacity() - used(), free_regions()->available());
-    }
-    cancel_concgc();
-    collector_policy()->set_should_clear_all_soft_refs(true);
-      _concurrent_gc_thread->do_full_gc(cause);
-
-  } else if (cause == GCCause::_gc_locker) {
-
-    if (ShenandoahTraceJNICritical) {
-      gclog_or_tty->print_cr("Resuming deferred evacuation after JNI critical regions");
-    }
-
-    jni_critical()->notify_jni_critical();
-  }
-}
-
-void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
-  //assert(false, "Shouldn't need to do full collections");
-}
-
-AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
-  Unimplemented();
-  return NULL;
-  
-}
-
-ShenandoahCollectorPolicy* ShenandoahHeap::collector_policy() const {
-  return _shenandoah_policy;
-}
-
-
-HeapWord* ShenandoahHeap::block_start(const void* addr) const {
-  Space* sp = space_containing(addr);
-  if (sp != NULL) {
-    return sp->block_start(addr);
-  }
-  return NULL;
-}
-
-size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
-  Space* sp = space_containing(addr);
-  assert(sp != NULL, "block_size of address outside of heap");
-  return sp->block_size(addr);
-}
-
-bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
-  Space* sp = space_containing(addr);
-  return sp->block_is_obj(addr);
-}
-
-jlong ShenandoahHeap::millis_since_last_gc() {
-  return 0;
-}
-
-void ShenandoahHeap::prepare_for_verify() {
-  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
-    ensure_parsability(false);
-  }
-}
-
-void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
-  workers()->print_worker_threads_on(st);
-  conc_workers()->print_worker_threads_on(st);
-}
-
-void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
-  workers()->threads_do(tcl);
-  conc_workers()->threads_do(tcl);
-}
-
-void ShenandoahHeap::print_tracing_info() const {
-  if (PrintGCDetails) {
-    _shenandoah_policy->print_tracing_info();
-  }
-}
-
-class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
-private:
-  ShenandoahHeap*  _heap;
-  VerifyOption     _vo;
-  bool             _failures;
-public:
-  // _vo == UsePrevMarking -> use "prev" marking information,
-  // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
-  ShenandoahVerifyRootsClosure(VerifyOption vo) :
-    _heap(ShenandoahHeap::heap()),
-    _vo(vo),
-    _failures(false) { }
-
-  bool failures() { return _failures; }
-
-  void do_oop(oop* p)       {
-    if (*p != NULL) {
-      oop heap_oop = oopDesc::load_heap_oop(p);
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (!obj->is_oop()) {
-        { // Just for debugging.
-	  gclog_or_tty->print_cr("Root location "PTR_FORMAT
-				 "verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
-	  //	  obj->print_on(gclog_or_tty);
-        }
-      }
-      guarantee(obj->is_oop(), "is_oop");
-    }
-  }
-
-  void do_oop(narrowOop* p) {
-    Unimplemented();
-  }
-
-};
-
-class ShenandoahVerifyHeapClosure: public ObjectClosure {
-private:
-  ShenandoahVerifyRootsClosure _rootsCl;
-public:
-  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
-    _rootsCl(rc) {};
-
-  void do_object(oop p) {
-    _rootsCl.do_oop(&p);
-  }
-};
-
-class ShenandoahVerifyKlassClosure: public KlassClosure {
-  OopClosure *_oop_closure;
- public:
-  ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
-  void do_klass(Klass* k) {
-    k->oops_do(_oop_closure);
-  }
-};
-
-void ShenandoahHeap::verify(bool silent , VerifyOption vo) {
-  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
-
-    ShenandoahVerifyRootsClosure rootsCl(vo);
-
-    assert(Thread::current()->is_VM_thread(),
-	   "Expected to be executed serially by the VM thread at this point");
-
-    roots_iterate(&rootsCl);
-
-    bool failures = rootsCl.failures();
-    if (ShenandoahGCVerbose)
-      gclog_or_tty->print("verify failures: %s", BOOL_TO_STR(failures));
-
-    ShenandoahVerifyHeapClosure heapCl(rootsCl);
-
-    object_iterate(&heapCl);
-    // TODO: Implement rest of it.
-#ifdef ASSERT_DISABLED
-    verify_live();
-#endif
-  } else {
-    if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
-  }
-}
-
-size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
-  return _free_regions->available();
-}
-
-class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
-  ObjectClosure* _cl;
-public:
-  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    r->object_iterate_interruptible(_cl, false);
-    return false;
-  }
-};
-
-class ShenandoahIterateUpdateClosure: public ShenandoahHeapRegionClosure {
-  ObjectClosure* _cl;
-public:
-  ShenandoahIterateUpdateClosure(ObjectClosure *cl) : _cl(cl) {}
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    if ((! r->is_in_collection_set()) && !r->is_humongous_continuation()) {
-      r->object_iterate_interruptible(_cl, false);
-    }
-    return false;
-  }
-};
-
-void ShenandoahHeap::cleanup_after_cancelconcgc() {
-  if (need_update_refs()) {
-  ShenandoahUpdateObjectsClosure update_refs_cl;  
-  ShenandoahIterateUpdateClosure blk(&update_refs_cl);
-  heap_region_iterate(&blk, false, false);
-  }
-}
-
-class ShenandoahIterateObjectClosureCarefulRegionClosure: public ShenandoahHeapRegionClosure {
-  ObjectClosureCareful* _cl;
-public:
-  ShenandoahIterateObjectClosureCarefulRegionClosure(ObjectClosureCareful* cl) : _cl(cl) {}
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    r->object_iterate_careful(_cl);
-    return false;
-  }
-};
-
-void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
-  ShenandoahIterateObjectClosureRegionClosure blk(cl);
-  heap_region_iterate(&blk, false, true);
-}
-
-void ShenandoahHeap::object_iterate_careful(ObjectClosureCareful* cl) {
-  ShenandoahIterateObjectClosureCarefulRegionClosure blk(cl);
-  heap_region_iterate(&blk, false, true);
-}
-
-void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
-  Unimplemented();
-}
-
-void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl) {
-  marked_object_iterate(region, cl, region->bottom(), region->top());
-}
-
-void ShenandoahHeap::marked_object_iterate_careful(ShenandoahHeapRegion* region, ObjectClosure* cl) {
-  marked_object_iterate(region, cl, region->bottom(), region->concurrent_iteration_safe_limit());
-}
-
-void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl,
-					   HeapWord* addr, HeapWord* limit) {
-  addr += BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  HeapWord* last_addr = NULL;
-  size_t last_size = 0;
-  while (addr < limit) {
-    addr = _next_mark_bit_map->getNextMarkedWordAddress(addr, limit);
-    if (addr < limit) {
-      oop obj = oop(addr);
-      assert(is_marked_current(obj), "object expected to be marked");
-      cl->do_object(obj);
-      last_addr = addr;
-      last_size = obj->size();
-      addr += obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-    } else {
-      break;
-    }
-  }
-}
-
-class ShenandoahIterateOopClosureRegionClosure : public ShenandoahHeapRegionClosure {
-  MemRegion _mr;
-  ExtendedOopClosure* _cl;
-  bool _skip_unreachable_objects;
-public:
-  ShenandoahIterateOopClosureRegionClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
-    _cl(cl), _skip_unreachable_objects(skip_unreachable_objects) {}
-  ShenandoahIterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl) 
-    :_mr(mr), _cl(cl) {}
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    r->oop_iterate_skip_unreachable(_cl, _skip_unreachable_objects);
-    return false;
-  }
-};
-
-void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl, bool skip_dirty_regions, bool skip_unreachable_objects) {
-  ShenandoahIterateOopClosureRegionClosure blk(cl, skip_unreachable_objects);
-  heap_region_iterate(&blk, skip_dirty_regions, true);
-}
-
-void ShenandoahHeap::oop_iterate(MemRegion mr, 
-				 ExtendedOopClosure* cl) {
-  ShenandoahIterateOopClosureRegionClosure blk(mr, cl);
-  heap_region_iterate(&blk, false, true);
-}
-
-void  ShenandoahHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
-  Unimplemented();
-}
-
-class SpaceClosureRegionClosure: public ShenandoahHeapRegionClosure {
-  SpaceClosure* _cl;
-public:
-  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    _cl->do_space(r);
-    return false;
-  }
-};
-
-void  ShenandoahHeap::space_iterate(SpaceClosure* cl) {
-  SpaceClosureRegionClosure blk(cl);
-  heap_region_iterate(&blk);
-}
-
-ShenandoahHeapRegion*
-ShenandoahHeap::heap_region_containing(const void* addr) const {
-  uint index = heap_region_index_containing(addr);
-  ShenandoahHeapRegion* result = _ordered_regions[index];
-#ifdef ASSERT
-  if (!(addr >= result->bottom() && addr < result->end())) {
-    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions[0]->bottom()), _num_regions);
-  }
-#endif
-  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
-  return result;
-}
-
-Space*  ShenandoahHeap::space_containing(const void* oop) const {
-  Space* res = heap_region_containing(oop);
-  return res;
-}
-
-void  ShenandoahHeap::gc_prologue(bool b) {
-  Unimplemented();
-}
-
-void  ShenandoahHeap::gc_epilogue(bool b) {
-  Unimplemented();
-}
-
-// Apply blk->doHeapRegion() on all committed regions in address order,
-// terminating the iteration early if doHeapRegion() returns true.
-void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
-  for (size_t i = 0; i < _num_regions; i++) {
-    ShenandoahHeapRegion* current  = _ordered_regions[i];
-    if (skip_humongous_continuation && current->is_humongous_continuation()) {
-      continue;
-    }
-    if (skip_dirty_regions && current->is_in_collection_set()) {
-      continue;
-    }
-    if (blk->doHeapRegion(current)) { 
-      return;
-    }
-  }
-}
-
-/**
- * Maybe we need that at some point...
-oop* ShenandoahHeap::resolve_oop_ptr(oop* p) {
-  if (is_in(p) && heap_region_containing(p)->is_dirty()) {
-    // If the reference is in an object in from-space, we need to first
-    // find its to-space counterpart.
-    // TODO: This here is slow (linear search inside region). Make it faster.
-    oop from_space_oop = oop_containing_oop_ptr(p);
-    HeapWord* to_space_obj = (HeapWord*) oopDesc::bs()->resolve_oop(from_space_oop);
-    return (oop*) (to_space_obj + ((HeapWord*) p - ((HeapWord*) from_space_oop)));
-  } else {
-    return p;
-  }
-}
-
-oop ShenandoahHeap::oop_containing_oop_ptr(oop* p) {
-  HeapWord* from_space_ref = (HeapWord*) p;
-  ShenandoahHeapRegion* region = heap_region_containing(from_space_ref);
-  HeapWord* from_space_obj = NULL;
-  for (HeapWord* curr = region->bottom(); curr < from_space_ref; ) {
-    oop curr_obj = (oop) curr;
-    if (curr < from_space_ref && from_space_ref < (curr + curr_obj->size())) {
-      from_space_obj = curr;
-      break;
-    } else {
-      curr += curr_obj->size();
-    }
-  }
-  assert (from_space_obj != NULL, "must not happen");
-  oop from_space_oop = (oop) from_space_obj;
-  assert (from_space_oop->is_oop(), "must be oop");
-  assert(ShenandoahBarrierSet::is_brooks_ptr(oop(((HeapWord*) from_space_oop) - BrooksPointer::BROOKS_POINTER_OBJ_SIZE)), "oop must have a brooks ptr");
-  return from_space_oop;
-}
- */
-
-class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
-  ShenandoahHeap* sh;
-public:
-  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
-  
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    r->clearLiveData();
-    return false;
-  }
-};
-
-
-void ShenandoahHeap::start_concurrent_marking() {
-
-  set_concurrent_mark_in_progress(true);
-  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
-  if (UseTLAB) {
-    accumulate_statistics_all_tlabs();
-    ensure_parsability(true);
-  }
-
-  _shenandoah_policy->record_bytes_allocated(_bytesAllocSinceCM);
-  _used_start_gc = used();
-
-#ifdef ASSERT
-  if (ShenandoahDumpHeapBeforeConcurrentMark) {
-    ensure_parsability(false);
-    print_all_refs("pre-mark");
-  }
-#endif
-  
-  ClearLivenessClosure clc(this);
-  heap_region_iterate(&clc);
-
-  // print_all_refs("pre -mark");
-
-  // oopDesc::_debug = true;
-
-  concurrentMark()->prepare_unmarked_root_objs();
-
-  //  print_all_refs("pre-mark2");
-}
-
-
-class VerifyLivenessClosure : public ExtendedOopClosure {
-
-  ShenandoahHeap* _sh;
-
-public:
-  VerifyLivenessClosure() : _sh ( ShenandoahHeap::heap() ) {}
-
-  template<class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (obj != oopDesc::bs()->resolve_oop(obj)),
-                err_msg("forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s",
-                        BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
-                        BOOL_TO_STR(obj != oopDesc::bs()->resolve_oop(obj)))
-                );
-      obj = oopDesc::bs()->resolve_oop(obj);
-      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
-      guarantee(obj->is_oop(), "is_oop");
-      ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-      if (! sh->is_marked_current(obj)) {
-        sh->print_on(tty);
-      }
-      assert(sh->is_marked_current(obj), err_msg("Referenced Objects should be marked obj: "PTR_FORMAT", marked: %s, is_in_heap: %s", 
-                                               p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj))));
-    }
-  }
-
-  void do_oop(oop* p)       { do_oop_nv(p); }
-  void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-};
-
-void ShenandoahHeap::verify_live() {
-
-  VerifyLivenessClosure cl;
-  roots_iterate(&cl);
-
-  IterateMarkedObjectsClosure marked_oops(&cl);
-  object_iterate(&marked_oops);
-
-}
-
-class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
-
-  ShenandoahHeap* _sh;
-
-public:
-  VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
-
-  template<class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (obj != oopDesc::bs()->resolve_oop(obj)),
-                err_msg("forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
-                        BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
-                        BOOL_TO_STR(obj != oopDesc::bs()->resolve_oop(obj)), obj->klass()->external_name(), BOOL_TO_STR(_sh->is_marked_current(obj)))
-                );
-      obj = oopDesc::bs()->resolve_oop(obj);
-      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
-      guarantee(obj->is_oop(), "is_oop");
-      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
-    }
-  }
-
-  void do_oop(oop* p)       { do_oop_nv(p); }
-  void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-};
-
-class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {
-
-  ShenandoahHeap* _sh;
-
-public:
-  VerifyAfterUpdateRefsClosure() : _sh ( ShenandoahHeap::heap() ) {}
-
-  template<class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
-                err_msg("no live reference must point to from-space, is_marked: %s",
-                        BOOL_TO_STR(_sh->is_marked_current(obj))));
-      if (obj != oopDesc::bs()->resolve_oop(obj) && _sh->is_in(p)) {
-        tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT, p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
-      }
-      guarantee(obj == oopDesc::bs()->resolve_oop(obj), "no live reference must point to forwarded object");
-      guarantee(obj->is_oop(), "is_oop");
-      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
-    }
-  }
-
-  void do_oop(oop* p)       { do_oop_nv(p); }
-  void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-};
-
-void ShenandoahHeap::verify_heap_after_evacuation() {
-
-  verify_heap_size_consistency();
-
-  ensure_parsability(false);
-
-  VerifyAfterEvacuationClosure cl;
-  roots_iterate(&cl);
-
-  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
-  object_iterate(&marked_oops);
-
-}
-
-class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
-public:
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    assert(! r->is_in_collection_set(), "no region must be in collection set");
-    assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
-    return false;
-  }
-};
-
-void ShenandoahHeap::verify_regions_after_update_refs() {
-  VerifyRegionsAfterUpdateRefsClosure verify_regions;
-  heap_region_iterate(&verify_regions);
-}
-
-void ShenandoahHeap::verify_heap_after_update_refs() {
-
-  verify_heap_size_consistency();
-
-  ensure_parsability(false);
-
-  VerifyAfterUpdateRefsClosure cl;
-
-  roots_iterate(&cl);
-  weak_roots_iterate(&cl);
-  oop_iterate(&cl, true, true);
-
-}
-
-void ShenandoahHeap::accumulate_statistics_all_tlabs() {
-  CollectedHeap::accumulate_statistics_all_tlabs();
-}
-
-void ShenandoahHeap::stop_concurrent_marking() {
-  assert(concurrent_mark_in_progress(), "How else could we get here?");
-  if (! cancelled_concgc()) {
-    // If we needed to update refs, and concurrent marking has been cancelled,
-    // we need to finish updating references.
-    set_need_update_refs(false);
-  }
-  set_concurrent_mark_in_progress(false);
-
-
-  if (UseTLAB) {
-    shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::resize_tlabs);
-    accumulate_statistics_all_tlabs();
-    resize_all_tlabs();
-    shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::resize_tlabs);
-  }
-
-  if (ShenandoahGCVerbose) {
-    print_heap_regions();
-  }
-
-#ifdef ASSERT
-  if (ShenandoahVerify && ! _cancelled_concgc) {
-    verify_heap_after_marking();
-  }
-
-#endif
-}
-
-bool ShenandoahHeap::concurrent_mark_in_progress() {
-  return _concurrent_mark_in_progress;
-}
-
-void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
-  if (ShenandoahTracePhases) {
-    if (in_progress) {
-      gclog_or_tty->print_cr("Shenandoah starting concurrent marking, heap used: "SIZE_FORMAT" MB", used() / M);
-    } else {
-      gclog_or_tty->print_cr("Shenandoah finishing concurrent marking, heap used: "SIZE_FORMAT" MB", used() / M);
-    }
-  }
-
-  _concurrent_mark_in_progress = in_progress;
-  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, ! in_progress);
-}
-
-void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
-  if (ShenandoahTracePhases) {
-    if (ShenandoahConcurrentEvacuation) {
-      if (in_progress) {
-        gclog_or_tty->print_cr("Shenandoah starting concurrent evacuation, heap used: "SIZE_FORMAT" MB", used() / M);
-      } else {
-        gclog_or_tty->print_cr("Shenandoah finishing concurrent evacuation, heap used: "SIZE_FORMAT" MB", used() / M);
-      }
-    } else {
-      if (in_progress) {
-        gclog_or_tty->print_cr("Shenandoah starting non-concurrent evacuation");
-      } else {
-        gclog_or_tty->print_cr("Shenandoah finishing non-concurrent evacuation");
-      }
-    }
-  }
-  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
-  _evacuation_in_progress = in_progress;
-  OrderAccess::fence();
-}
-
-bool ShenandoahHeap::is_evacuation_in_progress() {
-  return _evacuation_in_progress;
-}
-
-bool ShenandoahHeap::is_update_references_in_progress() {
-  return _update_references_in_progress;
-}
-
-void ShenandoahHeap::set_update_references_in_progress(bool update_refs_in_progress) {
-  if (ShenandoahTracePhases) {
-    if (ShenandoahConcurrentUpdateRefs) {
-      if (update_refs_in_progress) {
-        gclog_or_tty->print_cr("Shenandoah starting concurrent reference-updating");
-      } else {
-        gclog_or_tty->print_cr("Shenandoah finishing concurrent reference-updating");
-      }
-    } else {
-      if (update_refs_in_progress) {
-        gclog_or_tty->print_cr("Shenandoah starting non-concurrent reference-updating");
-      } else {
-        gclog_or_tty->print_cr("Shenandoah finishing non-concurrent reference-updating");
-      }
-    }
-  }
-  _update_references_in_progress = update_refs_in_progress;
-}
-
-void ShenandoahHeap::post_allocation_collector_specific_setup(HeapWord* hw) {
-  oop obj = oop(hw);
-
-  // Assuming for now that objects can't be created already locked
-  assert(! obj->has_displaced_mark(), "hopefully new objects don't have displaced mark");
-  // tty->print_cr("post_allocation_collector_specific_setup:: "PTR_FORMAT, p2i(obj));
-
-  if (_concurrent_mark_in_progress || _evacuation_in_progress) {
-    mark_current_no_checks(obj);
-  }
-}
-
-void ShenandoahHeap::verify_copy(oop p,oop c){
-    assert(p != oopDesc::bs()->resolve_oop(p), "forwarded correctly");
-    assert(oopDesc::bs()->resolve_oop(p) == c, "verify pointer is correct");
-    if (p->klass() != c->klass()) {
-      print_heap_regions();
-    }
-    assert(p->klass() == c->klass(), err_msg("verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size()));
-    assert(p->size() == c->size(), "verify size");
-    // Object may have been locked between copy and verification
-    //    assert(p->mark() == c->mark(), "verify mark");
-    assert(c == oopDesc::bs()->resolve_oop(c), "verify only forwarded once");
-  }
-
-void ShenandoahHeap::oom_during_evacuation() {
-  // tty->print_cr("Out of memory during evacuation, cancel evacuation, schedule full GC");
-  // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
-  collector_policy()->set_should_clear_all_soft_refs(true);
-  concurrent_thread()->schedule_full_gc();
-  cancel_concgc();
-
-  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
-    tty->print_cr("OOM during evacuation. Let Java thread wait until evacuation settlded..");
-    while (! conc_workers()->is_idle()) { // wait.
-      Thread::current()->_ParkEvent->park(1) ;
-    }
-  }
-
-}
-
-void ShenandoahHeap::copy_object(oop p, HeapWord* s) {
-  HeapWord* filler = s;
-  assert(s != NULL, "allocation of brooks pointer must not fail");
-  HeapWord* copy = s + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-
-  guarantee(copy != NULL, "allocation of copy object must not fail");
-  Copy::aligned_disjoint_words((HeapWord*) p, copy, p->size());
-  initialize_brooks_ptr(filler, copy);
-
-#ifdef ASSERT
-  if (ShenandoahTraceEvacuations) {
-    tty->print_cr("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
-  }
-#endif
-}
-
-oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
-  ShenandoahHeapRegion* hr;
-  size_t required;
-
-#ifdef ASSERT
-  if (ShenandoahVerifyReadsToFromSpace) {
-    hr = heap_region_containing(p);
-    {
-      hr->memProtectionOff();    
-      required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
-      hr->memProtectionOn();    
-    }
-  } else {
-    required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
-  }
-#else
-    required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
-#endif
-
-  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
-
-  // Don't even attempt to evacuate anything if evacuation has been cancelled.
-  if (_cancelled_concgc) {
-    return ShenandoahBarrierSet::resolve_oop_static(p);
-  }
-
-  bool alloc_from_gclab = true;
-  thread->set_evacuating(true);
-  HeapWord* filler = allocate_from_gclab(thread, required);
-  if (filler == NULL) {
-    filler = allocate_memory(required);
-    alloc_from_gclab = false;
-  }
-  thread->set_evacuating(false);
-
-  if (filler == NULL) {
-    oom_during_evacuation();
-    // If this is a Java thread, it should have waited
-    // until all GC threads are done, and then we
-    // return the forwardee.
-    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
-    return resolved;
-  }
-
-  HeapWord* copy = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  
-#ifdef ASSERT
-  if (ShenandoahVerifyReadsToFromSpace) {
-    hr->memProtectionOff();
-    copy_object(p, filler);
-    hr->memProtectionOn();
-  } else {
-    copy_object(p, filler);    
-  }
-#else 
-    copy_object(p, filler);    
-#endif
-
-  HeapWord* result = BrooksPointer::get(p).cas_forwardee((HeapWord*) p, copy);
-
-  oop return_val;
-  if (result == (HeapWord*) p) {
-    return_val = oop(copy);
-
-    mark_current(return_val);
-
-#ifdef ASSERT
-    if (ShenandoahTraceEvacuations) {
-      tty->print("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n", p2i((HeapWord*) p), p2i(copy));
-    }
-    assert(return_val->is_oop(), "expect oop");
-    assert(p->klass() == return_val->klass(), err_msg("Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy)));
-#endif
-  }  else {
-    if (alloc_from_gclab) {
-      thread->gclab().rollback(required);
-    }
-#ifdef ASSERT
-    if (ShenandoahTraceEvacuations) {
-      tty->print_cr("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
-    }
-#endif
-    return_val = (oopDesc*) result;
-  }
-
-  return return_val;
-}
-
-HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj, bool new_obj) {
-  HeapWord* result = obj + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  initialize_brooks_ptr(obj, result, new_obj);
-  return result;
-}
-
-uint ShenandoahHeap::oop_extra_words() {
-  return BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-}
-
-bool ShenandoahHeap::grow_heap_by() {
-  int new_region_index = ensure_new_regions(1);
-  if (new_region_index != -1) {
-    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
-    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
-    new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
-    if (ShenandoahGCVerbose) {
-      tty->print_cr("allocating new region at index: "INT32_FORMAT, new_region_index);
-      new_region->print();
-    }
-    _ordered_regions[new_region_index] = new_region;
-    _free_regions->append(new_region);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-int ShenandoahHeap::ensure_new_regions(int new_regions) {
-
-  size_t num_regions = _num_regions;
-  size_t new_num_regions = num_regions + new_regions;
-  if (new_num_regions >= _max_regions) {
-    // Not enough regions left.
-    return -1;
-  }
-
-  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
-  if (ShenandoahGCVerbose) {
-    tty->print_cr("expanding storage by "SIZE_FORMAT_HEX" bytes, for "INT32_FORMAT" new regions", expand_size, new_regions);
-  }
-  bool success = _storage.expand_by(expand_size);
-  assert(success, "should always be able to expand by requested size");
-
-  _num_regions = new_num_regions;
-
-  return num_regions;
-
-}
-
-#ifndef CC_INTERP
-void ShenandoahHeap::compile_prepare_oop(MacroAssembler* masm, Register obj) {
-  __ incrementq(obj, BrooksPointer::BROOKS_POINTER_OBJ_SIZE * HeapWordSize);
-  __ movptr(Address(obj, -1 * HeapWordSize), obj);
-}
-#endif
-
-bool  ShenandoahIsAliveClosure:: do_object_b(oop obj) { 
-
-  ShenandoahHeap* sh = ShenandoahHeap::heap();
-  if (sh->need_update_refs()) {
-    obj = ShenandoahBarrierSet::resolve_oop_static(obj);
-  }
-
-#ifdef ASSERT
-  if (obj != ShenandoahBarrierSet::resolve_oop_static(obj)) {
-    ShenandoahHeap* sh = ShenandoahHeap::heap();
-  }
-#endif
-  assert(obj == ShenandoahBarrierSet::resolve_oop_static(obj), "needs to be in to-space");
-
-    HeapWord* addr = (HeapWord*) obj;
-
-    if (ShenandoahTraceWeakReferences) {
-
-      if (addr != NULL) {
-	if(sh->is_in(addr)) {
-	  if (sh->is_obj_ill(obj)) {
-	    HandleMark hm;
-	    tty->print_cr("ShenandoahIsAliveClosure Found an ill object "PTR_FORMAT, p2i((HeapWord*) obj));
-	    obj->print();
-	  }
-	  else 
-	    tty->print_cr("found a healthy object "PTR_FORMAT, p2i((HeapWord*) obj));
-
-	} else {
-	  tty->print_cr("found an object outside the heap "PTR_FORMAT, p2i((HeapWord*) obj));
-	}
-      } else {
-	tty->print_cr("found a null object "PTR_FORMAT, p2i((HeapWord*) obj));
-      }
-    }
-
-    return addr != NULL && sh->is_marked_current(obj); //(!sh->is_in(addr) || !sh->is_obj_ill(obj));
-}
-
-void ShenandoahHeap::ref_processing_init() {
-  MemRegion mr = reserved_region();
-
-  // Concurrent Mark ref processor
-//   _ref_processor =
-//     new ReferenceProcessor(mr,    // span
-//                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
-//                                 // mt processing
-//                            (int) ParallelGCThreads,
-//                                 // degree of mt processing
-//                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
-//                                 // mt discovery
-//                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
-//                                 // degree of mt discovery
-//                            false,
-//                                 // Reference discovery is not atomic
-// 			   &isAlive);
-//                                 // is alive closure
-//                                 // (for efficiency/performance)
-  _ref_processor =
-    new ReferenceProcessor(mr,    // span
-			   ParallelRefProcEnabled && (ConcGCThreads > 1),
-			   // mt processing
-                           (int) ConcGCThreads,
-			   // degree of mt processing
-			   (ConcGCThreads > 1),
-			   // mt discovery
-			   (int) ConcGCThreads,
-			   // degree of mt discovery
-			   false,
-			   // Reference discovery is not atomic
- 			   &isAlive);
-  // is alive closure
-  // (for efficiency/performance)
-
-
-
-}
-
-#ifdef ASSERT
-void ShenandoahHeap::set_from_region_protection(bool protect) {
-  for (uint i = 0; i < _num_regions; i++) {
-    ShenandoahHeapRegion* region = _ordered_regions[i];
-    if (region != NULL && region->is_in_collection_set()) {
-      if (protect) {
-        region->memProtectionOn();
-      } else {
-        region->memProtectionOff();
-      }
-    }
-  }
-}
-#endif
-
-void ShenandoahHeap::acquire_pending_refs_lock() {
-  _concurrent_gc_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
-}
-
-void ShenandoahHeap::release_pending_refs_lock() {
-  _concurrent_gc_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
-}
-
-ShenandoahHeapRegion** ShenandoahHeap::heap_regions() {
-  return _ordered_regions;
-}
-
-size_t ShenandoahHeap::num_regions() {
-  return _num_regions;
-}
-
-size_t ShenandoahHeap::max_regions() {
-  return _max_regions;
-}
-
-GCTracer* ShenandoahHeap::tracer() {
-  return collector_policy()->tracer();
-}
-
-size_t ShenandoahHeap::tlab_used(Thread* thread) const {
-  return _free_regions->used();
-}
-
-void ShenandoahHeap::cancel_concgc() {
-  // only report it once
-  if (!_cancelled_concgc) {
-    if (ShenandoahTracePhases) {
-      tty->print_cr("Cancelling GC");
-    }
-    _cancelled_concgc = true;
-    OrderAccess::fence();
-    _shenandoah_policy->report_concgc_cancelled();
-  }
-  
-  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
-    while (! conc_workers()->is_idle()) { // wait.
-      Thread::current()->_ParkEvent->park(1) ;
-    }
-  }
-}
-
-bool ShenandoahHeap::cancelled_concgc() {
-  bool cancelled = _cancelled_concgc;
-  return cancelled;
-}
-
-void ShenandoahHeap::clear_cancelled_concgc() {
-  _cancelled_concgc = false;
-}
-
-int ShenandoahHeap::max_workers() {
-  return _max_workers;
-}
-
-int ShenandoahHeap::max_parallel_workers() {
-  return _max_parallel_workers;
-}
-int ShenandoahHeap::max_conc_workers() {
-  return _max_conc_workers;
-}
-
-void ShenandoahHeap::shutdown() {
-  // We set this early here, to let GC threads terminate before we ask the concurrent thread
-  // to terminate, which would otherwise block until all GC threads come to finish normally.
-  _cancelled_concgc = true;
-  _concurrent_gc_thread->shutdown();
-  cancel_concgc();
-}
-
-class ShenandoahStringSymbolTableUnlinkTask : public AbstractGangTask {
-private:
-  BoolObjectClosure* _is_alive;
-  int _initial_string_table_size;
-  int _initial_symbol_table_size;
-
-  bool  _process_strings;
-  int _strings_processed;
-  int _strings_removed;
-
-  bool  _process_symbols;
-  int _symbols_processed;
-  int _symbols_removed;
-
-public:
-  ShenandoahStringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
-    AbstractGangTask("String/Symbol Unlinking"),
-    _is_alive(is_alive),
-    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
-    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
-
-    _initial_string_table_size = StringTable::the_table()->table_size();
-    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
-    if (process_strings) {
-      StringTable::clear_parallel_claimed_index();
-    }
-    if (process_symbols) {
-      SymbolTable::clear_parallel_claimed_index();
-    }
-  }
-
-  ~ShenandoahStringSymbolTableUnlinkTask() {
-    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
-              err_msg("claim value %d after unlink less than initial string table size %d",
-                      StringTable::parallel_claimed_index(), _initial_string_table_size));
-    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
-              err_msg("claim value %d after unlink less than initial symbol table size %d",
-                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
-
-    if (G1TraceStringSymbolTableScrubbing) {
-      gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
-                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
-                             strings_processed(), strings_removed(),
-                             symbols_processed(), symbols_removed());
-    }
-  }
-
-  void work(uint worker_id) {
-    int strings_processed = 0;
-    int strings_removed = 0;
-    int symbols_processed = 0;
-    int symbols_removed = 0;
-    if (_process_strings) {
-      StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
-      Atomic::add(strings_processed, &_strings_processed);
-      Atomic::add(strings_removed, &_strings_removed);
-    }
-    if (_process_symbols) {
-      SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
-      Atomic::add(symbols_processed, &_symbols_processed);
-      Atomic::add(symbols_removed, &_symbols_removed);
-    }
-  }
-
-  size_t strings_processed() const { return (size_t)_strings_processed; }
-  size_t strings_removed()   const { return (size_t)_strings_removed; }
-
-  size_t symbols_processed() const { return (size_t)_symbols_processed; }
-  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
-};
-
-void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
-
-  workers()->set_active_workers(_max_parallel_workers);
-  ShenandoahStringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
-  workers()->run_task(&shenandoah_unlink_task);
-
-  //  if (G1StringDedup::is_enabled()) {
-  //    G1StringDedup::unlink(is_alive);
-  //  }
-}
-
-bool ShenandoahHeap::is_obj_ill(const oop obj) const {
-  return ! is_marked_current(obj);
-}
-
-void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
-  _need_update_refs = need_update_refs;
-}
-
-void ShenandoahHeap::set_need_reset_bitmaps(bool need_reset_bitmaps) {
-  _need_reset_bitmaps = need_reset_bitmaps;
-}
-
-bool ShenandoahHeap::need_reset_bitmaps() const {
-  return _need_reset_bitmaps;
-}
-
-ShenandoahJNICritical* ShenandoahHeap::jni_critical() {
-  return _jni_critical;
-}
-
-ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
-  HeapWord* next_addr = r->bottom() + ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
-  ShenandoahHeapRegion* next = heap_region_containing(next_addr);
-  if (next->is_humongous()) {
-    return next_compaction_region(next);
-  } else {
-    return next;
-  }
-}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,452 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAP_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAP_HPP
-
-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp"
-#include "gc_implementation/shenandoah/shenandoahConcurrentThread.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
-
-#include "gc/shared/cmBitMap.hpp"
-#include "gc/g1/heapRegionBounds.inline.hpp"
-
-#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/space.hpp"
-#include "oops/oop.hpp"
-#include "oops/markOop.hpp"
-
-
-class SpaceClosure;
-class GCTracer;
-
-class ShenandoahJNICritical;
-
-class ShenandoahJNICritical;
-
-class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
-public:
-  bool do_object_b(oop p) { return true; }
-};
-
-
-class ShenandoahIsAliveClosure: public BoolObjectClosure {
-
-public:
-  bool do_object_b(oop obj);
-};
-
-
-class ShenandoahHeapRegionClosure : public StackObj {
-  bool _complete;
-  void incomplete() {_complete = false;}
-
-public:
-  ShenandoahHeapRegionClosure(): _complete(true) {}
-
-  // typically called on each region until it returns true;
-  virtual bool doHeapRegion(ShenandoahHeapRegion* r) = 0;
-
-  bool complete() { return _complete;}
-};
-
-// A "ShenandoahHeap" is an implementation of a java heap for HotSpot.
-// It uses a new pauseless GC algorithm based on Brooks pointers.
-// Derived from G1
-
-// 
-// CollectedHeap  
-//    SharedHeap
-//      ShenandoahHeap
-
-class ShenandoahHeap : public CollectedHeap {
-
-private:
-
-  static ShenandoahHeap* _pgc;
-  ShenandoahCollectorPolicy* _shenandoah_policy;
-  VirtualSpace _storage;
-  ShenandoahHeapRegion* _first_region;
-  HeapWord* _first_region_bottom;
-  // Ordered array of regions  (name confusing with _regions)
-  ShenandoahHeapRegion** _ordered_regions;
-
-  // Sortable array of regions
-  ShenandoahHeapRegionSet* _free_regions;
-  ShenandoahHeapRegionSet* _collection_set;
-  ShenandoahHeapRegion* _currentAllocationRegion;
-  ShenandoahConcurrentMark* _scm;
-
-
-
-  ShenandoahConcurrentThread* _concurrent_gc_thread;
-
-  size_t _num_regions;
-  size_t _max_regions;
-  size_t _initialSize;
-#ifndef NDEBUG
-  uint _numAllocs;
-#endif
-  WorkGangBarrierSync barrierSync;
-  int _max_parallel_workers;
-  int _max_conc_workers;
-  int _max_workers;
-
-  FlexibleWorkGang* _conc_workers;
-  FlexibleWorkGang* _workers;
-
-
-  volatile size_t _used;
-
-  CMBitMap _mark_bit_map;
-  CMBitMap* _next_mark_bit_map;
-
-  bool* _in_cset_fast_test;
-  bool* _in_cset_fast_test_base;
-  uint _in_cset_fast_test_length;
-
-  bool _cancelled_concgc;
-
-  ShenandoahJNICritical* _jni_critical;
-
-public:
-  size_t _bytesAllocSinceCM;
-  size_t _bytes_allocated_during_cm;
-  size_t _bytes_allocated_during_cm_start;
-  size_t _max_allocated_gc;
-  size_t _allocated_last_gc;
-  size_t _used_start_gc;
-
-public:
-  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
-  HeapWord* allocate_from_gclab(Thread* thread, size_t size);
-  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
-  HeapWord* allocate_new_tlab(size_t word_size);
-  HeapWord* allocate_new_gclab(size_t word_size);
-private:
-  HeapWord* allocate_new_tlab(size_t word_size, bool mark);
-public:
-  HeapWord* allocate_memory(size_t word_size);
-
-  bool find_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions);
-  bool allocate_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions);
-
-  // For now we are ignoring eden.
-  inline bool should_alloc_in_eden(size_t size) { return false;}
-  void print_on(outputStream* st) const ;
-
-  ShenandoahHeap::Name kind() const {
-    return CollectedHeap::ShenandoahHeap;
-  }
-  
-  static ShenandoahHeap* heap();
-
-  ShenandoahCollectorPolicy *shenandoahPolicy() { return _shenandoah_policy;}
-
-  jint initialize();
-  static size_t conservative_max_heap_alignment() {
-    return HeapRegionBounds::max_size();
-  }
-
-  void post_initialize();
-  size_t capacity() const;
-  size_t used() const;
-  bool is_maximal_no_gc() const;
-  size_t max_capacity() const;
-  virtual bool is_in(const void* p) const;
-  bool is_in_partial_collection(const void* p);
-  bool is_scavengable(const void* addr);
-  virtual HeapWord* mem_allocate(size_t size, bool* what);
-  HeapWord* mem_allocate_locked(size_t size, bool* what);
-  virtual size_t unsafe_max_alloc();
-  bool can_elide_tlab_store_barriers() const;
-  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
-  bool can_elide_initializing_store_barrier(oop new_obj);
-  bool card_mark_must_follow_store() const;
-  bool supports_heap_inspection() const;
-  void collect(GCCause::Cause);
-  void do_full_collection(bool clear_all_soft_refs);
-  AdaptiveSizePolicy* size_policy();
-  ShenandoahCollectorPolicy* collector_policy() const;
-
-  void ensure_parsability(bool retire_tlabs);
-
-  void add_free_region(ShenandoahHeapRegion* r) {_free_regions->append(r);}
-  void clear_free_regions() {_free_regions->clear();}
-
-  void oop_iterate(ExtendedOopClosure* cl, bool skip_dirty_regions,
-                   bool skip_unreachable_objects);
-  void oop_iterate(ExtendedOopClosure* cl) {
-    oop_iterate(cl, false, false);
-  }
-
-  void roots_iterate(ExtendedOopClosure* cl);
-  void weak_roots_iterate(ExtendedOopClosure* cl);
-  
-  void object_iterate(ObjectClosure* cl);
-  void object_iterate_careful(ObjectClosureCareful* cl);
-  void object_iterate_no_from_space(ObjectClosure* cl);
-  void safe_object_iterate(ObjectClosure* cl);
-
-  void marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl);
-  void marked_object_iterate_careful(ShenandoahHeapRegion* region, ObjectClosure* cl);
-private:
-  void marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl, HeapWord* start, HeapWord* limit);
-
-public:
-  HeapWord* block_start(const void* addr) const;
-  size_t block_size(const HeapWord* addr) const;
-  bool block_is_obj(const HeapWord* addr) const;
-  jlong millis_since_last_gc();
-  void prepare_for_verify();
-  void print_gc_threads_on(outputStream* st) const;
-  void gc_threads_do(ThreadClosure* tcl) const;
-  void print_tracing_info() const;
-  void verify(bool silent,  VerifyOption vo);
-  bool supports_tlab_allocation() const;
-  virtual size_t tlab_capacity(Thread *thr) const;
-  void oop_iterate(MemRegion mr, ExtendedOopClosure* ecl);
-  void object_iterate_since_last_GC(ObjectClosure* cl);
-  void space_iterate(SpaceClosure* scl);
-  virtual size_t unsafe_max_tlab_alloc(Thread *thread) const;
-  virtual size_t max_tlab_size() const;
-
-  void resize_all_tlabs();
-  void accumulate_statistics_all_tlabs();
-  void accumulate_statistics_all_gclabs();
-
-  HeapWord* tlab_post_allocation_setup(HeapWord* obj, bool new_obj);
-
-  uint oop_extra_words();
-
-#ifndef CC_INTERP
-  void compile_prepare_oop(MacroAssembler* masm, Register obj = rax);
-#endif
-
-  Space* space_containing(const void* oop) const;
-  void gc_prologue(bool b);
-  void gc_epilogue(bool b);
-
-  void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions = false, bool skip_humongous_continuation = false) const;
-  ShenandoahHeapRegion* heap_region_containing(const void* addr) const;  
-  inline uint heap_region_index_containing(const void* addr) const;  
-
-/**
- * Maybe we need that at some point...
-
-  oop* resolve_oop_ptr(oop* p);
-
-  oop oop_containing_oop_ptr(oop* p);
-
-*/
-
-  void temp();
-
-  volatile unsigned int _concurrent_mark_in_progress;
-
-  volatile unsigned int _evacuation_in_progress;
-  volatile bool _update_references_in_progress;
-  bool _need_update_refs;
-  bool _need_reset_bitmaps;
-
-  void start_concurrent_marking();
-  void stop_concurrent_marking();
-  ShenandoahConcurrentMark* concurrentMark() { return _scm;}
-  ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }
-
-  ShenandoahJNICritical* jni_critical();
-
-  size_t bump_object_age(HeapWord* start, HeapWord* end);
-
-  inline bool mark_current(oop obj) const;
-  inline bool mark_current_no_checks(oop obj) const;
-  inline bool is_marked_current(oop obj) const;
-  
-  ReferenceProcessor* _ref_processor;
-  bool is_marked_prev(oop obj) const;
-
-  bool is_obj_ill(const oop obj) const;
-
-  void reset_mark_bitmap();
-  void reset_mark_bitmap_range(HeapWord* from, HeapWord* to);
-
-  bool is_bitmap_clear();
-
-  virtual void post_allocation_collector_specific_setup(HeapWord* obj);
-
-  void mark_object_live(oop obj, bool enqueue);
-
-  void prepare_for_concurrent_evacuation();
-  void do_evacuation();
-  void parallel_evacuate();
-
-  void initialize_brooks_ptr(HeapWord* brooks_ptr, HeapWord* object, bool new_obj = true);
-  void initialize_brooks_ptr(oop p);
-
-  inline oop maybe_update_oop_ref(oop* p);
-  void evacuate_region(ShenandoahHeapRegion* from_region, ShenandoahHeapRegion* to_region);
-  void parallel_evacuate_region(ShenandoahHeapRegion* from_region);
-  void verify_evacuated_region(ShenandoahHeapRegion* from_region);
-
-  void print_heap_regions(outputStream* st = tty) const;
-
-  void print_all_refs(const char* prefix);
-
-  void print_heap_objects(HeapWord* start, HeapWord* end);
-  void print_heap_locations(HeapWord* start, HeapWord* end);
-  void print_heap_object(oop p);
-
-  oop  evacuate_object(oop src, Thread* thread);
-  bool is_in_collection_set(const void* p) {
-    return heap_region_containing(p)->is_in_collection_set();
-  }
-  
-  void copy_object(oop p, HeapWord* s);
-  void verify_copy(oop p, oop c);
-  //  void assign_brooks_pointer(oop p, HeapWord* filler, HeapWord* copy);
-  void verify_heap_size_consistency();
-  void verify_heap_after_marking();
-  void verify_heap_after_evacuation();
-  void verify_heap_after_update_refs();
-  void verify_regions_after_update_refs();
-
-  static ByteSize ordered_regions_offset() { return byte_offset_of(ShenandoahHeap, _ordered_regions); }
-  static ByteSize first_region_bottom_offset() { return byte_offset_of(ShenandoahHeap, _first_region_bottom); }
-
-  // Where the first object may be placed.
-  HeapWord* start_of_heap() { return _first_region_bottom + 1;}
-  void cleanup_after_cancelconcgc();
-  void increase_used(size_t bytes);
-  void decrease_used(size_t bytes);
-  void set_used(size_t bytes);
-
-  int ensure_new_regions(int num_new_regions);
-
-  void set_evacuation_in_progress(bool in_progress);
-  bool is_evacuation_in_progress();
-
-  bool is_update_references_in_progress();
-  void set_update_references_in_progress(bool update_refs_in_progress);
-
-  inline bool need_update_refs() const;
-  void set_need_update_refs(bool update_refs);
-
-  bool need_reset_bitmaps() const;
-  void set_need_reset_bitmaps(bool need_reset_bitmaps);
-
-  ReferenceProcessor* ref_processor() { return _ref_processor;}	
-  virtual void ref_processing_init();
-  ShenandoahIsAliveClosure isAlive;
-  void evacuate_and_update_roots();
-  void prepare_for_update_references();
-
-  void update_references();
-
-  ShenandoahHeapRegionSet* free_regions();
-
-  void update_roots();
-
-  void acquire_pending_refs_lock();
-  void release_pending_refs_lock();
-
-  int max_workers();
-  int max_conc_workers();
-  int max_parallel_workers();
-  FlexibleWorkGang* conc_workers() const{ return _conc_workers;}
-  FlexibleWorkGang* workers() const{ return _workers;}
-
-  ShenandoahHeapRegion** heap_regions();
-  size_t num_regions();
-  size_t max_regions();
-
-  ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);
-
-  void recycle_dirty_regions();
-
-  void register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
-    assert(_in_cset_fast_test_base != NULL, "sanity");
-    assert(r->is_in_collection_set(), "invariant");
-    uint index = r->region_number();
-    assert(index < _in_cset_fast_test_length, "invariant");
-    assert(!_in_cset_fast_test_base[index], "invariant");
-    _in_cset_fast_test_base[index] = true;
-  }
-  bool in_cset_fast_test(HeapWord* obj) {
-    assert(_in_cset_fast_test != NULL, "sanity");
-    if (is_in(obj)) {
-      // no need to subtract the bottom of the heap from obj,
-      // _in_cset_fast_test is biased
-      uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
-      bool ret = _in_cset_fast_test[index];
-      // let's make sure the result is consistent with what the slower
-      // test returns
-      assert( ret || !is_in_collection_set(obj), "sanity");
-      assert(!ret ||  is_in_collection_set(obj), "sanity");
-      return ret;
-    } else {
-      return false;
-    }
-  }
-
-  static address in_cset_fast_test_addr() {
-    return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
-  }
-
-  void clear_cset_fast_test() {
-    assert(_in_cset_fast_test_base != NULL, "sanity");
-    memset(_in_cset_fast_test_base, false,
-           (size_t) _in_cset_fast_test_length * sizeof(bool));
-  }
-
-  GCTracer* tracer();
-  ShenandoahHeapRegionSet* collection_set() { return _collection_set; }
-  size_t tlab_used(Thread* ignored) const;
-
-private:
-
-  bool grow_heap_by();
-
-  void verify_evacuation(ShenandoahHeapRegion* from_region);
-  void set_concurrent_mark_in_progress(bool in_progress);
-
-  void oom_during_evacuation();
-  void cancel_concgc();
-public:
-  bool cancelled_concgc();
-  void clear_cancelled_concgc();
-
-  void shutdown();
-
-  bool concurrent_mark_in_progress();
-  size_t calculateUsed();
-  size_t calculateFree();
-
-private:
-  void verify_live();
-  void verify_liveness_after_concurrent_mark();
-
-  HeapWord* allocate_memory_with_lock(size_t word_size);
-  HeapWord* allocate_memory_heap_lock(size_t word_size);
-  HeapWord* allocate_memory_shenandoah_lock(size_t word_size);
-  HeapWord* allocate_memory_work(size_t word_size);
-  HeapWord* allocate_large_memory(size_t word_size);
-  ShenandoahHeapRegion* check_skip_humongous(ShenandoahHeapRegion* region) const;
-  ShenandoahHeapRegion* get_next_region_skip_humongous() const;
-  ShenandoahHeapRegion* get_current_region_skip_humongous() const;
-  ShenandoahHeapRegion* check_grow_heap(ShenandoahHeapRegion* current);
-  ShenandoahHeapRegion* get_next_region();
-  ShenandoahHeapRegion* get_current_region();
-
-  void set_from_region_protection(bool protect);
-
-public:
-  // Delete entries for dead interned string and clean up unreferenced symbols
-  // in symbol table, possibly in parallel.
-  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
-  
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAP_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-/*
-Copyright 2015 Red Hat, Inc. and/or its affiliates.
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
-
-#include "gc/shared/cmBitMap.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "runtime/atomic.inline.hpp"
-
-/*
- * Marks the object. Returns true if the object has not been marked before and has
- * been marked by this thread. Returns false if the object has already been marked,
- * or if a competing thread succeeded in marking this object.
- */
-inline bool ShenandoahHeap::mark_current(oop obj) const {
-#ifdef ASSERT
-  if (obj != oopDesc::bs()->resolve_oop(obj)) {
-    tty->print_cr("heap region containing obj:");
-    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
-    obj_region->print();
-    tty->print_cr("heap region containing forwardee:");
-    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->resolve_oop(obj));
-    forward_region->print();    
-  }
-#endif
-
-  assert(obj == oopDesc::bs()->resolve_oop(obj), "only mark forwarded copy of objects");
-  return mark_current_no_checks(obj);
-}
-
-inline bool ShenandoahHeap::mark_current_no_checks(oop obj) const {
-  return _next_mark_bit_map->parMark((HeapWord*) obj);
-}
-
-inline bool ShenandoahHeap::is_marked_current(oop obj) const {
-  return _next_mark_bit_map->isMarked((HeapWord*) obj);
-}
-
-inline bool ShenandoahHeap::need_update_refs() const {
-  return _need_update_refs;
-}
-
-inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
-  uintptr_t region_start = ((uintptr_t) addr); // & ~(ShenandoahHeapRegion::RegionSizeBytes - 1);
-  uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
-#ifdef ASSERT
-  if (!(index < _num_regions)) {
-    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions[0]->bottom()), _num_regions, ShenandoahHeapRegion::RegionSizeBytes);
-  }
-#endif
-  assert(index < _num_regions, "heap region index must be in range");
-  return index;
-}
-
-oop ShenandoahHeap::maybe_update_oop_ref(oop* p) {
-
-  assert((! is_in(p)) || (! heap_region_containing(p)->is_in_collection_set()),
-         "never update refs in from-space, unless evacuation has been cancelled"); 
-
-  oop heap_oop = oopDesc::load_heap_oop(p); // read p
-  if (! oopDesc::is_null(heap_oop)) {
-
-#ifdef ASSERT
-    if (! is_in(heap_oop)) {
-      print_heap_regions();
-      tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
-      assert(is_in(heap_oop), "object must be in heap");
-    }
-#endif
-    assert(is_in(heap_oop), "only ever call this on objects in the heap");
-    assert((! (is_in(p) && heap_region_containing(p)->is_in_collection_set())), "we don't want to update references in from-space");
-    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
-    if (forwarded_oop != heap_oop) {
-      // tty->print_cr("updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT, p2i(p), p2i(heap_oop), p2i(forwarded_oop));
-      assert(forwarded_oop->is_oop(), "oop required");
-      assert(is_in(forwarded_oop), "forwardee must be in heap");
-      assert(! heap_region_containing(forwarded_oop)->is_in_collection_set(), "forwardee must not be in collection set");
-      // If this fails, another thread wrote to p before us, it will be logged in SATB and the
-      // reference be updated later.
-      oop result = (oop) Atomic::cmpxchg_ptr(forwarded_oop, p, heap_oop);
-
-      if (result == heap_oop) { // CAS successful.
-	  return forwarded_oop;
-      } else {
-	return result;
-      }
-    } else {
-      return forwarded_oop;
-    }
-    /*
-      else {
-      tty->print_cr("not updating ref: "PTR_FORMAT, p2i(heap_oop));
-      }
-    */
-  }
-  return NULL;
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,339 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#include "memory/allocation.hpp"
-#include "gc/g1/heapRegionBounds.inline.hpp"
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "memory/universe.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-
-size_t ShenandoahHeapRegion::RegionSizeShift = 0;
-size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
-
-jint ShenandoahHeapRegion::initialize_heap_region(HeapWord* start, 
-						  size_t regionSizeWords, int index) {
-
-  reserved = MemRegion((HeapWord*) start, regionSizeWords);
-  ContiguousSpace::initialize(reserved, true, false);
-  liveData = 0;
-  _is_in_collection_set = false;
-  _region_number = index;
-#ifdef ASSERT
-  _mem_protection_level = 1; // Off, level 1.
-#endif
-  return JNI_OK;
-}
-
-int ShenandoahHeapRegion::region_number() {
-  return _region_number;
-}
-
-bool ShenandoahHeapRegion::rollback_allocation(uint size) {
-  set_top(top() - size);
-  return true;
-}
-
-void ShenandoahHeapRegion::clearLiveData() {
-  setLiveData(0);
-}
-
-void ShenandoahHeapRegion::setLiveData(size_t s) {
-  Atomic::store_ptr(s, (intptr_t*) &liveData);
-}
-
-void ShenandoahHeapRegion::increase_live_data(size_t s) {
-  size_t new_live_data = Atomic::add(s, &liveData);
-  assert(new_live_data <= used() || is_humongous(), "can't have more live data than used");
-}
-
-size_t ShenandoahHeapRegion::getLiveData() const {
-  return liveData;
-}
-
-size_t ShenandoahHeapRegion::garbage() const {
-  assert(used() >= getLiveData() || is_humongous(), err_msg("Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT, getLiveData(), used()));
-  size_t result = used() - getLiveData();
-  return result;
-}
-
-bool ShenandoahHeapRegion::is_in_collection_set() const {
-  return _is_in_collection_set;
-}
-
-#include <sys/mman.h>
-
-#ifdef ASSERT
-
-void ShenandoahHeapRegion::memProtectionOn() {
-  /*
-  tty->print_cr("protect memory on region level: "INT32_FORMAT, _mem_protection_level);
-  print(tty);
-  */
-  MutexLockerEx ml(ShenandoahMemProtect_lock, true);
-  assert(_mem_protection_level >= 1, "invariant");
-
-  if (--_mem_protection_level == 0) {
-    if (ShenandoahVerifyWritesToFromSpace) {
-      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
-      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
-    } else {
-      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
-      assert(! ShenandoahConcurrentEvacuation, "concurrent evacuation needs to be turned off for verifying from-space-reads");
-      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
-    }
-  }
-}
-
-void ShenandoahHeapRegion::memProtectionOff() {
-  /*
-  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
-  print(tty);
-  */
-  MutexLockerEx ml(ShenandoahMemProtect_lock, true);
-  assert(_mem_protection_level >= 0, "invariant");
-  if (_mem_protection_level++ == 0) {
-    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
-  }
-}
-
-#endif
-
-void ShenandoahHeapRegion::set_is_in_collection_set(bool b) {
-  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");
-
-  _is_in_collection_set = b;
-
-  if (b) {
-    // tty->print_cr("registering region in fast-cset");
-    // print();
-    ShenandoahHeap::heap()->register_region_with_in_cset_fast_test(this);
-  }
-
-#ifdef ASSERT
-  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {    
-    if (b) {
-      memProtectionOn();
-      assert(_mem_protection_level == 0, "need to be protected here");
-    } else {
-      assert(_mem_protection_level == 0, "need to be protected here");
-      memProtectionOff();
-    }
-  }
-#endif
-}
-
-ByteSize ShenandoahHeapRegion::is_in_collection_set_offset() {
-  return byte_offset_of(ShenandoahHeapRegion, _is_in_collection_set);
-}
-
-void ShenandoahHeapRegion::print_on(outputStream* st) const {
-  st->print_cr("ShenandoahHeapRegion: "PTR_FORMAT"/"INT32_FORMAT, p2i(this), _region_number);
-
-  if (is_in_collection_set())
-    st->print("C");
-  if (is_humongous_start()) {
-    st->print("H");
-  }
-  if (is_humongous_continuation()) {
-    st->print("h");
-  }
-  //else
-    st->print(" ");
-
-  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
-               getLiveData(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
-}
-
-
-class SkipUnreachableObjectToOopClosure: public ObjectClosure {
-  ExtendedOopClosure* _cl;
-  bool _skip_unreachable_objects;
-  ShenandoahHeap* _heap;
-
-public:
-  SkipUnreachableObjectToOopClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
-    _cl(cl), _skip_unreachable_objects(skip_unreachable_objects), _heap(ShenandoahHeap::heap()) {}
-  
-  void do_object(oop obj) {
-    
-    if ((! _skip_unreachable_objects) || _heap->is_marked_current(obj)) {
-      if (_skip_unreachable_objects) {
-        assert(_heap->is_marked_current(obj), "obj must be live");
-      }
-      obj->oop_iterate(_cl);
-    }
-    
-  }
-};
-
-void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
-  HeapWord* p = bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  while (p < top() && !(allow_cancel && heap->cancelled_concgc())) {
-    blk->do_object(oop(p));
-#ifdef ASSERT
-    if (ShenandoahVerifyReadsToFromSpace) {
-      memProtectionOff();
-      p += oop(p)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-      memProtectionOn();
-    } else {
-      p += oop(p)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-    }
-#else
-      p += oop(p)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-#endif
-  }
-}
-
-HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
-  HeapWord * limit = concurrent_iteration_safe_limit();
-  assert(limit <= top(), "sanity check");
-  for (HeapWord* p = bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE; p < limit;) {
-    size_t size = blk->do_object_careful(oop(p));
-    if (size == 0) {
-      return p;  // failed at p
-    } else {
-      p += size + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-    }
-  }
-  return NULL; // all done
-}
-
-void ShenandoahHeapRegion::oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects) {
-  SkipUnreachableObjectToOopClosure cl2(cl, skip_unreachable_objects);
-  object_iterate_interruptible(&cl2, false);
-}
-
-void ShenandoahHeapRegion::fill_region() {
-  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
-  
-  if (free() > (BrooksPointer::BROOKS_POINTER_OBJ_SIZE + CollectedHeap::min_fill_size())) {
-    HeapWord* filler = allocate(BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-    HeapWord* obj = allocate(end() - top());
-    sh->fill_with_object(obj, end() - obj);
-    sh->initialize_brooks_ptr(filler, obj);
-  } 
-}
-
-void ShenandoahHeapRegion::set_humongous_start(bool start) {
-  _humongous_start = start;
-}
-
-void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
-  _humongous_continuation = continuation;
-}
-
-bool ShenandoahHeapRegion::is_humongous() const {
-  return _humongous_start || _humongous_continuation;
-}
-
-bool ShenandoahHeapRegion::is_humongous_start() const {
-  return _humongous_start;
-}
-
-bool ShenandoahHeapRegion::is_humongous_continuation() const {
-  return _humongous_continuation;
-}
-
-void ShenandoahHeapRegion::do_reset() {
-  Space::initialize(reserved, true, false);
-  clearLiveData();
-  _humongous_start = false;
-  _humongous_continuation = false;
-}
-
-void ShenandoahHeapRegion::recycle() {
-  do_reset();
-  set_is_in_collection_set(false);
-}
-
-void ShenandoahHeapRegion::reset() {
-  assert(_mem_protection_level == 1, "needs to be unprotected here");
-  do_reset();
-  _is_in_collection_set = false;
-}
-
-HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
-  assert(MemRegion(bottom(), end()).contains(p),
-         err_msg("p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
-                 p2i(p), p2i(bottom()), p2i(end())));
-  if (p >= top()) {
-    return top();
-  } else {
-    HeapWord* last = bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-    HeapWord* cur = last;
-    while (cur <= p) {
-      last = cur;
-      cur += oop(cur)->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-    }
-    assert(oop(last)->is_oop(),
-           err_msg(PTR_FORMAT" should be an object start", p2i(last)));
-    return last;
-  }
-}
-
-void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
-  uintx region_size = ShenandoahHeapRegionSize;
-  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
-    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
-    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
-                       (uintx) HeapRegionBounds::min_size());
-  }
-
-  int region_size_log = log2_long((jlong) region_size);
-  // Recalculate the region size to make sure it's a power of
-  // 2. This means that region_size is the largest power of 2 that's
-  // <= what we've calculated so far.
-  region_size = ((uintx)1 << region_size_log);
-
-  // Now make sure that we don't go over or under our limits.
-  if (region_size < HeapRegionBounds::min_size()) {
-    region_size = HeapRegionBounds::min_size();
-  } else if (region_size > HeapRegionBounds::max_size()) {
-    region_size = HeapRegionBounds::max_size();
-  }
-
-  // And recalculate the log.
-  region_size_log = log2_long((jlong) region_size);
-
-  // Now, set up the globals.
-  guarantee(RegionSizeShift == 0, "we should only set it once");
-  RegionSizeShift = region_size_log;
-
-  guarantee(RegionSizeBytes == 0, "we should only set it once");
-  RegionSizeBytes = (size_t)region_size;
-
-  if (ShenandoahLogConfig) {
-    tty->print_cr("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
-    tty->print_cr("Region size shift: "SIZE_FORMAT, RegionSizeShift);
-    tty->print_cr("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
-    tty->print_cr("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
-  }
-}
-
-CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
-  return ShenandoahHeap::heap()->next_compaction_region(this);
-}
-
-void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
-  scan_and_forward(this, cp);
-}
-
-void ShenandoahHeapRegion::adjust_pointers() {
-  // Check first is there is any work to do.
-  if (used() == 0) {
-    return;   // Nothing to do.
-  }
-
-  scan_and_adjust_pointers(this);
-}
-
-void ShenandoahHeapRegion::compact() {
-  scan_and_compact(this);
-}
-
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,142 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
-
-#include "gc/shared/space.hpp"
-#include "memory/universe.hpp"
-#include "utilities/sizes.hpp"
-
-class ShenandoahHeapRegion : public ContiguousSpace {
-
-  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
-
-private:
-  // Auxiliary functions for scan_and_forward support.
-  // See comments for CompactibleSpace for more information.
-  inline HeapWord* scan_limit() const {
-    return top();
-  }
-
-  inline bool scanned_block_is_obj(const HeapWord* addr) const {
-    return true; // Always true, since scan_limit is top
-  }
-
-  inline size_t scanned_block_size(const HeapWord* addr) const {
-    oop obj = oop(addr+1);
-    size_t size = obj->size() + 1;
-    return size;
-  }
-
-    // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
-  inline size_t adjust_obj_size(size_t size) const {
-    return size + 1;
-  }
-
-  inline size_t obj_size(const HeapWord* addr) const {
-    return oop(addr+1)->size() + 1;
-  }
-
-  inline oop make_oop(HeapWord* addr) const {
-    return oop(addr+1);
-  }
-public:
-  static size_t RegionSizeBytes;
-  static size_t RegionSizeShift;
-
-private:
-  int _region_number;
-  volatile size_t liveData;
-  MemRegion reserved;
-  bool _is_in_collection_set;
-
-  bool _humongous_start;
-  bool _humongous_continuation;
-
-#ifdef ASSERT
-  int _mem_protection_level;
-#endif
-
-public:
-  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
-
-  jint initialize_heap_region(HeapWord* start, size_t regionSize, int index);
-
-
-  int region_number();
-
-  // Roll back the previous allocation of an object with specified size.
-  // Returns TRUE when successful, FALSE if not successful or not supported.
-  bool rollback_allocation(uint size);
-
-  void clearLiveData();
-  void setLiveData(size_t s);
-  void increase_live_data(size_t s);
-
-  size_t getLiveData() const;
-
-  void print_on(outputStream* st) const;
-
-  size_t garbage() const;
-
-  void recycle();
-  void reset();
-
-  void oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects);
-
-  void object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel);
-
-  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
-
-  HeapWord* block_start_const(const void* p) const;
-
-  // Just before GC we need to fill the current region.
-  void fill_region();
-
-  bool is_in_collection_set() const;
-
-  void set_is_in_collection_set(bool b);
-
-  void set_humongous_start(bool start);
-  void set_humongous_continuation(bool continuation);
-
-  bool is_humongous() const;
-  bool is_humongous_start() const;
-  bool is_humongous_continuation() const;
-
-#ifdef ASSERT
-  void memProtectionOn();
-  void memProtectionOff();
-#endif
-
-  static ByteSize is_in_collection_set_offset();
-  // The following are for humongous regions.  We need to save the 
-  markOop saved_mark_word;
-  void save_mark_word(oop obj) {saved_mark_word = obj->mark();}
-  markOop mark_word() {return saved_mark_word;}
-
-  virtual CompactibleSpace* next_compaction_space() const;
-
-  // Override for scan_and_forward support.
-  void prepare_for_compaction(CompactPoint* cp);
-  void adjust_pointers();
-  void compact();
-
-  virtual oop compact_oop(HeapWord* addr) const {
-    return oop(addr + 1);
-  }
-private:
-  void do_reset();
-
-};
-
-
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,343 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
-#include "memory/resourceArea.hpp"
-#include "utilities/quickSort.hpp"
-
-ShenandoahHeapRegionSet::ShenandoahHeapRegionSet(size_t max_regions) :
-  _max_regions(max_regions),
-  _regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
-  _garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
-  _free_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
-  _available(0), _used(0)
-{
-
-  _next = &_regions[0];
-  _current = NULL;
-  _next_free = &_regions[0];
-}
-
-ShenandoahHeapRegionSet::ShenandoahHeapRegionSet(size_t max_regions, ShenandoahHeapRegion** regions, size_t num_regions) :
-  _max_regions(num_regions),
-  _regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
-  _garbage_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2),
-  _free_threshold(ShenandoahHeapRegion::RegionSizeBytes / 2) {
-
-  // Make copy of the regions array so that we can sort without destroying the original.
-  memcpy(_regions, regions, sizeof(ShenandoahHeapRegion*) * num_regions);
-
-  _next = &_regions[0];
-  _current = NULL;
-  _next_free = &_regions[num_regions];
-}
-
-ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() {
-  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, _regions);
-}
-
-int compareHeapRegionsByGarbage(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) {
-  if (a == NULL) {
-    if (b == NULL) {
-      return 0;
-    } else {
-      return 1;
-    }
-  } else if (b == NULL) {
-    return -1;
-  }
-
-  size_t garbage_a = a->garbage();
-  size_t garbage_b = b->garbage();
-  
-  if (garbage_a > garbage_b) 
-    return -1;
-  else if (garbage_a < garbage_b)
-    return 1;
-  else return 0;
-}
-
-ShenandoahHeapRegion* ShenandoahHeapRegionSet::current() {
-  ShenandoahHeapRegion** current = _current;
-  if (current == NULL) {
-    return get_next();
-  } else {
-    return *(limit_region(current));
-  }
-}
-
-size_t ShenandoahHeapRegionSet::length() {
-  return _next_free - _regions;
-}
-
-size_t ShenandoahHeapRegionSet::available_regions() {
-  return (_regions + _max_regions) - _next_free;
-}
-
-void ShenandoahHeapRegionSet::append(ShenandoahHeapRegion* region) {
-  assert(_next_free < _regions + _max_regions, "need space for additional regions");
-  assert(SafepointSynchronize::is_at_safepoint() || ShenandoahHeap_lock->owned_by_self() || ! Universe::is_fully_initialized(), "only append regions to list while world is stopped");
-
-  // Grab next slot.
-  ShenandoahHeapRegion** next_free = _next_free;
-  _next_free++;
-
-  // Insert new region into slot.
-  *next_free = region;
-
-  _available += region->free();
-}
-
-void ShenandoahHeapRegionSet::clear() {
-  _current = NULL;
-  _next = _regions;
-  _next_free = _regions;
-  _available = 0;
-  _used = 0;
-}
-
-ShenandoahHeapRegion* ShenandoahHeapRegionSet::claim_next() {
-  ShenandoahHeapRegion** next = (ShenandoahHeapRegion**) Atomic::add_ptr(sizeof(ShenandoahHeapRegion**), &_next);
-  next--;
-  if (next < _next_free) {
-    return *next;
-  } else {
-    return NULL;
-  }
-}
-
-ShenandoahHeapRegion* ShenandoahHeapRegionSet::get_next() {
-
-  ShenandoahHeapRegion** next = _next;
-  if (next < _next_free) {
-    _current = next;
-    _next++;
-    return *next;
-  } else {
-    return NULL;
-  }
-}
-
-ShenandoahHeapRegion** ShenandoahHeapRegionSet::limit_region(ShenandoahHeapRegion** region) {
-  if (region >= _next_free) {
-    return NULL;
-  } else {
-    return region;
-  }
-}
-
-void ShenandoahHeapRegionSet::print() {
-  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
-    if (i == _current) {
-      tty->print_cr("C->");
-    }
-    if (i == _next) {
-      tty->print_cr("N->");
-    }
-    (*i)->print();
-  }
-}
-
-void ShenandoahHeapRegionSet::choose_collection_and_free_sets(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set) {
-  col_set->choose_collection_set(_regions, length());
-  free_set->choose_free_set(_regions, length());
-  //  assert(col_set->length() > 0 && free_set->length() > 0, "Better have some regions in the collection and free sets");
-
-}
-
-void ShenandoahHeapRegionSet::choose_collection_and_free_sets_min_garbage(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set, size_t min_garbage) {
-  col_set->choose_collection_set_min_garbage(_regions, length(), min_garbage);
-  free_set->choose_free_set(_regions, length());
-  //  assert(col_set->length() > 0 && free_set->length() > 0, "Better have some regions in the collection and free sets");
-}
-
-void ShenandoahHeapRegionSet::choose_collection_set(ShenandoahHeapRegion** regions, size_t length) {
-
-  clear();
-
-  assert(length <= _max_regions, "must not blow up array");
-
-  ShenandoahHeapRegion** tmp = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, length, mtGC);
-
-  memcpy(tmp, regions, sizeof(ShenandoahHeapRegion*) * length);
-
-  QuickSort::sort<ShenandoahHeapRegion*>(tmp, length, compareHeapRegionsByGarbage, false);
-
-  ShenandoahHeapRegion** r = tmp;
-  ShenandoahHeapRegion** end = tmp + length;
-
-  // We don't want the current allocation region in the collection set because a) it is still being allocated into and b) This is where the write barriers will allocate their copies.
-
-  while (r < end) {
-    ShenandoahHeapRegion* region = *r;
-    if (region->garbage() > _garbage_threshold && ! region->is_humongous()) {
-      //      tty->print("choose region %d with garbage = " SIZE_FORMAT " and live = " SIZE_FORMAT " and _garbage_threshold = " SIZE_FORMAT "\n",
-      //		 region->region_number(), region->garbage(), region->getLiveData(), _garbage_threshold);
-
-      assert(! region->is_humongous(), "no humongous regions in collection set");
-
-      if (region->getLiveData() == 0) {
-        // We can recycle it right away and put it in the free set.
-        ShenandoahHeap::heap()->decrease_used(region->used());
-        region->recycle();
-      } else {
-        append(region);
-        region->set_is_in_collection_set(true);
-      }
-      //    } else {
-      //      tty->print("rejected region %d with garbage = " SIZE_FORMAT " and live = " SIZE_FORMAT " and _garbage_threshold = " SIZE_FORMAT "\n",
-      //		 region->region_number(), region->garbage(), region->getLiveData(), _garbage_threshold);
-    }
-    r++;
-  }
-
-  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, tmp);
-
-}
-
-void ShenandoahHeapRegionSet::choose_collection_set_min_garbage(ShenandoahHeapRegion** regions, size_t length, size_t min_garbage) {
-
-  clear();
-
-  assert(length <= _max_regions, "must not blow up array");
-
-  ShenandoahHeapRegion** tmp = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, length, mtGC);
-
-  memcpy(tmp, regions, sizeof(ShenandoahHeapRegion*) * length);
-
-  QuickSort::sort<ShenandoahHeapRegion*>(tmp, length, compareHeapRegionsByGarbage, false);
-
-  ShenandoahHeapRegion** r = tmp;
-  ShenandoahHeapRegion** end = tmp + length;
-
-  // We don't want the current allocation region in the collection set because a) it is still being allocated into and b) This is where the write barriers will allocate their copies.
-
-  size_t garbage = 0;
-  while (r < end && garbage < min_garbage) {
-    ShenandoahHeapRegion* region = *r;
-    if (region->garbage() > _garbage_threshold && ! region->is_humongous()) {
-      append(region);
-      garbage += region->garbage();
-      region->set_is_in_collection_set(true);
-    }
-    r++;
-  }
-
-  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, tmp);
-
-  /*
-  tty->print_cr("choosen region with "SIZE_FORMAT" garbage given "SIZE_FORMAT" min_garbage", garbage, min_garbage);
-  */
-}
-
-
-void ShenandoahHeapRegionSet::choose_free_set(ShenandoahHeapRegion** regions, size_t length) {
-
-  clear();
-  ShenandoahHeapRegion** end = regions + length;
-
-  for (ShenandoahHeapRegion** r = regions; r < end; r++) {
-    ShenandoahHeapRegion* region = *r;
-    if ((! region->is_in_collection_set())
-        && (! region->is_humongous())) {
-      append(region);
-    }
-  }
-}  
-
-void ShenandoahHeapRegionSet::reclaim_humongous_regions() {
-
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  for (ShenandoahHeapRegion** r = _regions; r < _next_free; r++) {
-    // We can immediately reclaim humongous objects/regions that are no longer reachable.
-    ShenandoahHeapRegion* region = *r;
-    if (region->is_humongous_start()) {
-      oop humongous_obj = oop(region->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-      if (! heap->is_marked_current(humongous_obj)) {
-        reclaim_humongous_region_at(r);
-      }
-    }
-  }
-
-}
-
-void ShenandoahHeapRegionSet::reclaim_humongous_region_at(ShenandoahHeapRegion** r) {
-  assert((*r)->is_humongous_start(), "reclaim regions starting with the first one");
-
-  oop humongous_obj = oop((*r)->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-  size_t size = humongous_obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-  uint required_regions = (size * HeapWordSize) / ShenandoahHeapRegion::RegionSizeBytes  + 1;
-
-  if (ShenandoahTraceHumongous) {
-    tty->print_cr("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
-  }
-
-  assert((*r)->getLiveData() == 0, "liveness must be zero");
-
-  for (ShenandoahHeapRegion** i = r; i < r + required_regions; i++) {
-    ShenandoahHeapRegion* region = *i;
-
-    assert(i == r ? region->is_humongous_start() : region->is_humongous_continuation(),
-           "expect correct humongous start or continuation");
-
-    if (ShenandoahTraceHumongous) {
-      region->print();
-    }
-
-    region->reset();
-    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
-  }
-}
-
-void ShenandoahHeapRegionSet::set_concurrent_iteration_safe_limits() {
-  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
-    ShenandoahHeapRegion* region = *i;
-    region->set_concurrent_iteration_safe_limit(region->top());
-  }
-}
-
-size_t ShenandoahHeapRegionSet::garbage() {
-  size_t garbage = 0;
-  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
-    ShenandoahHeapRegion* region = *i;
-    garbage += region->garbage();
-  }
-  return garbage;
-}
-
-size_t ShenandoahHeapRegionSet::used() {
-  size_t used = 0;
-  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
-    ShenandoahHeapRegion* region = *i;
-    used += region->used();
-  }
-  return used;
-}
-
-size_t ShenandoahHeapRegionSet::live_data() {
-  size_t live = 0;
-  for (ShenandoahHeapRegion** i = _regions; i < _next_free; i++) {
-    ShenandoahHeapRegion* region = *i;
-    live += region->getLiveData();
-  }
-  return live;
-}
-
-void ShenandoahHeapRegionSet::decrease_available(size_t num_bytes) {
-  assert(_available >= num_bytes, "can't use more than available");
-  _available -= num_bytes;
-  _used += num_bytes;
-}
-
-size_t ShenandoahHeapRegionSet::available() const {
-  assert(ShenandoahHeap::heap()->capacity() - ShenandoahHeap::heap()->used()>= _available, "must not be > heap free");
-  return _available;
-}
-
-size_t ShenandoahHeapRegionSet::used() const {
-  assert(ShenandoahHeap::heap()->used() >= _used, "must not be > heap used");
-  return _used;
-}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
-
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
-
-
-class ShenandoahHeapRegionSet : public CHeapObj<mtGC> {
-private:
-  ShenandoahHeapRegion** _regions;
-  // current region to be returned from get_next()
-  ShenandoahHeapRegion** _current;
-  ShenandoahHeapRegion** _next;
-
-  // last inserted region.
-  ShenandoahHeapRegion** _next_free;
-  ShenandoahHeapRegion** _concurrent_next_free;
-
-  // Maximum size of the set.
-  const size_t _max_regions;
-
-  size_t _garbage_threshold;
-  size_t _free_threshold;
-
-  size_t _available;
-  size_t _used;
-
-  void choose_collection_set(ShenandoahHeapRegion** regions, size_t length);
-  void choose_collection_set_min_garbage(ShenandoahHeapRegion** regions, size_t length, size_t min_garbage);
-  void choose_free_set(ShenandoahHeapRegion** regions, size_t length);
-
-public:
-  ShenandoahHeapRegionSet(size_t max_regions);
-
-  ShenandoahHeapRegionSet(size_t max_regions, ShenandoahHeapRegion** regions, size_t num_regions);
-
-  ~ShenandoahHeapRegionSet();
-
-  void set_garbage_threshold(size_t minimum_garbage) { _garbage_threshold = minimum_garbage;}
-  void set_free_threshold(size_t minimum_free) { _free_threshold = minimum_free;}
-
-  /**
-   * Appends a region to the set. This is implemented to be concurrency-safe.
-   */
-  void append(ShenandoahHeapRegion* region);
-
-  void clear();
-
-  size_t length();
-  size_t used_regions() {
-    return _current - _regions;
-  }
-  size_t available_regions();
-  void print();
-
-  size_t garbage();
-  size_t used();
-  size_t live_data();
-  size_t reclaimed() {return _reclaimed;}
-
-  /**
-   * Returns a pointer to the current region.
-   */
-   ShenandoahHeapRegion* current();
-
-  /**
-   * Gets the next region for allocation (from free-list).
-   * If multiple threads are competing, one will succeed to
-   * increment to the next region, the others will fail and return
-   * the region that the succeeding thread got.
-   */
-  ShenandoahHeapRegion* get_next();
-
-  /**
-   * Claims next region for processing. This is implemented to be concurrency-safe.
-   */
-  ShenandoahHeapRegion* claim_next();
-
-  void choose_collection_and_free_sets(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set);
-  void choose_collection_and_free_sets_min_garbage(ShenandoahHeapRegionSet* col_set, ShenandoahHeapRegionSet* free_set, size_t min_garbage);
-
-  // Check for unreachable humongous regions and reclaim them.
-  void reclaim_humongous_regions();
-
-  void set_concurrent_iteration_safe_limits();
-
-  void decrease_available(size_t num_bytes);
-
-  size_t available() const;
-  size_t used() const;
-
-private:
-  void reclaim_humongous_region_at(ShenandoahHeapRegion** r);
-
-  ShenandoahHeapRegion** limit_region(ShenandoahHeapRegion** region);
-  size_t _reclaimed;
-
-};
-
-#endif //SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHumongous.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-
-/*
-Copyright 2015 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHUMONGOUS_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHHUMONGOUS_HPP
-
-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
-
-class ShenandoahHumongous : public AllStatic {
-
-public:
-  static uint required_regions(size_t bytes) {
-    return (bytes + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;
-  }
-};
-
-#endif
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahJNICritical.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-/*
-Copyright 2015 Red Hat, Inc. and/or its affiliates.
- */
-
-#include "gc_implementation/shenandoah/shenandoahJNICritical.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-
-#include "gc/shared/gcLocker.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/vmThread.hpp"
-
-class VM_ShenandoahJNICriticalOperation : public VM_Operation {
-private:
-  VM_Operation* _target;
-public:
-  VM_ShenandoahJNICriticalOperation(VM_Operation* target);
-  VMOp_Type type() const;
-  bool doit_prologue();
-  void doit_epilogue();
-  void doit();
-  const char* name() const;
-};
-
-ShenandoahJNICritical::ShenandoahJNICritical() : _op_waiting_for_jni_critical(NULL) {
-}
-
-/*
- * This is called by the Java thread who leaves the last JNI critical block.
- */
-void ShenandoahJNICritical::notify_jni_critical() {
-  assert(Thread::current()->is_Java_thread(), "call only from Java thread");
-  assert(_op_waiting_for_jni_critical != NULL, "must be waiting for jni critical notification");  
-
-  MonitorLockerEx ml(ShenandoahJNICritical_lock, true);
-
-  VMThread::execute(_op_waiting_for_jni_critical);
-  _op_waiting_for_jni_critical = NULL;
-
-  ml.notify_all();
-
-}
-
-/*
- * This is called by the VM thread, if it determines that the task must wait
- * for JNI critical regions to be left.
- */
-void ShenandoahJNICritical::set_waiting_for_jni_before_gc(VM_Operation* op) {
-  assert(Thread::current()->is_VM_thread(), "call only from VM thread");
-  _op_waiting_for_jni_critical = op;
-}
-
-/**
- * This is called by the Shenandoah concurrent thread in order
- * to execute a VM_Operation on the VM thread, that needs to perform
- * a JNI critical region check.
- */
-void ShenandoahJNICritical::execute_in_vm_thread(VM_Operation* op) {
-  MonitorLockerEx ml(ShenandoahJNICritical_lock, true);
-  VM_ShenandoahJNICriticalOperation jni_op(op);
-  VMThread::execute(&jni_op);
-  while (_op_waiting_for_jni_critical != NULL) {
-    ml.wait(true);
-  }
-}
-
-
-VM_ShenandoahJNICriticalOperation::VM_ShenandoahJNICriticalOperation(VM_Operation* target)
-  : _target(target) {
-}
-
-VM_Operation::VMOp_Type VM_ShenandoahJNICriticalOperation::type() const {
-  return _target->type();
-}
-
-const char* VM_ShenandoahJNICriticalOperation::name() const {
-  return _target->name();
-}
-
-bool VM_ShenandoahJNICriticalOperation::doit_prologue() {
-  return _target->doit_prologue();
-}
-
-void VM_ShenandoahJNICriticalOperation::doit_epilogue() {
-  _target->doit_epilogue();
-}
-
-void VM_ShenandoahJNICriticalOperation::doit() {
-  if (! GC_locker::check_active_before_gc()) {
-    _target->doit();
-  } else {
-
-    if (ShenandoahTraceJNICritical) {
-      gclog_or_tty->print_cr("Deferring JNI critical op because of active JNI critical regions");
-    }
-
-    // This makes the GC background thread wait, and kick off evacuation as
-    // soon as JNI notifies us that critical regions have all been left.
-    ShenandoahHeap *sh = ShenandoahHeap::heap();
-    sh->jni_critical()->set_waiting_for_jni_before_gc(this);
-  }
-}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahJNICritical.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-/*
-Copyright 2015 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHJNICRITICAL_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHJNICRITICAL_HPP
-
-#include "gc/shared/vmGCOperations.hpp"
-#include "memory/allocation.hpp"
-
-class ShenandoahJNICritical : public CHeapObj<mtGC> {
-private:
-  VM_Operation* _op_waiting_for_jni_critical;
-
-public:
-  ShenandoahJNICritical();
-  void notify_jni_critical();
-  void set_waiting_for_jni_before_gc(VM_Operation* op);
-  void execute_in_vm_thread(VM_Operation* op);
-};
-
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHJNICRITICAL_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,397 +0,0 @@
-/*
-  Copyright 2014 Red Hat, Inc. and/or its affiliates.
-*/
-
-#include "code/codeCache.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc_implementation/shenandoah/brooksPointer.hpp"
-#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
-#include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
-#include "gc/serial/markSweep.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/thread.hpp"
-#include "utilities/copy.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/workgroup.hpp"
-
-
-
-void ShenandoahMarkCompact::allocate_stacks() {
-  MarkSweep::_preserved_count_max = 0;
-  MarkSweep::_preserved_marks = NULL;
-  MarkSweep::_preserved_count = 0;
-}
-
-void ShenandoahMarkCompact::do_mark_compact() {
-  ShenandoahHeap* _heap = ShenandoahHeap::heap();
-
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  IsGCActiveMark is_active;
-
-  // if concgc gets cancelled between phases the bitmap doesn't get cleared up.
-  _heap->reset_mark_bitmap();
- 
-  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
-  assert(_heap->is_bitmap_clear(), "require cleared bitmap");
-  assert(!_heap->concurrent_mark_in_progress(), "can't do full-GC while marking is in progress");
-  assert(!_heap->is_evacuation_in_progress(), "can't do full-GC while evacuation is in progress");
-  assert(!_heap->is_update_references_in_progress(), "can't do full-GC while updating of references is in progress");
-  BarrierSet* _old_barrier_set = oopDesc::bs();
-
-  oopDesc::set_bs(new ShenandoahMarkCompactBarrierSet());
- 
-  _heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::full_gc);
- 
-  // We need to clear the is_in_collection_set flag in all regions.
-  ShenandoahHeapRegion** regions = _heap->heap_regions();
-  size_t num_regions = _heap->num_regions();
-  for (size_t i = 0; i < num_regions; i++) {
-    regions[i]->set_is_in_collection_set(false);
-  }
-  _heap->clear_cset_fast_test();
-
-  if (ShenandoahVerify) {
-    // Full GC should only be called between regular concurrent cycles, therefore
-    // those verifications should be valid.
-    _heap->verify_heap_after_evacuation();
-    _heap->verify_heap_after_update_refs();
-  }
- 
-  if (ShenandoahTraceFullGC) {
-    gclog_or_tty->print_cr("Shenandoah-full-gc: start with heap used: "SIZE_FORMAT" MB", _heap->used() / M);
-    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 1: marking the heap");
-    // _heap->print_heap_regions();
-  }
- 
-  if (UseTLAB) {
-    _heap->accumulate_statistics_all_tlabs();
-    _heap->ensure_parsability(true);
-  }
-  
-  _heap->cleanup_after_cancelconcgc();
-  
-  ReferenceProcessor* rp = _heap->ref_processor();
- 
-  // hook up weak ref data so it can be used during Mark-Sweep
-  assert(MarkSweep::ref_processor() == NULL, "no stomping");
-  assert(rp != NULL, "should be non-NULL");
-  assert(rp == ShenandoahHeap::heap()->ref_processor(), "Precondition"); 
-  bool clear_all_softrefs = true;  //fixme
-  MarkSweep::_ref_processor = rp;
-  rp->setup_policy(clear_all_softrefs);
-
-  CodeCache::gc_prologue();
-  allocate_stacks();
-
-  // We should save the marks of the currently locked biased monitors.
-  // The marking doesn't preserve the marks of biased objects.
-  BiasedLocking::preserve_marks();
-
-  phase1_mark_heap();
- 
-  if (ShenandoahTraceFullGC) {
-    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 2: calculating target addresses");
-  }
-  phase2_calculate_target_addresses();
- 
-  if (ShenandoahTraceFullGC) {
-    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 3: updating references");
-  }
-
-  // Don't add any more derived pointers during phase3
-  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
-
-  phase3_update_references();
- 
-  if (ShenandoahTraceFullGC) {
-    gclog_or_tty->print_cr("Shenandoah-full-gc: phase 4: compacting objects");
-  }
-
-  phase4_compact_objects();
-
- 
-  MarkSweep::restore_marks();
-  BiasedLocking::restore_marks();
-  GenMarkSweep::deallocate_stacks();
-
-  CodeCache::gc_epilogue();
-  JvmtiExport::gc_epilogue();
-
-  // refs processing: clean slate
-  MarkSweep::_ref_processor = NULL;
-
- 
-  if (ShenandoahVerify) {
-    _heap->verify_heap_after_evacuation();
-    _heap->verify_heap_after_update_refs();
-  }
-
-  _heap->reset_mark_bitmap();
-
-  if (UseTLAB) {
-    _heap->resize_all_tlabs();
-  }
-
-  if (ShenandoahTraceFullGC) {
-    gclog_or_tty->print_cr("Shenandoah-full-gc: finish with heap used: "SIZE_FORMAT" MB", _heap->used() / M);
-  }
-
-  _heap->_bytesAllocSinceCM = 0;
-
-  oopDesc::set_bs(_old_barrier_set); 
-
-  _heap->set_need_update_refs(false);
-
-  _heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::full_gc);
-}
- 
-void ShenandoahMarkCompact::phase1_mark_heap() {
-  ShenandoahHeap* _heap = ShenandoahHeap::heap();
-  ReferenceProcessor* rp = _heap->ref_processor();
-
-  MarkSweep::_ref_processor = rp;
- 
-  // Need cleared claim bits for the roots processing
-  ClassLoaderDataGraph::clear_claimed_marks();
- 
-  MarkingCodeBlobClosure follow_code_closure(&MarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
-  {
-    ShenandoahRootProcessor rp(_heap, 1);
-    rp.process_strong_roots(&MarkSweep::follow_root_closure,
-			    &MarkSweep::follow_cld_closure,
-			    &follow_code_closure);
-  }
- 
- 
-  _heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::weakrefs);
-  bool clear_soft_refs = false; //fixme 
-  rp->setup_policy(clear_soft_refs);
- 
-  const ReferenceProcessorStats& stats =
-    rp->process_discovered_references(&MarkSweep::is_alive,
-				      &MarkSweep::keep_alive,
-				      &MarkSweep::follow_stack_closure,
-				      NULL,
-				      _heap->collector_policy()->conc_timer(),
-				      _heap->tracer()->gc_id());
- 
-  //     heap->tracer()->report_gc_reference_stats(stats);
- 
-  _heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::weakrefs);
- 
-  // Unload classes and purge the SystemDictionary.
-  bool purged_class = SystemDictionary::do_unloading(&MarkSweep::is_alive);
- 
-  // Unload nmethods.
-  CodeCache::do_unloading(&MarkSweep::is_alive, purged_class);
- 
-  // Prune dead klasses from subklass/sibling/implementor lists.
-  Klass::clean_weak_klass_links(&MarkSweep::is_alive);
- 
-  // Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
-  _heap->unlink_string_and_symbol_table(&MarkSweep::is_alive);
- 
-  if (VerifyDuringGC) {
-    HandleMark hm;  // handle scope
-    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-    //    Universe::heap()->prepare_for_verify();
-    _heap->prepare_for_verify();
-    // Note: we can verify only the heap here. When an object is
-    // marked, the previous value of the mark word (including
-    // identity hash values, ages, etc) is preserved, and the mark
-    // word is set to markOop::marked_value - effectively removing
-    // any hash values from the mark word. These hash values are
-    // used when verifying the dictionaries and so removing them
-    // from the mark word can make verification of the dictionaries
-    // fail. At the end of the GC, the original mark word values
-    // (including hash values) are restored to the appropriate
-    // objects.
-    if (!VerifySilently) {
-      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
-    }
-    //    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
-    _heap->verify(VerifySilently, VerifyOption_G1UseMarkWord);
-    if (!VerifySilently) {
-      gclog_or_tty->print_cr("]");
-    }
-  }
-}
- 
-class ShenandoahPrepareForCompaction : public ShenandoahHeapRegionClosure {
-  CompactPoint _cp;
-  ShenandoahHeap* _heap;
-  bool _dead_humongous;
-
-public:
-  ShenandoahPrepareForCompaction() :
-    _heap(ShenandoahHeap::heap()),
-    _dead_humongous(false) {
-  }
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    // We need to save the contents
-    if (!r->is_humongous()) {
-      if (_cp.space == NULL) {
-	_cp.space = r;
-	_cp.threshold = _heap->start_of_heap();
-      }
-      _dead_humongous = false;
-      r->prepare_for_compaction(&_cp);
-    }  else {
-      if (r->is_humongous_start()) {
-        oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-	if (obj->is_gc_marked()) {
-	  obj->forward_to(obj);
-	  _dead_humongous = false;
-	} else {
-	  if (_cp.space == NULL) {
-	    _cp.space = r;
-	    _cp.threshold = _heap->start_of_heap();
-	  }
-	  _dead_humongous = true;
-	  r->reset();
-	}
-      } else {
-	assert(r->is_humongous_continuation(), "expect humongous continuation");
-	if (_dead_humongous) {
-	  r->reset();
-	}
-      }
-    }
-    return false;
-  }
-};
-  
-void ShenandoahMarkCompact::phase2_calculate_target_addresses() {
-  ShenandoahPrepareForCompaction prepare;
-  ShenandoahHeap::heap()->heap_region_iterate(&prepare);
-}
- 
-
-class ShenandoahMarkCompactAdjustPointersClosure : public ShenandoahHeapRegionClosure {
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    if (r->is_humongous()) {
-      if (r->is_humongous_start()) {
-        // We must adjust the pointers on the single H object.
-        oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-	assert(obj->is_gc_marked(), "should be marked");
-	// point all the oops to the new location
-	MarkSweep::adjust_pointers(obj);
-      }
-    } else {
-      r->adjust_pointers();
-    }
-    return false;
-  }
-};
-
-void ShenandoahMarkCompact::phase3_update_references() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
- 
-    // Need cleared claim bits for the roots processing
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  CodeBlobToOopClosure adjust_code_closure(&MarkSweep::adjust_pointer_closure,
-					   CodeBlobToOopClosure::FixRelocations);
-
-  {
-    ShenandoahRootProcessor rp(heap, 1);
-    rp.process_all_roots(&MarkSweep::adjust_pointer_closure,
-			 &MarkSweep::adjust_cld_closure,
-			 &adjust_code_closure);
-  }
-
-  assert(MarkSweep::ref_processor() == heap->ref_processor(), "Sanity");
-
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  heap->weak_roots_iterate(&MarkSweep::adjust_pointer_closure);
-
-  //  if (G1StringDedup::is_enabled()) {
-  //    G1StringDedup::oops_do(&MarkSweep::adjust_pointer_closure);
-  //  }
-
-  MarkSweep::adjust_marks();
-
-  ShenandoahMarkCompactAdjustPointersClosure apc;
-  heap->heap_region_iterate(&apc);
-}
-
-class ShenandoahCleanupObjectClosure : public ObjectClosure {
-  void  do_object(oop p) {
-    ShenandoahHeap::heap()->initialize_brooks_ptr(p);
-  }
-};
-
-class CompactObjectsClosure : public ShenandoahHeapRegionClosure {
-
-public:
-
-  CompactObjectsClosure() {
-  }
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    if (r->is_humongous()) {
-      if (r->is_humongous_start()) {
-        oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-	assert(obj->is_gc_marked(), "expect marked humongous object");
-	obj->init_mark();
-      }
-    } else {
-      r->compact();
-    }
-
-    return false;
-  }
-
-};
-
-class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
-  size_t _live;
-  ShenandoahHeap* _heap;
-public:
-
-  ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) { 
-    _heap->clear_free_regions();
-  }
-
-  bool doHeapRegion(ShenandoahHeapRegion* r) {
-    if (r->is_humongous()) {
-      if (r->is_humongous_start()) {
-	oop obj = oop(r->bottom() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE);
-	size_t size = obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
-      }
-      _live += ShenandoahHeapRegion::RegionSizeBytes;
-
-    } else {
-      size_t live = r->used();
-      if (live == 0) _heap->add_free_region(r);
-      r->setLiveData(live);
-      _live += live;
-    }
-
-    return false;
-  }
-  
-  size_t getLive() { return _live;}
-
-};
-
-void ShenandoahMarkCompact::phase4_compact_objects() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  CompactObjectsClosure coc;
-  heap->heap_region_iterate(&coc);
-  
-  ShenandoahCleanupObjectClosure cleanup;
-  heap->object_iterate(&cleanup);
-
-  ShenandoahPostCompactClosure post_compact;
-  heap->heap_region_iterate(&post_compact);
-
-  heap->set_used(post_compact.getLive());
-}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
-
-#include "gc/serial/genMarkSweep.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-
-class HeapWord;
-class ShenandoahMarkCompactBarrierSet;
-
-/**
- * This implements full-GC (e.g. when invoking System.gc() ) using a
- * mark-compact algorithm. It's implemented in four phases:
- *
- * 1. Mark all live objects of the heap by traversing objects starting at GC roots.
- * 2. Calculate the new location of each live object. This is done by sequentially scanning
- *    the heap, keeping track of a next-location-pointer, which is then written to each
- *    object's brooks ptr field.
- * 3. Update all references. This is implemented by another scan of the heap, and updates
- *    all references in live objects by what's stored in the target object's brooks ptr.
- * 3. Compact the heap by copying all live objects to their new location.
- */
-
-class ShenandoahMarkCompact: AllStatic {
-
-public:
-
-  static void do_mark_compact();
-
-private:
-
-  static void phase1_mark_heap();
-  static void phase2_calculate_target_addresses();
-  static void phase3_update_references();
-  static void phase4_compact_objects();
-  static void finish_compaction(HeapWord* last_addr);
-
-  static void allocate_stacks();
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,118 +0,0 @@
-#include "precompiled.hpp"
-
-#include "classfile/stringTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
-#include "memory/allocation.inline.hpp"
-#include "runtime/fprofiler.hpp"
-#include "runtime/mutex.hpp"
-#include "services/management.hpp"
-
-ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers) :
-  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
-  _srs(n_workers)
-{
-}
-
-void ShenandoahRootProcessor::process_roots(OopClosure* strong_oops,
-					    OopClosure* weak_oops,
-					    CLDClosure* strong_clds,
-					    CLDClosure* weak_clds,
-					    CLDClosure* thread_stack_clds,
-					    CodeBlobClosure* strong_code) {
-  process_java_roots(strong_oops, thread_stack_clds, strong_clds, weak_clds, strong_code, 0);
-  process_vm_roots(strong_oops, weak_oops, 0);
-  
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_CodeCache_oops_do)) {
-    CodeCache::blobs_do(strong_code);
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_weak_oops_do)) {
-    ShenandoahAlwaysTrueClosure always_true;
-    JNIHandles::weak_oops_do(&always_true, weak_oops);
-  }
-
-  _process_strong_tasks->all_tasks_completed(n_workers());
-}
-
-void ShenandoahRootProcessor::process_strong_roots(OopClosure* oops,
-                                           CLDClosure* clds,
-                                           CodeBlobClosure* blobs) {
-
-  process_java_roots(oops, clds, clds, NULL, blobs, 0);
-  process_vm_roots(oops, NULL, 0);
-
-  _process_strong_tasks->all_tasks_completed(n_workers());
-}
-
-void ShenandoahRootProcessor::process_all_roots(OopClosure* oops,
-                                        CLDClosure* clds,
-                                        CodeBlobClosure* blobs) {
-
-  process_java_roots(oops, NULL, clds, clds, NULL, 0);
-  process_vm_roots(oops, oops, 0);
-
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_CodeCache_oops_do)) {
-    CodeCache::blobs_do(blobs);
-  }
-
-  _process_strong_tasks->all_tasks_completed(n_workers());
-}
-
-void ShenandoahRootProcessor::process_java_roots(OopClosure* strong_roots,
-                                                 CLDClosure* thread_stack_clds,
-                                                 CLDClosure* strong_clds,
-                                                 CLDClosure* weak_clds,
-                                                 CodeBlobClosure* strong_code,
-                                                 uint worker_i)
-{
-  //assert(thread_stack_clds == NULL || weak_clds == NULL, "There is overlap between those, only one may be set");
-  // Iterating over the CLDG and the Threads are done early to allow us to
-  // first process the strong CLDs and nmethods and then, after a barrier,
-  // let the thread process the weak CLDs and nmethods.
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_ClassLoaderDataGraph_oops_do)) {
-    ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
-  }
-
-  bool is_par = n_workers() > 1;
-  ResourceMark rm;
-  Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
-}
-
-void ShenandoahRootProcessor::process_vm_roots(OopClosure* strong_roots,
-                                               OopClosure* weak_roots,
-                                               uint worker_i)
-{
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Universe_oops_do)) {
-    Universe::oops_do(strong_roots);
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_oops_do)) {
-    JNIHandles::oops_do(strong_roots);
-  }
-  if (!_process_strong_tasks-> is_task_claimed(SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do)) {
-    ObjectSynchronizer::oops_do(strong_roots);
-  }
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_FlatProfiler_oops_do)) {
-    FlatProfiler::oops_do(strong_roots);
-  }
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Management_oops_do)) {
-    Management::oops_do(strong_roots);
-  }
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_jvmti_oops_do)) {
-    JvmtiExport::oops_do(strong_roots);
-  }
-  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_SystemDictionary_oops_do)) {
-    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
-  }
-  // All threads execute the following. A specific chunk of buckets
-  // from the StringTable are the individual tasks.
-  if (weak_roots != NULL) {
-    StringTable::possibly_parallel_oops_do(weak_roots);
-  }
-}
-
-uint ShenandoahRootProcessor::n_workers() const {
-  return _srs.n_threads();
-}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-
-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
-
-#include "gc/shared/strongRootsScope.hpp"
-#include "memory/allocation.hpp"
-#include "runtime/mutex.hpp"
-
-class CLDClosure;
-class CodeBlobClosure;
-class G1CollectedHeap;
-class G1GCPhaseTimes;
-class G1ParPushHeapRSClosure;
-class Monitor;
-class OopClosure;
-class SubTasksDone;
-
-class ShenandoahRootProcessor : public StackObj {
-  SubTasksDone* _process_strong_tasks;
-  StrongRootsScope _srs;
-
-  enum Shenandoah_process_roots_tasks {
-    SHENANDOAH_RP_PS_Universe_oops_do,
-    SHENANDOAH_RP_PS_JNIHandles_oops_do,
-    SHENANDOAH_RP_PS_JNIHandles_weak_oops_do,
-    SHENANDOAH_RP_PS_ObjectSynchronizer_oops_do,
-    SHENANDOAH_RP_PS_FlatProfiler_oops_do,
-    SHENANDOAH_RP_PS_Management_oops_do,
-    SHENANDOAH_RP_PS_SystemDictionary_oops_do,
-    SHENANDOAH_RP_PS_ClassLoaderDataGraph_oops_do,
-    SHENANDOAH_RP_PS_jvmti_oops_do,
-    SHENANDOAH_RP_PS_CodeCache_oops_do,
-    SHENANDOAH_RP_PS_filter_satb_buffers,
-    SHENANDOAH_RP_PS_refProcessor_oops_do,
-    // Leave this one last.
-    SHENANDOAH_RP_PS_NumElements
-  };
-
-  void process_java_roots(OopClosure* scan_non_heap_roots,
-                          CLDClosure* thread_stack_clds,
-                          CLDClosure* scan_strong_clds,
-                          CLDClosure* scan_weak_clds,
-                          CodeBlobClosure* scan_strong_code,
-                          uint worker_i);
-
-  void process_vm_roots(OopClosure* scan_non_heap_roots,
-                        OopClosure* scan_non_heap_weak_roots,
-                        uint worker_i);
-
-public:
-  ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers);
-
-  void process_roots(OopClosure* strong_oops,
-		     OopClosure* weak_oops,
-		     CLDClosure* strong_clds,
-		     CLDClosure* weak_clds,
-		     CLDClosure* thread_stack_clds,
-		     CodeBlobClosure* strong_code);
-
-  // Apply oops, clds and blobs to all strongly reachable roots in the system
-  void process_strong_roots(OopClosure* oops,
-                            CLDClosure* clds,
-                            CodeBlobClosure* blobs);
-
-  // Apply oops, clds and blobs to strongly and weakly reachable roots in the system
-  void process_all_roots(OopClosure* oops,
-                         CLDClosure* clds,
-                         CodeBlobClosure* blobs);
-
-  // Number of worker threads used by the root processor.
-  uint n_workers() const;
-};
-
-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,19 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-
-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "oops/oop.inline.hpp"
-
-JRT_LEAF(bool, ShenandoahRuntime::compare_and_swap_object(HeapWord* addr, oopDesc* newval, oopDesc* old))
-  bool success;
-  oop expected;
-  do {
-    expected = old;
-    old = oopDesc::atomic_compare_exchange_oop(newval, addr, expected, true);
-    success  = (old == expected);
-  } while ((! success) && oopDesc::bs()->resolve_oop(old) == oopDesc::bs()->resolve_oop(expected));
-
-  return success;
-JRT_END
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHRUNTIME_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHRUNTIME_HPP
-
-#include "oops/oop.hpp"
-
-class ShenandoahRuntime : AllStatic {
-public:
-  static bool compare_and_swap_object(HeapWord* adr, oopDesc* newval, oopDesc* expected);
-};
-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHRUNTIME_HPP
--- a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,244 +0,0 @@
-/*
-  Copyright 2014 Red Hat, Inc. and/or its affiliates.
-*/
-#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
-#include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
-
-VM_Operation::VMOp_Type VM_ShenandoahInitMark::type() const {
-  return VMOp_ShenandoahInitMark;
-}
-
-const char* VM_ShenandoahInitMark::name() const {
-  return "Shenandoah Initial Marking";
-}
-
-void VM_ShenandoahInitMark::doit() {
-  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark);
-
-  if (sh->need_reset_bitmaps()) {
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
-    sh->reset_mark_bitmap();
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
-  }
-
-  assert(sh->is_bitmap_clear(), "need clear marking bitmap");
-
-  sh->set_need_reset_bitmaps(true);
-
-  if (ShenandoahGCVerbose)
-    tty->print("vm_ShenandoahInitMark\n");
-  sh->start_concurrent_marking();
-
-  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark);
-
-  if (! ShenandoahConcurrentMarking) {
-    sh->concurrentMark()->mark_from_roots();
-    VM_ShenandoahStartEvacuation finishMark;
-    finishMark.doit();
-  }
-}
-
-VM_Operation::VMOp_Type VM_ShenandoahFullGC::type() const {
-  return VMOp_ShenandoahFullGC;
-}
-
-void VM_ShenandoahFullGC::doit() {
-
-  ShenandoahMarkCompact::do_mark_compact();
-
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::resize_tlabs);
-  sh->resize_all_tlabs();
-  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::resize_tlabs);
-}
-
-const char* VM_ShenandoahFullGC::name() const {
-  return "Shenandoah Full GC";
-}
-
-
-bool VM_ShenandoahReferenceOperation::doit_prologue() {
-  ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
-  sh->acquire_pending_refs_lock();
-  return true;
-}
-
-void VM_ShenandoahReferenceOperation::doit_epilogue() {
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  sh->release_pending_refs_lock();
-}
-
-void VM_ShenandoahStartEvacuation::doit() {
-
-  // We need to do the finish mark here, so that a JNI critical region
-  // can't divide it from evacuation start. It is critical that we
-  // evacuate roots right after finishing marking, so that we don't
-  // get unmarked objects in the roots.
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  if (!sh->cancelled_concgc()) {
-    if (ShenandoahGCVerbose)
-      tty->print("vm_ShenandoahFinalMark\n");
-
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark);
-    sh->concurrentMark()->finish_mark_from_roots();
-    sh->stop_concurrent_marking();
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark);
-
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::prepare_evac);
-    sh->prepare_for_concurrent_evacuation();
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::prepare_evac);
-
-    if (!sh->cancelled_concgc()){
-      sh->set_evacuation_in_progress(true);
-
-      // From here on, we need to update references.
-      sh->set_need_update_refs(true);
-
-      if (! ShenandoahConcurrentEvacuation) {
-	VM_ShenandoahEvacuation evacuation;
-	evacuation.doit();
-      } else {
-	if (!sh->cancelled_concgc()) {
-	  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_evac);
-	  sh->evacuate_and_update_roots();
-	  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_evac);
-	}
-      }
-    } else {
-      sh->free_regions()->set_concurrent_iteration_safe_limits();
-      //      sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::prepare_evac);
-      //      sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark);
-    }
-  } else {
-    sh->concurrentMark()->cancel();
-    sh->stop_concurrent_marking();
-  }    
-}
-
-VM_Operation::VMOp_Type VM_ShenandoahStartEvacuation::type() const {
-  return VMOp_ShenandoahStartEvacuation;
-}
-
-const char* VM_ShenandoahStartEvacuation::name() const {
-  return "Start shenandoah evacuation";
-}
-
-VM_Operation::VMOp_Type VM_ShenandoahVerifyHeapAfterEvacuation::type() const {
-  return VMOp_ShenandoahVerifyHeapAfterEvacuation;
-}
-
-const char* VM_ShenandoahVerifyHeapAfterEvacuation::name() const {
-  return "Shenandoah verify heap after evacuation";
-}
-
-void VM_ShenandoahVerifyHeapAfterEvacuation::doit() {
-
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  sh->verify_heap_after_evacuation();
-
-}
-
-VM_Operation::VMOp_Type VM_ShenandoahEvacuation::type() const {
-  return VMOp_ShenandoahEvacuation;
-}
-
-const char* VM_ShenandoahEvacuation::name() const {
-  return "Shenandoah evacuation";
-}
-
-void VM_ShenandoahEvacuation::doit() {
-  if (ShenandoahGCVerbose)
-    tty->print("vm_ShenandoahEvacuation\n");
-
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  sh->do_evacuation();
-
-  if (! ShenandoahConcurrentUpdateRefs) {
-    assert(! ShenandoahConcurrentEvacuation, "turn off concurrent evacuation");
-    sh->prepare_for_update_references();
-    sh->update_references();
-  }
-}
-/*
-  VM_Operation::VMOp_Type VM_ShenandoahVerifyHeapAfterUpdateRefs::type() const {
-  return VMOp_ShenandoahVerifyHeapAfterUpdateRefs;
-  }
-
-  const char* VM_ShenandoahVerifyHeapAfterUpdateRefs::name() const {
-  return "Shenandoah verify heap after updating references";
-  }
-
-  void VM_ShenandoahVerifyHeapAfterUpdateRefs::doit() {
-
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  sh->verify_heap_after_update_refs();
-
-  }
-*/
-VM_Operation::VMOp_Type VM_ShenandoahUpdateRootRefs::type() const {
-  return VMOp_ShenandoahUpdateRootRefs;
-}
-
-const char* VM_ShenandoahUpdateRootRefs::name() const {
-  return "Shenandoah update root references";
-}
-
-void VM_ShenandoahUpdateRootRefs::doit() {
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  if (! sh->cancelled_concgc()) {
-
-    if (ShenandoahGCVerbose)
-      tty->print("vm_ShenandoahUpdateRootRefs\n");
-
-
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_uprefs);
-
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
-
-    sh->update_roots();
-
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);
-
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_uprefs);
-  }
-
-  sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::recycle_regions);
-  sh->recycle_dirty_regions();
-  sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::recycle_regions);
-
-  if (ShenandoahVerify && ! sh->cancelled_concgc()) {
-    sh->verify_heap_after_update_refs();
-    sh->verify_regions_after_update_refs();
-  }
-#ifdef ASSERT
-  if (! ShenandoahVerify) {
-    assert(sh->is_bitmap_clear(), "need cleared bitmap here");
-  }
-#endif
-
-}
-
-VM_Operation::VMOp_Type VM_ShenandoahUpdateRefs::type() const {
-  return VMOp_ShenandoahUpdateRefs;
-}
-
-const char* VM_ShenandoahUpdateRefs::name() const {
-  return "Shenandoah update references";
-}
-
-void VM_ShenandoahUpdateRefs::doit() {
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
-  if (!sh->cancelled_concgc()) {
-
-    if (ShenandoahGCVerbose)
-      tty->print("vm_ShenandoahUpdateRefs\n");
-    
-    sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_evac);
-    sh->set_evacuation_in_progress(false);
-    sh->prepare_for_update_references();
-    assert(ShenandoahConcurrentUpdateRefs, "only do this when concurrent update references is turned on");
-    sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_evac);
-  }
-}
--- a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-/*
-Copyright 2014 Red Hat, Inc. and/or its affiliates.
- */
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
-
-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp"
-#include "gc/shared/vmGCOperations.hpp"
-
-// VM_operations for the Shenandoah Collector.
-// For now we are just doing two pauses.  The initial marking pause, and the final finish up marking and perform evacuation pause.
-//    VM_ShenandoahInitMark
-//    VM_ShenandoahFinishMark
-
-class VM_ShenandoahInitMark: public VM_Operation {
-  
-public:
-  virtual VMOp_Type type() const;
-  virtual void doit();
-
-  virtual const char* name() const;
-};
-
-class VM_ShenandoahReferenceOperation : public VM_Operation {
-  bool doit_prologue();
-  void doit_epilogue();
-
-};
-
-class VM_ShenandoahStartEvacuation: public VM_ShenandoahReferenceOperation {
-
- public:
-  VMOp_Type type() const;
-  void doit();
-  const char* name() const;
-
-};
-
-class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation {
- public:
-  VMOp_Type type() const;
-  void doit();
-  const char* name() const;
-};
-
-class VM_ShenandoahVerifyHeapAfterEvacuation: public VM_Operation {
-
- public:
-  virtual VMOp_Type type() const;
-  virtual void doit();
-
-  virtual const char* name() const;
-
-};
-
-class VM_ShenandoahEvacuation: public VM_Operation {
-
- public:
-  virtual VMOp_Type type() const;
-  virtual void doit();
-
-  virtual const char* name() const;
-
-};
-
-/*
-class VM_ShenandoahVerifyHeapAfterUpdateRefs: public VM_Operation {
-
- public:
-  virtual VMOp_Type type() const;
-  virtual void doit();
-
-  virtual const char* name() const;
-
-};
-*/
-class VM_ShenandoahUpdateRootRefs: public VM_Operation {
-
- public:
-  virtual VMOp_Type type() const;
-  virtual void doit();
-
-  virtual const char* name() const;
-
-};
-
-class VM_ShenandoahUpdateRefs: public VM_Operation {
-
- public:
-  virtual VMOp_Type type() const;
-  virtual void doit();
-
-  virtual const char* name() const;
-
-};
-
-#endif //SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP
--- a/src/share/vm/memory/universe.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/memory/universe.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -74,8 +74,8 @@
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc/cms/cmsCollectorPolicy.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy_ext.hpp"
--- a/src/share/vm/opto/graphKit.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/opto/graphKit.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -24,8 +24,8 @@
 
 #include "precompiled.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
--- a/src/share/vm/opto/library_call.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/opto/library_call.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -28,8 +28,8 @@
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahRuntime.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "opto/addnode.hpp"
 #include "opto/arraycopynode.hpp"
--- a/src/share/vm/opto/runtime.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/opto/runtime.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -35,7 +35,7 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/compilerOracle.hpp"
 #include "compiler/oopMap.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
--- a/src/share/vm/runtime/arguments.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/runtime/arguments.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -52,7 +52,7 @@
 #include "utilities/macros.hpp"
 #include "utilities/stringUtils.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
--- a/src/share/vm/runtime/safepoint.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/runtime/safepoint.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -56,7 +56,7 @@
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahConcurrentThread.hpp"
+#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #endif // INCLUDE_ALL_GCS
--- a/src/share/vm/runtime/thread.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/runtime/thread.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -93,7 +93,7 @@
 #include "utilities/macros.hpp"
 #include "utilities/preserveException.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahConcurrentThread.hpp"
+#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/parallel/pcTasks.hpp"
--- a/src/share/vm/services/memoryService.cpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/services/memoryService.cpp	Wed Aug 19 23:00:20 2015 +0200
@@ -46,7 +46,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
--- a/src/share/vm/services/shenandoahMemoryPool.hpp	Wed Aug 19 20:37:30 2015 +0200
+++ b/src/share/vm/services/shenandoahMemoryPool.hpp	Wed Aug 19 23:00:20 2015 +0200
@@ -7,7 +7,7 @@
 #define SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP
 
 #ifndef SERIALGC
-#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
 #include "services/memoryPool.hpp"
 #include "services/memoryUsage.hpp"
 #endif