changeset 5367:54d28edc92cd concurrent

First pass at concurrent evacuation
author cflood
date Thu, 17 Oct 2013 15:28:20 -0400
parents b77db1b4cde3
children de2c5fe7fc51 eb6b641f7b41
files src/share/vm/gc_implementation/shenandoah/brooksPointer.cpp src/share/vm/gc_implementation/shenandoah/brooksPointer.hpp src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp src/share/vm/memory/barrierSet.hpp src/share/vm/oops/objArrayKlass.cpp src/share/vm/oops/objArrayOop.hpp src/share/vm/oops/oop.inline.hpp src/share/vm/prims/jvm.cpp src/share/vm/prims/unsafe.cpp src/share/vm/runtime/synchronizer.cpp
diffstat 19 files changed, 511 insertions(+), 107 deletions(-) [+]
line wrap: on
line diff
--- a/src/share/vm/gc_implementation/shenandoah/brooksPointer.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/brooksPointer.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -31,4 +31,15 @@
 
 void BrooksPointer::set_forwardee(oop forwardee) {
   *heap_word = (*heap_word & AGE_MASK) | ((uintptr_t) forwardee & FORWARDEE_MASK);
+  //  tty->print("setting_forwardee to %p = %p\n", forwardee, *heap_word);
 }
+
+HeapWord* BrooksPointer::cas_forwardee(HeapWord* old, HeapWord* forwardee) {
+  HeapWord* o = (HeapWord*) ((*heap_word & AGE_MASK) | ((uintptr_t) forwardee & FORWARDEE_MASK));
+  HeapWord* n = (HeapWord*) ((*heap_word & AGE_MASK) | ((uintptr_t) old & FORWARDEE_MASK));
+	
+  tty->print("Attempting to CAS %p from %p to %p\n", heap_word, o, n);
+  return (HeapWord*) ((uintptr_t) Atomic::cmpxchg_ptr(o, heap_word, n) & FORWARDEE_MASK);
+}
+					 
+  
--- a/src/share/vm/gc_implementation/shenandoah/brooksPointer.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/brooksPointer.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -23,6 +23,7 @@
 
   oop get_forwardee();
   void set_forwardee(oop forwardee);
+  HeapWord* cas_forwardee(HeapWord* old, HeapWord* forwardee);
 
   static BrooksPointer get(oop obj);
 };
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -170,6 +170,14 @@
 template <class T>
 void ShenandoahBarrierSet::write_ref_field_pre_static(T* field, oop newVal) {
   T heap_oop = oopDesc::load_heap_oop(field);
+    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+    if (sh->is_in(field) && 
+	sh->heap_region_containing((HeapWord*)field)->is_in_collection_set()){
+      tty->print("field = %p\n", field);
+      sh->heap_region_containing((HeapWord*)field)->print();
+      assert(false, "We should have fixed this earlier");   
+    }   
+
   if (!oopDesc::is_null(heap_oop)) {
     G1SATBCardTableModRefBS::enqueue(oopDesc::decode_heap_oop(heap_oop));
     // tty->print("write_ref_field_pre_static: v = "PTR_FORMAT" o = "PTR_FORMAT" old: %p\n", field, newVal, heap_oop);
@@ -236,9 +244,22 @@
 
 oopDesc* ShenandoahBarrierSet::get_shenandoah_forwardee(oopDesc* p) {
   oop result = get_shenandoah_forwardee_helper(p);
-  // We should never be forwarded more than once.
-  assert(get_shenandoah_forwardee_helper(result) == result, "Only one fowarding per customer");  
-  return result;
+    if (result != p) {
+      oop second_forwarding = get_shenandoah_forwardee_helper(result);
+
+      // We should never be forwarded more than once.
+      if (result != second_forwarding) {
+	ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+	tty->print("first reference %p is in heap region:\n", p);
+	sh->heap_region_containing(p)->print();
+	tty->print("first_forwarding %p is in heap region:\n", result);
+	sh->heap_region_containing(result)->print();
+	tty->print("final reference %p is in heap region:\n", second_forwarding);
+	sh->heap_region_containing(second_forwarding)->print();
+	assert(get_shenandoah_forwardee_helper(result) == result, "Only one fowarding per customer");  
+      }
+    }
+    return result;
 }
 
 
@@ -272,6 +293,35 @@
   }
 }
 
+oopDesc* ShenandoahBarrierSet::resolve_and_maybe_copy_oopHelper(oopDesc* src) {
+    if (src != NULL) {
+      oopDesc* tmp = get_shenandoah_forwardee(src);
+      ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();      
+      if (sh->heap_region_containing(tmp)->is_in_collection_set()) {
+	oopDesc* dst = sh->evacuate_object(tmp);
+	tty->print("src = %p dst = %p tmp = %p src-2 = %p\n",
+		   src, dst, tmp, src-2);
+	assert(sh->is_in(dst), "result should be in the heap");
+	return dst;
+      } else {
+	return src;
+      }
+    } else {
+      return NULL;
+    }
+}
+
+oopDesc* ShenandoahBarrierSet::resolve_and_maybe_copy_oop(oopDesc* src) {
+    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();      
+    if (src != NULL && sh->is_in(src)) {
+      oopDesc* result = resolve_and_maybe_copy_oopHelper(src);
+      assert(sh->is_in(result), "result should be in the heap");
+      return result;
+    } else {
+      return src;
+    }
+  }
+
 
 #ifndef CC_INTERP
 // TODO: The following should really live in an X86 specific subclass.
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -50,14 +50,51 @@
 
   void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
 
+<<<<<<< local
+  template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
+    T heap_oop = oopDesc::load_heap_oop(field);
+    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();
+    if (sh->is_in(field) && 
+	sh->heap_region_containing((HeapWord*)field)->is_in_collection_set()){
+      tty->print("field = %p\n", field);
+      sh->heap_region_containing((HeapWord*)field)->print();
+      assert(false, "We should have fixed this earlier");   
+    }   
+  
+    if (!oopDesc::is_null(heap_oop)) {
+	G1SATBCardTableModRefBS::enqueue(oopDesc::decode_heap_oop(heap_oop));
+      // tty->print("write_ref_field_pre_static: v = "PTR_FORMAT" o = "PTR_FORMAT" old: %p\n", field, newVal, heap_oop);
+    }
+  }
+
+  // We export this to make it available in cases where the static
+  // type of the barrier set is known.  Note that it is non-virtual.
+  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
+    write_ref_field_pre_static(field, newVal);
+    
+  }
+=======
   template <class T> static void write_ref_field_pre_static(T* field, oop newVal);
+>>>>>>> other
 
   // These are the more general virtual versions.
   void write_ref_field_pre_work(oop* field, oop new_val);
   void write_ref_field_pre_work(narrowOop* field, oop new_val);
   void write_ref_field_pre_work(void* field, oop new_val);
 
+<<<<<<< local
+
+  void write_ref_field_work(void* v, oop o){
+    if (!JavaThread::satb_mark_queue_set().is_active()) return;
+    assert (! UseCompressedOops, "compressed oops not supported yet");
+    enqueue_update_ref((oop*) v);
+    
+    // tty->print("write_ref_field_work: v = "PTR_FORMAT" o = "PTR_FORMAT"\n", v, o);
+  }
+
+=======
   void write_ref_field_work(void* v, oop o);
+>>>>>>> other
   void write_region_work(MemRegion mr);
 
   oopDesc* get_shenandoah_forwardee(oopDesc* p);
@@ -66,9 +103,92 @@
 
   static bool has_brooks_ptr(oopDesc* p);
 
+<<<<<<< local
+  inline oopDesc* get_shenandoah_forwardee(oopDesc* p) {
+    oop result = get_shenandoah_forwardee_helper(p);
+
+    if (result != p) {
+      oop second_forwarding = get_shenandoah_forwardee_helper(result);
+
+      // We should never be forwarded more than once.
+      if (result != second_forwarding) {
+	ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
+	tty->print("first reference %p is in heap region:\n", p);
+	sh->heap_region_containing(p)->print();
+	tty->print("first_forwarding %p is in heap region:\n", result);
+	sh->heap_region_containing(result)->print();
+	tty->print("final reference %p is in heap region:\n", second_forwarding);
+	sh->heap_region_containing(second_forwarding)->print();
+	assert(get_shenandoah_forwardee_helper(result) == result, "Only one fowarding per customer");  
+      }
+    }
+    return result;
+  }
+=======
   virtual oopDesc* resolve_oop(oopDesc* src);
+>>>>>>> other
+
+<<<<<<< local
+  static bool is_brooks_ptr(oopDesc* p) {
+    if (p->has_displaced_mark())
+      return false;
+
+    return p->mark()->age()==15;
+  }
+
+  static bool has_brooks_ptr(oopDesc* p) {
+    return is_brooks_ptr(oop(((HeapWord*) p) - BROOKS_POINTER_OBJ_SIZE));
+  }
+
+  virtual oopDesc* resolve_oop(oopDesc* src) {
 
+    if (src != NULL) {
+      return get_shenandoah_forwardee(src);
+    } else {
+      return NULL;
+    }
+  }
+
+  virtual oopDesc* maybe_resolve_oop(oopDesc* src) {
+    if (Universe::heap()->is_in(src)) {
+      return get_shenandoah_forwardee(src);
+    } else {
+      return src;
+    }
+  }
+=======
   virtual oopDesc* maybe_resolve_oop(oopDesc* src);
+>>>>>>> other
+
+
+  virtual oopDesc* resolve_and_maybe_copy_oopHelper(oopDesc* src) {
+    if (src != NULL) {
+      oopDesc* tmp = get_shenandoah_forwardee(src);
+      ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();      
+      if (sh->heap_region_containing(tmp)->is_in_collection_set()) {
+	oopDesc* dst = sh->evacuate_object(tmp);
+	tty->print("src = %p dst = %p tmp = %p src-2 = %p\n",
+		   src, dst, tmp, src-2);
+	assert(sh->is_in(dst), "result should be in the heap");
+	return dst;
+      } else {
+	return src;
+      }
+    } else {
+      return NULL;
+    }
+  }
+
+  virtual oopDesc* resolve_and_maybe_copy_oop(oopDesc* src) {
+    ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap();      
+    if (src != NULL && sh->is_in(src)) {
+      oopDesc* result = resolve_and_maybe_copy_oopHelper(src);
+      assert(sh->is_in(result), "result should be in the heap");
+      return result;
+    } else {
+      return src;
+    }
+  }
 
   void enqueue_update_ref(oop* ref);
 
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -12,7 +12,7 @@
   _concurrent_mark_started(false),
   _concurrent_mark_in_progress(false)
 {
-  create_and_start();
+  //  create_and_start();
 }
 
 class SCMCheckpointRootsFinalClosure : public VoidClosure {
@@ -29,46 +29,37 @@
   initialize_in_thread();
   tty->print("Starting to run %s with number %ld YAY!!!, %s\n", name(), os::current_thread_id());
   wait_for_universe_init();
-  ShenandoahHeap* sh = ShenandoahHeap::heap();
-  ShenandoahCollectorPolicy* sh_policy = sh->collector_policy();
-  ShenandoahConcurrentMark* scm = sh->concurrentMark();
-  Thread* current_thread = Thread::current();
-  clear_cm_aborted();
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+  size_t targetStartMarking = heap->capacity() / 64;
+  size_t targetBytesAllocated = ShenandoahHeapRegion::RegionSizeBytes;
+
 
   while (!_should_terminate) {
-    sleepBeforeNextCycle();
-    {
-      ResourceMark rm;
-      HandleMark hm;
-      
-      if (!cm_has_aborted()) {
-	scm->scanRootRegions();
-      }
+    if (heap->used() > targetStartMarking && heap->_bytesAllocSinceCM > targetBytesAllocated) {
+      heap->_bytesAllocSinceCM = 0;
+      if (ShenandoahGCVerbose) 
+        tty->print("Capacity = "SIZE_FORMAT" Used = "SIZE_FORMAT" Target = "SIZE_FORMAT" doing initMark\n", heap->capacity(), heap->used(), targetStartMarking);
+ 
+      if (ShenandoahGCVerbose) tty->print("Starting a mark");
+
+      VM_ShenandoahInitMark initMark;
+      VMThread::execute(&initMark);
 
-      if (!cm_has_aborted()) {
-         scm->markFromRoots();
-      } 
-      SCMCheckpointRootsFinalClosure final_cl(scm);
-      /*
-      VM_ShenandoahFinal op;
-      VMThread::execute(&op);
-      */
-      }
-   }
+      ShenandoahHeap::heap()->concurrentMark()->markFromRoots();
+
+      VM_ShenandoahFinishMark finishMark;
+      VMThread::execute(&finishMark);
+
+      ShenandoahHeap::heap()->parallel_evacuate();
+
+    } else {
+      yield();
+    }
+  }
 }
 
-ShenandoahConcurrentThread* ShenandoahConcurrentThread::start() {
-  ShenandoahConcurrentThread* th = new ShenandoahConcurrentThread();
-  return th;
-}
 
-void ShenandoahConcurrentThread::stop() {
-  tty->print("Attempt to stop concurrentThread");
-}
-
-void ShenandoahConcurrentThread::create_and_start() {
-}
-    
 void ShenandoahConcurrentThread::print() const {
   print_on(tty);
 }
@@ -111,3 +102,10 @@
   return _concurrent_mark_in_progress;  
 }
 
+void ShenandoahConcurrentThread::start() {
+  create_and_start();
+}
+
+void ShenandoahConcurrentThread::yield() {
+  _sts.yield("Concurrent Mark");
+}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -54,12 +54,8 @@
   bool during_cycle()      { return cm_started() || cm_in_progress(); }
 
   char* name() const { return (char*)"ShenandoahConcurrentThread";}
-
-  // shutdown
-  void stop();
-  void create_and_start();
-  ShenandoahConcurrentThread* start();
-  
+  void start();
+  void yield();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -14,11 +14,9 @@
 
 void printHeapLocations(HeapWord* start, HeapWord* end) {
   HeapWord* cur = NULL;
-  int *val = NULL;
   for (cur = start; cur < end; cur++) {
-      val = (int *) cur;
-      tty->print(PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
-    }
+    tty->print("%p : %p \n", cur, *cur);
+  }
 }
 
 void printHeapObjects(HeapWord* start, HeapWord* end) {
@@ -37,8 +35,9 @@
   PrintHeapRegionsClosure(outputStream* st) : _st(st) {}
 
   bool doHeapRegion(ShenandoahHeapRegion* r) {
-    _st->print("Region %d bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT" used = %x free = %x live = %x dirty: %d\n", 
-	       r->regionNumber, r->bottom(), r->end(), r->top(), r->used(), r->free(), r->getLiveData(), r->is_dirty());
+    //  _st->print("Region %d bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT" used = %x free = %x live = %x dirty: %d\n", 
+    //	       r->regionNumber, r->bottom(), r->end(), r->top(), r->used(), r->free(), r->getLiveData(), r->is_dirty());
+    r->print();
     return false;
   }
 };
@@ -140,7 +139,7 @@
                                                20 /*G1SATBProcessCompletedThreshold */,
                                                Shared_SATB_Q_lock);
 
-  _concurrent_gc_thread = new ShenandoahConcurrentGCThread();
+  _concurrent_gc_thread = new ShenandoahConcurrentThread();
   _concurrent_gc_thread->start();
   return JNI_OK;
 }
@@ -170,7 +169,6 @@
 }
 
 void ShenandoahHeap::post_initialize() {
-  //  ShenandoahConcurrentThread* first = new ShenandoahConcurrentThread();
   _scm->initialize(workers());
 }
 
@@ -318,6 +316,8 @@
 };
 
 void ShenandoahHeap::update_current_region() {
+
+
   if (ShenandoahGCVerbose) {
      tty->print("Old current region = ");
      _current_region->print();
@@ -325,7 +325,9 @@
 
   do {
     if (_free_regions->has_next()) {
+      _current_region->set_is_current_allocation_region(false);
       _current_region = _free_regions->get_next();
+      _current_region->set_is_current_allocation_region(true);
     } else {
       if (ShenandoahGCVerbose) {
         print_heap_regions();
@@ -343,8 +345,7 @@
 }
 
 ShenandoahHeapRegion* ShenandoahHeap::cas_update_current_region(ShenandoahHeapRegion* expected) {
-
-  
+  _current_region->set_is_current_allocation_region(false);  
   if (_free_regions->has_next()) {
     ShenandoahHeapRegion* previous = (ShenandoahHeapRegion*) Atomic::cmpxchg_ptr(_free_regions->peek_next(), &_current_region, expected);
     if (previous == expected) {
@@ -353,8 +354,10 @@
     }
     // If the above CAS fails, we want the caller to get the _current_region that the other thread
     // CAS'ed.
+    _current_region->set_is_current_allocation_region(true);
     return _current_region;
   } else {
+    print_heap_regions();
     assert(false, "No GC implemented");
   }
 
@@ -416,8 +419,8 @@
 
   // This was used for allocation while holding the Heap_lock.
   // HeapWord* filler = allocate_memory(BROOKS_POINTER_OBJ_SIZE + size);
+
   HeapWord* filler = allocate_memory_gclab(BROOKS_POINTER_OBJ_SIZE + size);
-
   HeapWord* result = filler + BROOKS_POINTER_OBJ_SIZE;
   if (filler != NULL) {
     initialize_brooks_ptr(filler, result);
@@ -491,18 +494,23 @@
     _waste(0) { 
   }
 
-  void verify_copy(oop p,oop c){
-    assert(p != oopDesc::bs()->resolve_oop(p), "forwarded correctly");
-    assert(oopDesc::bs()->resolve_oop(p) == c, "verify pointer is correct");
-    if (p->klass() != c->klass()) {
-      _heap->print_heap_regions();
-    }
-    assert(p->klass() == c->klass(), err_msg("verify class p-size: %d c-size: %d", p->size(), c->size()));
-    assert(p->size() == c->size(), "verify size");
-    assert(p->mark() == c->mark(), "verify mark");
-    assert(c == oopDesc::bs()->resolve_oop(c), "verify only forwarded once");
-  }
+  // Call this if you know we have enough space.
+  void copy_object(oop p) {
+    HeapWord* hw = (HeapWord*) p;
+    HeapWord* filler = _region->allocate(BROOKS_POINTER_OBJ_SIZE + p->size());
+    HeapWord* copy = filler + BROOKS_POINTER_OBJ_SIZE;
+    _heap->copy_object(p, filler);
 
+<<<<<<< local
+    HeapWord* result = 
+      (HeapWord*) _heap->cas_brooks_ptr(hw, copy);
+
+    if (result == hw) {
+      tty->print("copy of object from %p to %p at epoch %d succeeded\n", p, copy, _heap->getEpoch());
+    } else {
+      tty->print("copy of object from %p to %p at epoch %d failed because it was already copied to %p\n", p, copy, _heap->getEpoch(), result);
+      _heap->fill_with_object(copy, p->size(), true);
+=======
   void assign_brooks_pointer(oop p, HeapWord* filler, HeapWord* copy) {
     _heap->initialize_brooks_ptr(filler, copy);
     BrooksPointer::get(oop(copy)).set_age(BrooksPointer::get(p).get_age());
@@ -519,9 +527,12 @@
 	tty->print("copied object = \n");
 	oop(copy)->print();
       }
+>>>>>>> other
     }
   }
 
+<<<<<<< local
+=======
   // Call this if you know we have enough space.
   void copy_object(oop p) {
     HeapWord* filler = _region->allocate(BROOKS_POINTER_OBJ_SIZE + p->size());
@@ -549,30 +560,42 @@
       p->set_mark(p->displaced_mark());
   }    
   
+>>>>>>> other
   void do_object(oop p) {
+<<<<<<< local
+    tty->print("Calling ParallelEvacuateRegionObjectClosure on %p \n", p);
+    if (ShenandoahHeap::getMark(p)->age() == _epoch) {
+=======
     if ((! ShenandoahBarrierSet::is_brooks_ptr(p)) && BrooksPointer::get(p).get_age() == _epoch) {
+>>>>>>> other
       size_t required = BROOKS_POINTER_OBJ_SIZE + p->size();
-      // tty->print("required = %d\n", required);
+      tty->print("PEROC: %p region_size = %d required = %d\n", p, _region->region_size(), required);
 
       if (required < _region->space_available()) {
-	if (ShenandoahGCVerbose) 
-	  tty->print("required < _region->space_available() = %d\n", _region->space_available());
+	//	if (ShenandoahGCVerbose) 
+	tty->print("PEROC: %p required < _region->space_available() = %d\n", 
+		   p, _region->space_available());
 	copy_object(p);
       } else if (required < _region->region_size()) {
-	if (ShenandoahGCVerbose) 
-	  tty->print("required < _region->region_size = %d\n ", _region->region_size());
+	//	if (ShenandoahGCVerbose) 
+	tty->print("PEROC: %p required < _region->region_size = %d\n ", p, _region->region_size());
 	_waste += _region->space_available();
 	_region->fill_region();
 	_region->allocate_new_region();
 	copy_object(p);
       } else if (required < ShenandoahHeapRegion::RegionSizeBytes) {
-	if (ShenandoahGCVerbose) 
-	  tty->print("required < ShenandoahHeapRegion::RegionSizeBytes = %d\n ", ShenandoahHeapRegion::RegionSizeBytes);
+	//	if (ShenandoahGCVerbose) 
+	tty->print("PEROC: %p required < ShenandoahHeapRegion::RegionSizeBytes = %d\n ", 
+		   p, ShenandoahHeapRegion::RegionSizeBytes);
 	_waste += _region->space_available();
         _region->fill_region();
 	HeapWord* s = _heap->allocate_memory_gclab(required);
-	copy_object(p, s);
+	tty->print("PEROC: p = %p s = %p\n", p, s);
+	_heap->copy_object(p, s);
+	
+	tty->print("PEROC: post _heap->copy_object = %p s = %p\n", p, s);
       } else {
+	tty->print("PEROC:%p don't handle humongous objects\n", p);
 	assert(false, "Don't handle humongous objects yet");
       }
     }
@@ -581,6 +604,26 @@
 };
       
 
+<<<<<<< local
+void ShenandoahHeap::set_brooks_ptr(HeapWord* brooks_ptr, HeapWord* obj) {
+  // Set the brooks pointer
+  assert(ShenandoahBarrierSet::is_brooks_ptr(oop(brooks_ptr)), "brooks pointer must be brooks pointer");
+  HeapWord* first = brooks_ptr + (BROOKS_POINTER_OBJ_SIZE - 1);
+  uintptr_t first_ptr = (uintptr_t) first;
+  *(unsigned long*)(((unsigned long*)first_ptr)) = (unsigned long) obj;
+  //tty->print_cr("result, brooks obj, brooks ptr: %p, %p, %p", obj, filler, first);
+
+}
+
+// We know that the brooks pointer is the word previous to the old value
+
+HeapWord* ShenandoahHeap::cas_brooks_ptr(HeapWord* old, HeapWord* next) {
+  HeapWord* brooks_ptr = old - 1;
+  return (HeapWord*) Atomic::cmpxchg_ptr(next, brooks_ptr, old);
+}
+  
+=======
+>>>>>>> other
 void ShenandoahHeap::initialize_brooks_ptr(HeapWord* filler, HeapWord* obj) {
   CollectedHeap::fill_with_array(filler, BROOKS_POINTER_OBJ_SIZE, false, false);
   markOop mark = oop(filler)->mark();
@@ -620,16 +663,19 @@
 void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region, 
 					      ShenandoahAllocRegion *alloc_region) {
   ParallelEvacuateRegionObjectClosure evacuate_region(_epoch, this, alloc_region);
-  if (ShenandoahGCVerbose) 
+  //  if (ShenandoahGCVerbose) 
     tty->print("parallel_evacuate_region starting from_region %d: free_regions = %d\n",  from_region->regionNumber, _free_regions->available_regions());
   from_region->object_iterate(&evacuate_region);
   from_region->set_dirty(true);
+
+  from_region->set_is_in_collection_set(false);
 #ifdef ASSERT
-  if (ShenandoahVerify) {
+  //  if (ShenandoahVerify) {
     verify_evacuated_region(from_region);
-  }
+    //  }
 #endif
-  if (ShenandoahGCVerbose)
+
+    //  if (ShenandoahGCVerbose)
     tty->print("parallel_evacuate_region after from_region = %d: Wasted %d bytes free_regions = %d\n", from_region->regionNumber, evacuate_region.wasted(), _free_regions->available_regions());
 }
 
@@ -654,12 +700,12 @@
     ShenandoahAllocRegion allocRegion = ShenandoahAllocRegion();
 
     while (from_hr != NULL) {
-      if (ShenandoahGCVerbose) {
+      //      if (ShenandoahGCVerbose) {
      	tty->print("Thread %d claimed Heap Region %d\n",
      		   worker_id,
      		   from_hr->regionNumber);
 	from_hr->print();
-      }
+	//      }
 
       // Not sure if the check is worth it or not.
       if (from_hr->getLiveData() != 0) {
@@ -667,6 +713,7 @@
       } else {
 	// We don't need to evacuate anything, but we still need to mark it dirty.
 	from_hr->set_dirty(true);
+	from_hr->set_is_in_collection_set(false);
       }
 
       from_hr = _cs->claim_next();
@@ -840,6 +887,8 @@
 };
 
 void ShenandoahHeap::verify_heap_after_marking() {
+  tty->print("verifying heap after marking\n");
+  print_all_refs("post-mark");
   VerifyAfterMarkingOopClosure cl;
   roots_iterate(&cl);
 
@@ -848,16 +897,15 @@
 }
 
 void ShenandoahHeap::parallel_evacuate() {
-
-  if (ShenandoahGCVerbose) {
+  //  if (ShenandoahGCVerbose) {
     tty->print_cr("starting parallel_evacuate");
-    PrintHeapRegionsClosure pc1;
-    heap_region_iterate(&pc1);
-  }
+    //    PrintHeapRegionsClosure pc1;
+    //    heap_region_iterate(&pc1);
+    //  }
 
 #ifdef ASSERT
   if (ShenandoahVerify) {
-    verify_heap_after_marking();
+    //    verify_heap_after_marking();
   }
 #endif
 
@@ -868,7 +916,7 @@
   _regions->choose_collection_set(_collection_set);
   _regions->choose_empty_regions(_free_regions);
   update_current_region();
-  if (ShenandoahGCVerbose) {
+  //  if (ShenandoahGCVerbose) {
     tty->print("Printing all available regions");
     print_heap_regions();
     tty->print("Printing collection set which contains %d regions:\n", _collection_set->available_regions());
@@ -876,7 +924,7 @@
 
     tty->print("Printing %d free regions:\n", _free_regions->available_regions());
     _free_regions->print();
-  }
+    //  }
 
   barrierSync.set_n_workers(_max_workers);
   
@@ -899,7 +947,7 @@
 
 #ifdef ASSERT
   if (ShenandoahVerify) {
-    verify_heap_after_evacuation();
+    //    verify_heap_after_evacuation();
   }
 #endif
 
@@ -1336,6 +1384,11 @@
     // TODO: The following resolution of obj is only ever needed when draining the SATB queues.
     // Wrap this closure to avoid this call in usual marking.
     obj = oopDesc::bs()->resolve_oop(obj);
+
+    if (sh->heap_region_containing(obj)->is_dirty()) {
+      sh->print_heap_regions();
+    }
+
     assert(! sh->heap_region_containing(obj)->is_dirty(), "we don't want to mark objects in from-space");
     assert(sh->is_in(obj), "referenced objects must be in the heap. No?");
     if (! sh->isMarkedCurrent(obj)) {
@@ -1587,4 +1640,80 @@
   // tty->print_cr("obj age: %d", BrooksPointer::get(obj).get_age());
   return BrooksPointer::get(obj).get_age() == _epoch;
 }
+
+void ShenandoahHeap::verify_copy(oop p,oop c){
+    assert(p != oopDesc::bs()->resolve_oop(p), "forwarded correctly");
+    assert(oopDesc::bs()->resolve_oop(p) == c, "verify pointer is correct");
+    if (p->klass() != c->klass()) {
+      print_heap_regions();
+    }
+    assert(p->klass() == c->klass(), err_msg("verify class p-size: %d c-size: %d", p->size(), c->size()));
+    assert(p->size() == c->size(), "verify size");
+    // Object may have been locked between copy and verification
+    //    assert(p->mark() == c->mark(), "verify mark");
+    assert(c == oopDesc::bs()->resolve_oop(c), "verify only forwarded once");
+  }
+
+void ShenandoahHeap::assign_brooks_pointer(oop p, HeapWord* filler, HeapWord* copy) {
+  initialize_brooks_ptr(filler, copy);
+  HeapWord* old_brooks_ptr = ((HeapWord*) p) - BROOKS_POINTER_OBJ_SIZE;
+  set_brooks_ptr(old_brooks_ptr, copy);
+  if (ShenandoahGCVerbose) {
+    HandleMark hm;
+    tty->print_cr("evacuating object: %p, of size %d with age %d, epoch %d to %p of size %d", 
+		  p, p->size(), getMark(p)->age(), _epoch, copy, oop(copy)->size());
+    if (p->has_displaced_mark())
+      tty->print("object has displaced mark\n");
+    else {
+      tty->print("previous object = \n");
+      oop(p)->print();
+      tty->print("copied object = \n");
+      oop(copy)->print();
+    }
+  }
+}
+
+void ShenandoahHeap::copy_object(oop p, HeapWord* s) {
+  HeapWord* filler = s;
+  assert(s != NULL, "allocation of brooks pointer must not fail");
+  HeapWord* copy = s + BROOKS_POINTER_OBJ_SIZE;
+  assert(copy != NULL, "allocation of copy object must not fail");
+  Copy::aligned_disjoint_words((HeapWord*) p, copy, p->size());
+  initialize_brooks_ptr(filler, copy);
+  // assign_brooks_pointer(p, filler, copy);
+
+  if (ShenandoahGCVerbose) {
+    tty->print_cr("copy object from %p to: %p epoch: %d, age: %d", p, copy, ShenandoahHeap::heap()->getEpoch(), ShenandoahHeap::getMark(p)->age());
+  }
+#ifdef ASSERT
+  if (ShenandoahVerify) {
+    verify_copy(p, oop(copy));
+  }
+#endif
+  if (p->has_displaced_mark())
+    p->set_mark(p->displaced_mark());
+}
+
+oopDesc* ShenandoahHeap::evacuate_object(oopDesc* obj) {
+  oop p = oop(obj);
+  HeapWord* filler = allocate_new_gclab(BROOKS_POINTER_OBJ_SIZE + 
+				     p->size());
+  HeapWord* copy = filler + BROOKS_POINTER_OBJ_SIZE;
+  HeapWord* brooks_object = (HeapWord*) obj - 4;
+  HeapWord* brooks = (HeapWord*) obj - 1;
+
+  copy_object(obj, filler);
+  HeapWord* result = cas_brooks_ptr((HeapWord*) obj, copy);
   
+  if (result == (HeapWord*) obj) {
+    tty->print("Evacuate_Object Copy of %p to %p at epoch %d succeeded \n", obj, copy, getEpoch());
+    return (oopDesc*) copy;
+  }  else {
+    // Later we should undo the allocation of the copy and the brooks object.
+    // For now replace the copy with a bogus object.
+    tty->print("Evacuate_Object Copy of  %p to %p at epoch %d failed because object already copied to %p\n", 
+	       obj, copy, getEpoch(), result);
+    fill_with_object(copy, p->size(), true);
+    return (oopDesc*) result;
+  }
+}
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -5,6 +5,7 @@
 #include "gc_implementation/shenandoah/shenandoahConcurrentGCThread.hpp"
 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc_implementation/shenandoah/shenandoahConcurrentThread.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
 
@@ -34,13 +35,21 @@
   bool complete() { return _complete;}
 };
 
+<<<<<<< local
+=======
 
 
 
+>>>>>>> other
 // A "ShenandoahHeap" is an implementation of a java heap for HotSpot.
 // It uses a new pauseless GC algorithm based on Brooks pointers.
 // Derived from G1
 
+// 
+// CollectedHeap  
+//    SharedHeap
+//      ShenandoahHeap
+
 class ShenandoahHeap : public SharedHeap {
 
 private:
@@ -60,7 +69,7 @@
   ShenandoahHeapRegion* _currentAllocationRegion;
   ShenandoahConcurrentMark* _scm;
 
-  ShenandoahConcurrentGCThread* _concurrent_gc_thread;
+  ShenandoahConcurrentThread* _concurrent_gc_thread;
 
   size_t _numRegions;
   size_t _initialSize;
@@ -193,7 +202,13 @@
 
   void parallel_evacuate();
 
+<<<<<<< local
+  void initialize_brooks_ptr(HeapWord* brooks_ptr, HeapWord* obj);
+  void set_brooks_ptr(HeapWord* brooks_ptr, HeapWord* object);
+  HeapWord* cas_brooks_ptr(HeapWord* old,  HeapWord* obj);
+=======
   void initialize_brooks_ptr(HeapWord* brooks_ptr, HeapWord* object);
+>>>>>>> other
 
   oop maybe_update_oop_ref(oop* p);
   void evacuate_region(ShenandoahHeapRegion* from_region, ShenandoahHeapRegion* to_region);
@@ -210,10 +225,19 @@
 
   void print_all_refs(const char* prefix);
 
+  oopDesc*  evacuate_object(oopDesc* src);
+  bool is_in_collection_set(oop* p) {
+    return heap_region_containing(p)->is_in_collection_set();
+  }
+  
+  void copy_object(oop p, HeapWord* s);
+  void verify_copy(oop p, oop c);
+  void assign_brooks_pointer(oop p, HeapWord* filler, HeapWord* copy);
+  void verify_heap_after_marking();
+  void verify_heap_after_evacuation();
+
 private:
 
-  void verify_heap_after_marking();
-  void verify_heap_after_evacuation();
 
   void verify_evacuation(ShenandoahHeapRegion* from_region);
   bool set_concurrent_mark_in_progress(bool in_progress);
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -19,8 +19,25 @@
 }
 
 void ShenandoahHeapRegion::print() {
-  tty->print("ShenandoahHeapRegion: %d live = %u garbage = %u claimed = %d bottom = %p end = %p top = %p\n", 
-	     regionNumber, liveData, garbage(), claimed, bottom(), end(), top());
+  tty->print("ShenandoahHeapRegion: %d ", regionNumber);
+
+  if (is_current_allocation_region()) 
+    tty->print("A");
+  else
+    tty->print(" ");
+
+  if (is_in_collection_set())
+    tty->print("C");
+  else
+    tty->print(" ");
+
+  if (is_dirty())
+    tty->print("D");
+  else
+    tty->print(" ");
+
+  tty->print("live = %u garbage = %u claimed = %d bottom = %p end = %p top = %p\n", 
+	      liveData, garbage(), claimed, bottom(), end(), top());
 }
 
 
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -14,6 +14,9 @@
 
 private:
   bool _dirty;
+  bool _is_in_collection_set;
+  bool _is_current_allocation_region;
+
 public:
    jint initialize(HeapWord* start, size_t regionSize);
 
@@ -71,6 +74,22 @@
 
   // Just before GC we need to fill the current region.
   void fill_region();
+
+  bool is_in_collection_set() {
+    return _is_in_collection_set;
+  }
+
+  void set_is_in_collection_set(bool b) {
+    _is_in_collection_set = b;
+  }
+
+  bool is_current_allocation_region() {
+    return _is_current_allocation_region;
+  }
+
+  void set_is_current_allocation_region(bool b) {
+    _is_current_allocation_region = b;
+  }
   
 };
 
--- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -130,14 +130,23 @@
 void ShenandoahHeapRegionSet::choose_collection_set(ShenandoahHeapRegionSet* region_set) {
   sortDescendingGarbage();
   int r = 0;
+  int cs_index = 0;
+
+  // We don't want the current allocation region in the collection set because a) it is still being allocated into and b) this is where the write barriers will allocate their copies.
+
   while (r < _numRegions && _regions[r]->garbage() > _garbage_threshold) {
-    region_set->_regions[r] = _regions[r];
-    r++;
+    if (_regions[r]->is_current_allocation_region()) {
+      r++;
+    } else {
+      region_set->_regions[cs_index++] = _regions[r];
+      _regions[r]->set_is_in_collection_set(true);
+      r++;
+    }
   }
 
-  region_set->_inserted = r;
+  region_set->_inserted = cs_index;
   region_set->_index = 0;
-  region_set->_numRegions = r;
+  region_set->_numRegions = cs_index;
 
 }
 
--- a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -13,6 +13,7 @@
   sh->concurrentMark()->finishMarkFromRoots();
   sh->stop_concurrent_marking();
 
-  sh->parallel_evacuate();
+  sh->verify_heap_after_marking();
+  //  sh->parallel_evacuate();
 }
   
--- a/src/share/vm/memory/barrierSet.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/memory/barrierSet.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -193,6 +193,9 @@
   virtual oopDesc* maybe_resolve_oop(oopDesc* src) {
     return src;
   }
+  virtual oopDesc* resolve_and_maybe_copy_oop(oopDesc* src) {
+    return src;
+  }
 
   virtual void compile_resolve_oop(MacroAssembler* masm, Register dst) {
     // Default implementation does nothing.
--- a/src/share/vm/oops/objArrayKlass.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/oops/objArrayKlass.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -213,6 +213,7 @@
       for (int index = 0; index < length; index++) {
         ArrayKlass* ak = ArrayKlass::cast(h_lower_dimension());
         oop sub_array = ak->multi_allocate(rank-1, &sizes[1], CHECK_NULL);
+	h_array = objArrayHandle(THREAD, (objArrayOop)oopDesc::bs()->resolve_and_maybe_copy_oop(h_array()));
         h_array->obj_at_put(index, sub_array);
       }
     } else {
--- a/src/share/vm/oops/objArrayOop.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/oops/objArrayOop.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -81,6 +81,7 @@
   oop obj_at(int index) const {
     // With UseCompressedOops decode the narrow oop in the objArray to an
     // uncompressed oop.  Otherwise this is simply a "*" operator.
+
     if (UseCompressedOops) {
       return load_decode_heap_oop(obj_at_addr<narrowOop>(index));
     } else {
@@ -89,6 +90,11 @@
   }
 
   void obj_at_put(int index, oop value) {
+    objArrayOopDesc* forwarded_copy = 
+      (objArrayOopDesc*) oopDesc::bs()->resolve_and_maybe_copy_oop(this);
+    if (forwarded_copy != this)
+      return forwarded_copy->obj_at_put(index, value);
+
     if (UseCompressedOops) {
       oop_store(obj_at_addr<narrowOop>(index), value);
     } else {
--- a/src/share/vm/oops/oop.inline.hpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/oops/oop.inline.hpp	Thu Oct 17 15:28:20 2013 -0400
@@ -326,6 +326,10 @@
   return value;
 }
 inline void oopDesc::obj_field_put(int offset, oop value) {
+  oopDesc* forwarded_copy = oopDesc::bs()->resolve_and_maybe_copy_oop(this);
+  if (forwarded_copy != this)
+    return forwarded_copy->obj_field_put(offset, value);
+
   UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                       oop_store(obj_field_addr<oop>(offset),       value);
 }
@@ -384,6 +388,11 @@
                OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
 }
 inline void oopDesc::release_obj_field_put(int offset, oop value) {
+  oopDesc* forwarded_copy =
+    (oopDesc*) oopDesc::bs()->resolve_and_maybe_copy_oop(this);
+  if (forwarded_copy != this)
+    return forwarded_copy->release_obj_field_put(offset, value);
+
   UseCompressedOops ?
     oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
     oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
@@ -561,7 +570,10 @@
     if (prebarrier) {
       update_barrier_set_pre((oop*)dest, exchange_value);
     }
-    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
+
+    return (oop)Atomic::cmpxchg_ptr(exchange_value, 
+				    dest,
+				    compare_value);
   }
 }
 
--- a/src/share/vm/prims/jvm.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/prims/jvm.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -307,7 +307,10 @@
   assert(s->is_oop(), "JVM_ArrayCopy: src not an oop");
   assert(d->is_oop(), "JVM_ArrayCopy: dst not an oop");
   // Do copy
-  s->klass()->copy_array(s, src_pos, d, dst_pos, length, thread);
+  s->klass()->copy_array(s, src_pos, 
+			 (arrayOop(oopDesc::bs()->resolve_and_maybe_copy_oop(d))), 
+			  dst_pos,
+			 length, thread);
 JVM_END
 
 
--- a/src/share/vm/prims/unsafe.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/prims/unsafe.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -276,7 +276,7 @@
 UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
   UnsafeWrapper("Unsafe_SetObject");
   oop x = JNIHandles::resolve(x_h);
-  oop p = JNIHandles::resolve(obj);
+  oop p = oopDesc::bs()->resolve_and_maybe_copy_oop(JNIHandles::resolve(obj));
   if (UseCompressedOops) {
     oop_store((narrowOop*)index_oop_from_field_offset_long(p, offset), x);
   } else {
@@ -304,6 +304,7 @@
   oop x = JNIHandles::resolve(x_h);
   oop p = JNIHandles::resolve(obj);
   void* addr = index_oop_from_field_offset_long(p, offset);
+  
   OrderAccess::release();
   if (UseCompressedOops) {
     oop_store((narrowOop*)addr, x);
@@ -1172,7 +1173,8 @@
   UnsafeWrapper("Unsafe_CompareAndSwapObject");
   oop x = JNIHandles::resolve(x_h);
   oop e = JNIHandles::resolve(e_h);
-  oop p = JNIHandles::resolve(obj);
+  // We are about to write to this entry so check to see if we need to copy it.
+  oop p = oopDesc::bs()->resolve_and_maybe_copy_oop(JNIHandles::resolve(obj));
   HeapWord* addr = (HeapWord *)index_oop_from_field_offset_long(p, offset);
   oop res = oopDesc::atomic_compare_exchange_oop(x, addr, e, true);
   jboolean success  = (oopDesc::bs()->resolve_oop(res) == e);
--- a/src/share/vm/runtime/synchronizer.cpp	Wed Oct 16 17:50:13 2013 +0200
+++ b/src/share/vm/runtime/synchronizer.cpp	Thu Oct 17 15:28:20 2013 -0400
@@ -222,14 +222,16 @@
 // We don't need to use fast path here, because it must have been
 // failed in the interpreter/compiler code.
 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
-  markOop mark = obj->mark();
+  Handle n_obj(THREAD,
+	       oopDesc::bs()->resolve_and_maybe_copy_oop(obj()));
+  markOop mark = n_obj->mark();
   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 
   if (mark->is_neutral()) {
     // Anticipate successful CAS -- the ST of the displaced mark must
     // be visible <= the ST performed by the CAS.
     lock->set_displaced_header(mark);
-    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
+    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, n_obj()->mark_addr(), mark)) {
       TEVENT (slow_enter: release stacklock) ;
       return ;
     }
@@ -237,7 +239,7 @@
   } else
   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
     assert(lock != mark->locker(), "must not re-lock the same lock");
-    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
+    assert(lock != (BasicLock*)n_obj->mark(), "don't relock with same BasicLock");
     lock->set_displaced_header(NULL);
     return;
   }
@@ -255,7 +257,7 @@
   // must be non-zero to avoid looking like a re-entrant lock,
   // and must not look locked either.
   lock->set_displaced_header(markOopDesc::unused_mark());
-  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+  ObjectSynchronizer::inflate(THREAD, n_obj())->enter(THREAD);
 }
 
 // This routine is used to handle interpreter/compiler slow case