changeset 7588:227a9e5e4b4a

8057536: Refactor G1 to allow context specific allocations
Summary: Splitting out a g1 allocator class to supply specialized allocators which can associate each allocation with a given context.
Reviewed-by: mgerdin, brutisso
author sjohanss
date Fri, 05 Sep 2014 09:49:19 +0200
parents 8ec8971f511a
children fe392af93c23
files agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1Allocator.java agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java src/share/vm/gc_implementation/g1/g1AllocRegion.cpp src/share/vm/gc_implementation/g1/g1AllocRegion.hpp src/share/vm/gc_implementation/g1/g1AllocationContext.hpp src/share/vm/gc_implementation/g1/g1Allocator.cpp src/share/vm/gc_implementation/g1/g1Allocator.hpp src/share/vm/gc_implementation/g1/g1Allocator_ext.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp src/share/vm/gc_implementation/g1/heapRegion.cpp src/share/vm/gc_implementation/g1/heapRegion.hpp src/share/vm/gc_implementation/g1/vmStructs_g1.hpp src/share/vm/gc_implementation/g1/vm_operations_g1.cpp src/share/vm/gc_implementation/g1/vm_operations_g1.hpp src/share/vm/runtime/vm_operations.hpp
diffstat 19 files changed, 829 insertions(+), 441 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1Allocator.java	Fri Sep 05 09:49:19 2014 +0200
@@ -0,0 +1,40 @@
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class G1Allocator extends VMObject {
+
+  //size_t _summary_bytes_used;
+  static private CIntegerField summaryBytesUsedField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  static private synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("G1Allocator");
+
+    summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+  }
+
+  public long getSummaryBytes() {
+    return summaryBytesUsedField.getValue(addr);
+  }
+
+  public G1Allocator(Address addr) {
+    super(addr);
+
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Thu Sep 04 16:53:27 2014 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Fri Sep 05 09:49:19 2014 +0200
@@ -36,7 +36,6 @@
 import sun.jvm.hotspot.runtime.VM;
 import sun.jvm.hotspot.runtime.VMObjectFactory;
 import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
@@ -47,8 +46,8 @@
     static private long hrmFieldOffset;
     // MemRegion _g1_reserved;
     static private long g1ReservedFieldOffset;
-    // size_t _summary_bytes_used;
-    static private CIntegerField summaryBytesUsedField;
+    // G1Allocator* _allocator
+    static private AddressField g1Allocator;
     // G1MonitoringSupport* _g1mm;
     static private AddressField g1mmField;
     // HeapRegionSet _old_set;
@@ -68,7 +67,7 @@
         Type type = db.lookupType("G1CollectedHeap");
 
         hrmFieldOffset = type.getField("_hrm").getOffset();
-        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        g1Allocator = type.getAddressField("_allocator");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
         humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
@@ -79,7 +78,7 @@
     }
 
     public long used() {
-        return summaryBytesUsedField.getValue(addr);
+        return allocator().getSummaryBytes();
     }
 
     public long n_regions() {
@@ -97,6 +96,11 @@
         return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
     }
 
+    public G1Allocator allocator() {
+        Address g1AllocatorAddr = g1Allocator.getValue(addr);
+        return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
+    }
+
     public HeapRegionSetBase oldSet() {
         Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
         return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -129,8 +129,7 @@
     // Note that we first perform the allocation and then we store the
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
-    _alloc_region = new_alloc_region;
-    _count += 1;
+    update_alloc_region(new_alloc_region);
     trace("region allocation successful");
     return result;
   } else {
@@ -172,6 +171,19 @@
   trace("set");
 }
 
+void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
+  trace("update");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+
+  _alloc_region = alloc_region;
+  _alloc_region->set_allocation_context(allocation_context());
+  _count += 1;
+  trace("updated");
+}
+
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
@@ -225,5 +237,70 @@
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
+    _alloc_region(NULL), _count(0), _used_bytes_before(0),
+    _allocation_context(AllocationContext::system()) { }
+
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+                                                    bool force) {
+  return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+                                       size_t allocated_bytes) {
+  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
 
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next, taking the
+      // end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
+
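
Editor's note: the OldGCAllocRegion::release() code added above pads the unused tail of the last card with a dummy object, so that remembered-set scanning of the retained old region never races with allocating threads updating the BOT of that card. A minimal, self-contained sketch of the fill arithmetic follows; kCardBytes, kHeapWordSize and kMinFillWords are illustrative stand-ins for G1BlockOffsetSharedArray::N_bytes, HeapWordSize and G1CollectedHeap::min_fill_size(), and plain byte offsets stand in for HeapWord pointers.

#include <algorithm>
#include <cstddef>
#include <iostream>

int main() {
  const std::size_t kCardBytes    = 512; // stand-in for G1BlockOffsetSharedArray::N_bytes
  const std::size_t kHeapWordSize = 8;   // stand-in for HeapWordSize on a 64-bit VM
  const std::size_t kMinFillWords = 2;   // stand-in for G1CollectedHeap::min_fill_size()

  std::size_t region_end = 8 * 1024;     // pretend region end, as a byte offset
  std::size_t top        = 5000;         // pretend current allocation top

  // Round top up to the next card boundary (what align_ptr_up() does).
  std::size_t aligned_top = (top + kCardBytes - 1) & ~(kCardBytes - 1);
  std::size_t to_allocate_words = (aligned_top - top) / kHeapWordSize;

  if (to_allocate_words != 0) {
    // Fill at least the minimum object size, but never past the region end.
    to_allocate_words = std::min((region_end - top) / kHeapWordSize,
                                 std::max(to_allocate_words, kMinFillWords));
    if (to_allocate_words >= kMinFillWords) {
      std::cout << "pad the last card with a " << to_allocate_words
                << "-word dummy object\n";
    }
  }
  return 0;
}
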
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -57,6 +57,9 @@
   // correct use of init() and release()).
   HeapRegion* volatile _alloc_region;
 
+  // Allocation context associated with this alloc region.
+  AllocationContext_t _allocation_context;
+
   // It keeps track of the distinct number of regions that are used
   // for allocation in the active interval of this object, i.e.,
   // between a call to init() and a call to release(). The count
@@ -110,6 +113,10 @@
   // else can allocate out of it.
   void retire(bool fill_up);
 
+  // After a region is allocated by alloc_new_region, this
+  // method is used to set it as the active alloc_region
+  void update_alloc_region(HeapRegion* alloc_region);
+
   // Allocate a new active region and use it to perform a word_size
   // allocation. The force parameter will be passed on to
   // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
@@ -137,6 +144,9 @@
     return (hr == _dummy_region) ? NULL : hr;
   }
 
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t  allocation_context() { return _allocation_context; }
+
   uint count() { return _count; }
 
   // The following two are the building blocks for the allocation method.
@@ -182,6 +192,40 @@
 #endif // G1_ALLOC_REGION_TRACING
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has
+  // been allocated into has been completely filled by a dummy object.  This
+  // avoids races when remembered set scanning wants to update the BOT of the
+  // last card in the retained old gc alloc region, and allocation threads
+  // allocating into that card at the same time.
+  virtual HeapRegion* release();
+};
+
 class ar_ext_msg : public err_msg {
 public:
   ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
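
Editor's note: the header changes above give every G1AllocRegion an allocation context and route newly allocated regions through update_alloc_region(), which tags the region with that context before making it the active one. A minimal sketch of that step, assuming simplified stand-ins (Region and AllocRegion below are not the HotSpot classes):

#include <cassert>
#include <cstddef>
#include <iostream>

typedef unsigned char AllocationContext_t;

struct Region {                               // illustrative stand-in for HeapRegion
  std::size_t used_words;
  AllocationContext_t context;
  bool is_empty() const { return used_words == 0; }
};

class AllocRegion {                           // illustrative stand-in for G1AllocRegion
  Region* _alloc_region;
  AllocationContext_t _allocation_context;
  unsigned _count;
public:
  AllocRegion() : _alloc_region(NULL), _allocation_context(0), _count(0) {}

  // Mirrors G1AllocRegion::update_alloc_region(): the region was already
  // allocated into, so the "active region is never empty" invariant holds.
  void update_alloc_region(Region* r) {
    assert(r != NULL && !r->is_empty() && "pre-condition");
    _alloc_region = r;
    _alloc_region->context = _allocation_context;
    _count += 1;
  }

  unsigned count() const { return _count; }
};

int main() {
  Region r = {16, 0};     // 16 words already allocated in the new region
  AllocRegion ar;
  ar.update_alloc_region(&r);
  std::cout << "regions used so far: " << ar.count() << "\n";
  return 0;
}
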
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+
+typedef unsigned char AllocationContext_t;
+
+class AllocationContext : AllStatic {
+public:
+  // Currently used context
+  static AllocationContext_t current() {
+    return 0;
+  }
+  // System wide default context
+  static AllocationContext_t system() {
+    return 0;
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
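
Editor's note: in this changeset AllocationContext_t is a plain unsigned char and both current() and system() return 0, so the default build funnels every allocation through a single context; the new parameter plumbing is behaviour-neutral until an extension supplies real contexts. A small, self-contained sketch of that observation:

#include <iostream>
#include <limits>

typedef unsigned char AllocationContext_t;   // same typedef as above

struct AllocationContext {                    // mirrors the AllStatic class above
  static AllocationContext_t current() { return 0; }
  static AllocationContext_t system()  { return 0; }
};

int main() {
  // In the default build every allocation maps to context 0.
  std::cout << "current = " << int(AllocationContext::current())
            << ", system = " << int(AllocationContext::system()) << "\n";
  // The typedef bounds how many distinct contexts an extension could use.
  std::cout << "max contexts = "
            << int(std::numeric_limits<AllocationContext_t>::max()) + 1 << "\n";
  return 0;
}
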
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+void G1DefaultAllocator::init_mutator_alloc_region() {
+  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
+  _mutator_alloc_region.init();
+}
+
+void G1DefaultAllocator::release_mutator_alloc_region() {
+  _mutator_alloc_region.release();
+  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+}
+
+void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                            OldGCAllocRegion* old,
+                                            HeapRegion** retained_old) {
+  HeapRegion* retained_region = *retained_old;
+  *retained_old = NULL;
+
+  // We will discard the current GC alloc region if:
+  // a) it's in the collection set (it can happen!),
+  // b) it's already full (no point in using it),
+  // c) it's empty (this means that it was emptied during
+  // a cleanup and it should be on the free list now), or
+  // d) it's humongous (this means that it was emptied
+  // during a cleanup and was added to the free list, but
+  // has been subsequently used to allocate a humongous
+  // object that may be less than the region size).
+  if (retained_region != NULL &&
+      !retained_region->in_collection_set() &&
+      !(retained_region->top() == retained_region->end()) &&
+      !retained_region->is_empty() &&
+      !retained_region->isHumongous()) {
+    retained_region->record_top_and_timestamp();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _g1h->_old_set.remove(retained_region);
+    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+    retained_region->note_start_of_copying(during_im);
+    old->set(retained_region);
+    _g1h->_hr_printer.reuse(retained_region);
+    evacuation_info.set_alloc_regions_used_before(retained_region->used());
+  }
+}
+
+void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+  reuse_retained_old_region(evacuation_info,
+                            &_old_gc_alloc_region,
+                            &_retained_old_gc_alloc_region);
+}
+
+void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+  AllocationContext_t context = AllocationContext::current();
+  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
+                                         old_gc_alloc_region(context)->count());
+  survivor_gc_alloc_region(context)->release();
+  // If we have an old GC alloc region to release, we'll save it in
+  // _retained_old_gc_alloc_region. If we don't
+  // _retained_old_gc_alloc_region will become NULL. This is what we
+  // want either way so no reason to check explicitly for either
+  // condition.
+  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+
+  if (ResizePLAB) {
+    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+  }
+}
+
+void G1DefaultAllocator::abandon_gc_alloc_regions() {
+  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  _retained_old_gc_alloc_region = NULL;
+}
+
+G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
+  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+
+HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+  }
+  return obj;
+}
+
+G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
+            G1ParGCAllocator(g1h),
+            _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+            _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
+
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
+}
+
+void G1DefaultParGCAllocator::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
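
Editor's note: G1ParGCAllocator::allocate_slow() above only retires the current PLAB and requests a new one when the object being copied is small relative to the buffer (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct); larger objects go straight to par_allocate_during_gc(). A minimal sketch of that guard with illustrative values (the PLAB size and waste percentage below are assumptions, not the VM defaults):

#include <cstddef>
#include <iostream>

// Decide whether allocate_slow() would retire the current PLAB and request a
// new one, or fall through to a direct par_allocate_during_gc() call.
bool refill_plab(std::size_t word_sz, std::size_t gclab_word_size,
                 std::size_t waste_pct) {
  // Same guard as in G1ParGCAllocator::allocate_slow(): only refill when the
  // object is small enough that discarding the old buffer wastes little.
  return word_sz * 100 < gclab_word_size * waste_pct;
}

int main() {
  const std::size_t gclab_word_size = 4096; // illustrative PLAB size in words
  const std::size_t waste_pct       = 10;   // illustrative ParallelGCBufferWastePct

  std::cout << refill_plab(100,  gclab_word_size, waste_pct) << "\n"; // 1: refill PLAB
  std::cout << refill_plab(1000, gclab_word_size, waste_pct) << "\n"; // 0: allocate directly
  return 0;
}
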
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+
+enum GCAllocPurpose {
+  GCAllocForTenured,
+  GCAllocForSurvived,
+  GCAllocPurposeCount
+};
+
+// Base class for G1 allocators.
+class G1Allocator : public CHeapObj<mtGC> {
+  friend class VMStructs;
+protected:
+  G1CollectedHeap* _g1h;
+
+  // Outside of GC pauses, the number of bytes used in all regions other
+  // than the current allocation region.
+  size_t _summary_bytes_used;
+
+public:
+   G1Allocator(G1CollectedHeap* heap) :
+     _g1h(heap), _summary_bytes_used(0) { }
+
+   static G1Allocator* create_allocator(G1CollectedHeap* g1h);
+
+   virtual void init_mutator_alloc_region() = 0;
+   virtual void release_mutator_alloc_region() = 0;
+
+   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
+   virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
+   virtual void abandon_gc_alloc_regions() = 0;
+
+   virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
+   virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
+   virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
+   virtual size_t                 used() = 0;
+   virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
+
+   void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                                            OldGCAllocRegion* old,
+                                                            HeapRegion** retained);
+
+   size_t used_unlocked() const {
+     return _summary_bytes_used;
+   }
+
+   void increase_used(size_t bytes) {
+     _summary_bytes_used += bytes;
+   }
+
+   void decrease_used(size_t bytes) {
+     assert(_summary_bytes_used >= bytes,
+            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
+                _summary_bytes_used, bytes));
+     _summary_bytes_used -= bytes;
+   }
+
+   void set_used(size_t bytes) {
+     _summary_bytes_used = bytes;
+   }
+};
+
+// The default allocator for G1.
+class G1DefaultAllocator : public G1Allocator {
+protected:
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  HeapRegion* _retained_old_gc_alloc_region;
+public:
+  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
+
+  virtual void init_mutator_alloc_region();
+  virtual void release_mutator_alloc_region();
+
+  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
+  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  virtual void abandon_gc_alloc_regions();
+
+  virtual bool is_retained_old_region(HeapRegion* hr) {
+    return _retained_old_gc_alloc_region == hr;
+  }
+
+  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
+    return &_mutator_alloc_region;
+  }
+
+  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
+    return &_survivor_gc_alloc_region;
+  }
+
+  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
+    return &_old_gc_alloc_region;
+  }
+
+  virtual size_t used() {
+    assert(Heap_lock->owner() != NULL,
+           "Should be owned on this thread's behalf.");
+    size_t result = _summary_bytes_used;
+
+    // Read only once in case it is set to NULL concurrently
+    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+    if (hr != NULL) {
+      result += hr->used();
+    }
+    return result;
+  }
+};
+
+class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+private:
+  bool _retired;
+
+public:
+  G1ParGCAllocBuffer(size_t gclab_word_size);
+  virtual ~G1ParGCAllocBuffer() {
+    guarantee(_retired, "Allocation buffer has not been retired");
+  }
+
+  virtual void set_buf(HeapWord* buf) {
+    ParGCAllocBuffer::set_buf(buf);
+    _retired = false;
+  }
+
+  virtual void retire(bool end_of_gc, bool retain) {
+    if (_retired) {
+      return;
+    }
+    ParGCAllocBuffer::retire(end_of_gc, retain);
+    _retired = true;
+  }
+};
+
+class G1ParGCAllocator : public CHeapObj<mtGC> {
+  friend class G1ParScanThreadState;
+protected:
+  G1CollectedHeap* _g1h;
+
+  size_t _alloc_buffer_waste;
+  size_t _undo_waste;
+
+  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
+
+  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
+
+  virtual void retire_alloc_buffers() = 0;
+  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+
+public:
+  G1ParGCAllocator(G1CollectedHeap* g1h) :
+    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+  }
+
+  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
+
+  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
+  size_t undo_waste() {return _undo_waste; }
+
+  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+    HeapWord* obj = NULL;
+    if (purpose == GCAllocForSurvived) {
+      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+    } else {
+      obj = alloc_buffer(purpose, context)->allocate(word_sz);
+    }
+    if (obj != NULL) {
+      return obj;
+    }
+    return allocate_slow(purpose, word_sz, context);
+  }
+
+  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+    if (alloc_buffer(purpose, context)->contains(obj)) {
+      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+             "should contain whole object");
+      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
+      add_to_undo_waste(word_sz);
+    }
+  }
+};
+
+class G1DefaultParGCAllocator : public G1ParGCAllocator {
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+
+public:
+  G1DefaultParGCAllocator(G1CollectedHeap* g1h);
+
+  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
+    return _alloc_buffers[purpose];
+  }
+
+  virtual void retire_alloc_buffers() ;
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
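
Editor's note: the header above is the seam this changeset introduces. G1Allocator exposes the alloc regions through pure-virtual, per-context accessors, G1DefaultAllocator returns its single set of regions regardless of context, and create_allocator() (defined in g1Allocator_ext.cpp) chooses the concrete class. A minimal sketch of that shape under assumed, simplified names (Allocator, DefaultAllocator and MutatorAllocRegion here are illustrative, not the HotSpot classes):

#include <iostream>
#include <memory>

typedef unsigned char AllocationContext_t;

struct MutatorAllocRegion { int id; };    // illustrative stand-in

// Mirrors the G1Allocator / G1DefaultAllocator split: the base class only
// declares the per-context accessor; a concrete allocator decides whether
// the context actually selects anything.
class Allocator {
public:
  virtual ~Allocator() {}
  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
  static Allocator* create_allocator();   // factory, like G1Allocator::create_allocator()
};

class DefaultAllocator : public Allocator {
  MutatorAllocRegion _region;
public:
  DefaultAllocator() { _region.id = 0; }
  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t) {
    return &_region;                      // one region, context ignored
  }
};

// The _ext translation unit supplies the factory; a specialized build could
// return a context-aware subclass here instead.
Allocator* Allocator::create_allocator() { return new DefaultAllocator(); }

int main() {
  std::unique_ptr<Allocator> a(Allocator::create_allocator());
  std::cout << a->mutator_alloc_region(0)->id << "\n";
  return 0;
}

Because only the factory picks the concrete class, a build that wants context-specific allocation only needs to replace that one definition, which is why it lives in the separate _ext file rather than in g1Allocator.cpp.
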
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator_ext.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
+  return new G1DefaultAllocator(g1h);
+}
+
+G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
+  return new G1DefaultParGCAllocator(g1h);
+}
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -608,7 +608,8 @@
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
-                                                           size_t word_size) {
+                                                           size_t word_size,
+                                                           AllocationContext_t context) {
   assert(first != G1_NO_HRM_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
@@ -660,13 +661,14 @@
   // that there is a single object that starts at the bottom of the
   // first region.
   first_hr->set_startsHumongous(new_top, new_end);
-
+  first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
+    hr->set_allocation_context(context);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
   // end of the last one should match new_end.
@@ -733,7 +735,7 @@
   check_bitmaps("Humongous Region Allocation", first_hr);
 
   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  _summary_bytes_used += first_hr->used();
+  _allocator->increase_used(first_hr->used());
   _humongous_set.add(first_hr);
 
   return new_obj;
@@ -742,7 +744,7 @@
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
@@ -810,7 +812,8 @@
 
   HeapWord* result = NULL;
   if (first != G1_NO_HRM_INDEX) {
-    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+                                                       word_size, context);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
@@ -854,6 +857,8 @@
 
     // Create the garbage collection operation...
     VM_G1CollectForAllocation op(gc_count_before, word_size);
+    op.set_allocation_context(AllocationContext::current());
+
     // ...and get the VM thread to execute it.
     VMThread::execute(&op);
 
@@ -889,8 +894,9 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                           unsigned int *gc_count_before_ret,
-                                           int* gclocker_retry_count_ret) {
+                                                   AllocationContext_t context,
+                                                   unsigned int *gc_count_before_ret,
+                                                   int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
@@ -911,23 +917,22 @@
 
     {
       MutexLockerEx x(Heap_lock);
-
-      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                    false /* bot_updates */);
       if (result != NULL) {
         return result;
       }
 
       // If we reach here, attempt_allocation_locked() above failed to
       // allocate a new region. So the mutator alloc region should be NULL.
-      assert(_mutator_alloc_region.get() == NULL, "only way to get here");
+      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here,
           // can_expand_young_list() does this when it returns true.
-          result = _mutator_alloc_region.attempt_allocation_force(word_size,
-                                                      false /* bot_updates */);
+          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
+                                                                                       false /* bot_updates */);
           if (result != NULL) {
             return result;
           }
@@ -987,8 +992,8 @@
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
-    result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                           false /* bot_updates */);
     if (result != NULL) {
       return result;
     }
@@ -1006,8 +1011,8 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                          unsigned int * gc_count_before_ret,
-                                          int* gclocker_retry_count_ret) {
+                                                        unsigned int * gc_count_before_ret,
+                                                        int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1048,7 +1053,7 @@
       // Given that humongous objects are not allocated in young
       // regions, we'll first try to do the allocation without doing a
       // collection hoping that there's enough space in the heap.
-      result = humongous_obj_allocate(word_size);
+      result = humongous_obj_allocate(word_size, AllocationContext::current());
       if (result != NULL) {
         return result;
       }
@@ -1124,17 +1129,18 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_mutator_alloc_region) {
+                                                           AllocationContext_t context,
+                                                           bool expect_null_mutator_alloc_region) {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_mutator_alloc_region.get() == NULL ||
+  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
                                              !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");
 
   if (!isHumongous(word_size)) {
-    return _mutator_alloc_region.attempt_allocation_locked(word_size,
+    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                       false /* bot_updates */);
   } else {
-    HeapWord* result = humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
       g1_policy()->set_initiate_conc_mark_if_possible();
     }
@@ -1334,8 +1340,8 @@
       concurrent_mark()->abort();
 
       // Make sure we'll choose a new allocation region afterwards.
-      release_mutator_alloc_region();
-      abandon_gc_alloc_regions();
+      _allocator->release_mutator_alloc_region();
+      _allocator->abandon_gc_alloc_regions();
       g1_rem_set()->cleanupHRRS();
 
       // We should call this after we retire any currently active alloc
@@ -1509,7 +1515,7 @@
 
       clear_cset_fast_test();
 
-      init_mutator_alloc_region();
+      _allocator->init_mutator_alloc_region();
 
       double end = os::elapsedTime();
       g1_policy()->record_full_collection_end();
@@ -1645,6 +1651,7 @@
 
 HeapWord*
 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
+                                           AllocationContext_t context,
                                            bool* succeeded) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
@@ -1652,7 +1659,8 @@
   // Let's attempt the allocation first.
   HeapWord* result =
     attempt_allocation_at_safepoint(word_size,
-                                 false /* expect_null_mutator_alloc_region */);
+                                    context,
+                                    false /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1662,7 +1670,7 @@
   // incremental pauses.  Therefore, at least for now, we'll favor
   // expansion over collection.  (This might change in the future if we can
   // do something smarter than full collection to satisfy a failed alloc.)
-  result = expand_and_allocate(word_size);
+  result = expand_and_allocate(word_size, context);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1679,7 +1687,8 @@
 
   // Retry the allocation
   result = attempt_allocation_at_safepoint(word_size,
-                                  true /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1696,7 +1705,8 @@
 
   // Retry the allocation once more
   result = attempt_allocation_at_safepoint(word_size,
-                                  true /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1718,7 +1728,7 @@
 // successful, perform the allocation and return the address of the
 // allocated block, or else "NULL".
 
-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
@@ -1733,7 +1743,8 @@
     _hrm.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
-                                 false /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           false /* expect_null_mutator_alloc_region */);
   }
   return NULL;
 }
@@ -1810,7 +1821,7 @@
   // We should only reach here at the end of a Full GC which means we
   // should not not be holding to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
-  abandon_gc_alloc_regions();
+  _allocator->abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
@@ -1843,7 +1854,7 @@
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
   _mark_in_progress(false),
-  _cg1r(NULL), _summary_bytes_used(0),
+  _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
@@ -1855,7 +1866,6 @@
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _retained_old_gc_alloc_region(NULL),
   _survivor_plab_stats(YoungPLABSize, PLABWeight),
   _old_plab_stats(OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
@@ -1877,6 +1887,7 @@
     vm_exit_during_initialization("Failed necessary allocation.");
   }
 
+  _allocator = G1Allocator::create_allocator(_g1h);
   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
 
   int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -2122,7 +2133,7 @@
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);
 
-  init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_region();
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
@@ -2296,21 +2307,12 @@
 
 
 // Computes the sum of the storage used by the various regions.
-
 size_t G1CollectedHeap::used() const {
-  assert(Heap_lock->owner() != NULL,
-         "Should be owned on this thread's behalf.");
-  size_t result = _summary_bytes_used;
-  // Read only once in case it is set to NULL concurrently
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr != NULL)
-    result += hr->used();
-  return result;
+  return _allocator->used();
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
-  size_t result = _summary_bytes_used;
-  return result;
+  return _allocator->used_unlocked();
 }
 
 class SumUsedClosure: public HeapRegionClosure {
@@ -2354,7 +2356,8 @@
 
   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
     // Let's use the existing mechanism for the allocation
-    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
+    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
+                                                 AllocationContext::system());
     if (dummy_obj != NULL) {
       MemRegion mr(dummy_obj, word_size);
       CollectedHeap::fill_with_object(mr);
@@ -2494,6 +2497,7 @@
                                  true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+      op.set_allocation_context(AllocationContext::current());
 
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
@@ -2894,7 +2898,7 @@
   // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.
 
-  HeapRegion* hr = _mutator_alloc_region.get();
+  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
   size_t max_tlab = max_tlab_size() * wordSize;
   if (hr == NULL) {
     return max_tlab;
@@ -3615,6 +3619,8 @@
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
                              gc_cause);
+
+  op.set_allocation_context(AllocationContext::current());
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
@@ -3944,7 +3950,7 @@
 
         // Forget the current alloc region (we might even choose it to be part
         // of the collection set!).
-        release_mutator_alloc_region();
+        _allocator->release_mutator_alloc_region();
 
         // We should call this after we retire the mutator alloc
         // region(s) so that all the ALLOC / RETIRE events are generated
@@ -4035,7 +4041,7 @@
         setup_surviving_young_words();
 
         // Initialize the GC alloc regions.
-        init_gc_alloc_regions(evacuation_info);
+        _allocator->init_gc_alloc_regions(evacuation_info);
 
         // Actually do the work...
         evacuate_collection_set(evacuation_info);
@@ -4084,7 +4090,7 @@
         _young_list->reset_auxilary_lists();
 
         if (evacuation_failed()) {
-          _summary_bytes_used = recalculate_used();
+          _allocator->set_used(recalculate_used());
           uint n_queues = MAX2((int)ParallelGCThreads, 1);
           for (uint i = 0; i < n_queues; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
@@ -4094,7 +4100,7 @@
         } else {
           // The "used" of the the collection set have already been subtracted
           // when they were freed.  Add in the bytes evacuated.
-          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
         if (g1_policy()->during_initial_mark_pause()) {
@@ -4116,7 +4122,7 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        init_mutator_alloc_region();
+        _allocator->init_mutator_alloc_region();
 
         {
           size_t expand_bytes = g1_policy()->expansion_amount();
@@ -4261,80 +4267,6 @@
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
-void G1CollectedHeap::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
-}
-
-void G1CollectedHeap::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
-}
-
-void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
-  HeapRegion* retained_region = _retained_old_gc_alloc_region;
-  _retained_old_gc_alloc_region = NULL;
-
-  // We will discard the current GC alloc region if:
-  // a) it's in the collection set (it can happen!),
-  // b) it's already full (no point in using it),
-  // c) it's empty (this means that it was emptied during
-  // a cleanup and it should be on the free list now), or
-  // d) it's humongous (this means that it was emptied
-  // during a cleanup and was added to the free list, but
-  // has been subsequently used to allocate a humongous
-  // object that may be less than the region size).
-  if (retained_region != NULL &&
-      !retained_region->in_collection_set() &&
-      !(retained_region->top() == retained_region->end()) &&
-      !retained_region->is_empty() &&
-      !retained_region->isHumongous()) {
-    retained_region->record_top_and_timestamp();
-    // The retained region was added to the old region set when it was
-    // retired. We have to remove it now, since we don't allow regions
-    // we allocate to in the region sets. We'll re-add it later, when
-    // it's retired again.
-    _old_set.remove(retained_region);
-    bool during_im = g1_policy()->during_initial_mark_pause();
-    retained_region->note_start_of_copying(during_im);
-    _old_gc_alloc_region.set(retained_region);
-    _hr_printer.reuse(retained_region);
-    evacuation_info.set_alloc_regions_used_before(retained_region->used());
-  }
-}
-
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
-
-  use_retained_old_gc_alloc_region(evacuation_info);
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
-                                         _old_gc_alloc_region.count());
-  _survivor_gc_alloc_region.release();
-  // If we have an old GC alloc region to release, we'll save it in
-  // _retained_old_gc_alloc_region. If we don't
-  // _retained_old_gc_alloc_region will become NULL. This is what we
-  // want either way so no reason to check explicitly for either
-  // condition.
-  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
-
-  if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-  }
-}
-
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
-  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
-  _retained_old_gc_alloc_region = NULL;
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
@@ -4475,25 +4407,26 @@
 }
 
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size) {
+                                                  size_t word_size,
+                                                  AllocationContext_t context) {
   if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size);
+    HeapWord* result = survivor_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the old gen in case we can fit the
       // object there.
-      return old_attempt_allocation(word_size);
+      return old_attempt_allocation(word_size, context);
     }
   } else {
     assert(purpose ==  GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size);
+    HeapWord* result = old_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the survivors in case we can fit the
       // object there.
-      return survivor_attempt_allocation(word_size);
+      return survivor_attempt_allocation(word_size, context);
     }
   }
 
@@ -4502,9 +4435,6 @@
   return NULL;
 }
 
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
-  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
@@ -5965,7 +5895,7 @@
     }
   }
 
-  release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   // Reset and re-enable the hot card cache.
@@ -6079,10 +6009,7 @@
 }
 
 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
-  assert(_summary_bytes_used >= bytes,
-         err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
-                  _summary_bytes_used, bytes));
-  _summary_bytes_used -= bytes;
+  _allocator->decrease_used(bytes);
 }
 
 class G1ParCleanupCTTask : public AbstractGangTask {
@@ -6710,6 +6637,7 @@
 
     if (r->is_empty()) {
       // Add free regions to the free list
+      r->set_allocation_context(AllocationContext::system());
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
@@ -6742,12 +6670,12 @@
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
-    _summary_bytes_used = cl.total_used();
-  }
-  assert(_summary_bytes_used == recalculate_used(),
-         err_msg("inconsistent _summary_bytes_used, "
+    _allocator->set_used(cl.total_used());
+  }
+  assert(_allocator->used_unlocked() == recalculate_used(),
+         err_msg("inconsistent _allocator->used_unlocked(), "
                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
-                 _summary_bytes_used, recalculate_used()));
+                 _allocator->used_unlocked(), recalculate_used()));
 }
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -6787,7 +6715,7 @@
   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
-  _summary_bytes_used += allocated_bytes;
+  _allocator->increase_used(allocated_bytes);
   _hr_printer.retire(alloc_region);
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
@@ -6795,11 +6723,6 @@
   g1mm()->update_eden_size();
 }
 
-HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
-                                                    bool force) {
-  return _g1h->new_mutator_alloc_region(word_size, force);
-}
-
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
@@ -6816,11 +6739,6 @@
   set_par_threads(n_workers);
 }
 
-void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
-                                       size_t allocated_bytes) {
-  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
-}
-
 // Methods for the GC alloc regions
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
@@ -6870,58 +6788,6 @@
   _hr_printer.retire(alloc_region);
 }
 
-HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
-                                                       bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
-}
-
-void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                          size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForSurvived);
-}
-
-HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
-                                                  bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
-}
-
-void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                     size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForTenured);
-}
-
-HeapRegion* OldGCAllocRegion::release() {
-  HeapRegion* cur = get();
-  if (cur != NULL) {
-    // Determine how far we are from the next card boundary. If it is smaller than
-    // the minimum object size we can allocate into, expand into the next card.
-    HeapWord* top = cur->top();
-    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
-
-    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
-
-    if (to_allocate_words != 0) {
-      // We are not at a card boundary. Fill up, possibly into the next card, taking the
-      // end of the region and the minimum object size into account.
-      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
-                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
-
-      // Skip allocation if there is not enough space to allocate even the smallest
-      // possible object. In this case this region will not be retained, so the
-      // original problem cannot occur.
-      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
-        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
-        CollectedHeap::fill_with_object(dummy, to_allocate_words);
-      }
-    }
-  }
-  return G1AllocRegion::release();
-}
-
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
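
For reference, a standalone sketch (not part of the changeset) of the card-boundary padding that the removed OldGCAllocRegion::release() above performs before the old GC alloc region is retained: the space up to the next card boundary is filled with a dummy object so that remembered-set scanning of the last card cannot race with threads still allocating into it. CARD_BYTES and MIN_FILL_WORDS are illustrative stand-ins for G1BlockOffsetSharedArray::N_bytes and G1CollectedHeap::min_fill_size().

#include <cstddef>
#include <cstdint>
#include <algorithm>

static const size_t HEAP_WORD_BYTES = sizeof(void*);   // word size on the target platform
static const size_t CARD_BYTES      = 512;              // assumed card size
static const size_t MIN_FILL_WORDS  = 2;                // assumed minimum filler-object size

// Returns how many words to fill starting at 'top', or 0 when 'top' is already
// card aligned or the remaining gap is too small for even the smallest filler.
size_t card_padding_words(uintptr_t top, uintptr_t region_end) {
  uintptr_t aligned_top = (top + CARD_BYTES - 1) & ~(uintptr_t)(CARD_BYTES - 1);
  size_t to_allocate = (size_t)((aligned_top - top) / HEAP_WORD_BYTES);
  if (to_allocate == 0) {
    return 0;  // already at a card boundary, nothing to pad
  }
  // Never pad past the end of the region, but pad at least a minimum-size object.
  to_allocate = std::min((size_t)((region_end - top) / HEAP_WORD_BYTES),
                         std::max(to_allocate, MIN_FILL_WORDS));
  return (to_allocate >= MIN_FILL_WORDS) ? to_allocate : 0;
}
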
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
@@ -80,12 +82,6 @@
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
-enum GCAllocPurpose {
-  GCAllocForTenured,
-  GCAllocForSurvived,
-  GCAllocPurposeCount
-};
-
 class YoungList : public CHeapObj<mtGC> {
 private:
   G1CollectedHeap* _g1h;
@@ -158,40 +154,6 @@
   void          print();
 };
 
-class MutatorAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  MutatorAllocRegion()
-    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  SurvivorGCAllocRegion()
-  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  OldGCAllocRegion()
-  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
-
-  // This specialization of release() makes sure that the last card that has been
-  // allocated into has been completely filled by a dummy object.
-  // This avoids races when remembered set scanning wants to update the BOT of the
-  // last card in the retained old gc alloc region, and allocation threads
-  // allocating into that card at the same time.
-  virtual HeapRegion* release();
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -222,6 +184,9 @@
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
+  friend class G1Allocator;
+  friend class G1DefaultAllocator;
+  friend class G1ResManAllocator;
 
   // Closures used in implementation.
   template <G1Barrier barrier, G1Mark do_mark_object>
@@ -232,6 +197,8 @@
   friend class G1ParScanClosureSuper;
   friend class G1ParEvacuateFollowersClosure;
   friend class G1ParTask;
+  friend class G1ParGCAllocator;
+  friend class G1DefaultParGCAllocator;
   friend class G1FreeGarbageRegionClosure;
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
@@ -293,44 +260,15 @@
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;
 
-  // Alloc region used to satisfy mutator allocation requests.
-  MutatorAllocRegion _mutator_alloc_region;
-
-  // Alloc region used to satisfy allocation requests by the GC for
-  // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  // Class that handles the different kinds of allocations.
+  G1Allocator* _allocator;
 
   // PLAB sizing policy for survivors.
   PLABStats _survivor_plab_stats;
 
-  // Alloc region used to satisfy allocation requests by the GC for
-  // old objects.
-  OldGCAllocRegion _old_gc_alloc_region;
-
   // PLAB sizing policy for tenured objects.
   PLABStats _old_plab_stats;
 
-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
-
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // The last old region we allocated to during the last GC.
-  // Typically, it is not full so we should re-use it during the next GC.
-  HeapRegion* _retained_old_gc_alloc_region;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
@@ -348,9 +286,6 @@
   // It initializes the GC alloc regions at the start of a GC.
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
-  // Setup the retained old gc alloc region as the current old gc alloc region.
-  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
-
   // It releases the GC alloc regions at the end of a GC.
   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
@@ -361,13 +296,6 @@
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
-
-  // Outside of GC pauses, the number of bytes used in all regions other
-  // than the current allocation region.
-  size_t _summary_bytes_used;
-
   // Records whether the region at the given index is kept live by roots or
   // references from the young generation.
   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
@@ -525,11 +453,12 @@
   // humongous region.
   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                       uint num_regions,
-                                                      size_t word_size);
+                                                      size_t word_size,
+                                                      AllocationContext_t context);
 
   // Attempt to allocate a humongous object of the given size. Return
   // NULL if unsuccessful.
-  HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
 
   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
@@ -585,6 +514,7 @@
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
+                                    AllocationContext_t context,
                                     unsigned int* gc_count_before_ret,
                                     int* gclocker_retry_count_ret);
 
@@ -599,7 +529,8 @@
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_mutator_alloc_region);
+                                            AllocationContext_t context,
+                                            bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that the post
   // write barrier never queues anything when updating objects on this
@@ -611,7 +542,9 @@
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
+                                   size_t word_size,
+                                   AllocationContext_t context);
 
   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                     HeapRegion*    alloc_region,
@@ -623,10 +556,12 @@
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+  inline HeapWord* survivor_attempt_allocation(size_t word_size,
+                                               AllocationContext_t context);
 
   // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size);
+  inline HeapWord* old_attempt_allocation(size_t word_size,
+                                          AllocationContext_t context);
 
   // These methods are the "callbacks" from the G1AllocRegion class.
 
@@ -665,13 +600,15 @@
   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+  HeapWord* satisfy_failed_allocation(size_t word_size,
+                                      AllocationContext_t context,
+                                      bool* succeeded);
 
   // Attempting to expand the heap sufficiently
   // to support an allocation of the given "word_size".  If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
@@ -694,6 +631,27 @@
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
+  // Returns the PLAB statistics given a purpose.
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
+  // Determines PLAB size for a particular allocation purpose.
+  size_t desired_plab_sz(GCAllocPurpose purpose);
+
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
@@ -1271,7 +1229,7 @@
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr) {
-    return hr == _retained_old_gc_alloc_region;
+    return _allocator->is_retained_old_region(hr);
   }
 
   // Perform a collection of the heap; intended for use in implementing
@@ -1752,28 +1710,4 @@
   size_t _max_heap_capacity;
 };
 
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool        _retired;
-
-public:
-  G1ParGCAllocBuffer(size_t gclab_word_size);
-  virtual ~G1ParGCAllocBuffer() {
-    guarantee(_retired, "Allocation buffer has not been retired");
-  }
-
-  virtual void set_buf(HeapWord* buf) {
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  virtual void retire(bool end_of_gc, bool retain) {
-    if (_retired) {
-      return;
-    }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -98,10 +98,12 @@
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
-  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                                   false /* bot_updates */);
   if (result == NULL) {
     result = attempt_allocation_slow(word_size,
+                                     context,
                                      gc_count_before_ret,
                                      gclocker_retry_count_ret);
   }
@@ -112,17 +114,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
-                                                              word_size) {
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+                                                              AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                       false /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                      false /* bot_updates */);
   }
   if (result != NULL) {
     dirty_young_block(result, word_size);
@@ -130,16 +132,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+                                                         AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
-                                                       true /* bot_updates */);
+  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                  true /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                       true /* bot_updates */);
+    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                 true /* bot_updates */);
   }
   return result;
 }
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -38,11 +38,8 @@
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
-    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false), _scanner(g1h, rp),
-    _strong_roots_time(0), _term_time(0),
-    _alloc_buffer_waste(0), _undo_waste(0) {
+    _strong_roots_time(0), _term_time(0) {
   _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
@@ -60,14 +57,14 @@
   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
 
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+  _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
 
   _start = os::elapsedTime();
 }
 
 G1ParScanThreadState::~G1ParScanThreadState() {
-  retire_alloc_buffers();
+  _g1_par_allocator->retire_alloc_buffers();
+  delete _g1_par_allocator;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
 }
 
@@ -90,14 +87,16 @@
   const double elapsed_ms = elapsed_time() * 1000.0;
   const double s_roots_ms = strong_roots_time() * 1000.0;
   const double term_ms    = term_time() * 1000.0;
+  const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
+  const size_t undo_waste         = _g1_par_allocator->undo_waste();
   st->print_cr("%3d %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
-               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
-               alloc_buffer_waste() * HeapWordSize / K,
-               undo_waste() * HeapWordSize / K);
+               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
+               alloc_buffer_waste * HeapWordSize / K,
+               undo_waste * HeapWordSize / K);
 }
 
 #ifdef ASSERT
@@ -164,12 +163,13 @@
                                            : m->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+  AllocationContext_t context = from_region->allocation_context();
+  HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
 #ifndef PRODUCT
   // Should this evacuation fail?
   if (_g1h->evacuation_should_fail()) {
     if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
       obj_ptr = NULL;
     }
   }
@@ -246,66 +246,8 @@
       obj->oop_iterate_backwards(&_scanner);
     }
   } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
     obj = forward_ptr;
   }
   return obj;
 }
-
-HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = NULL;
-  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-    if (buf == NULL) {
-      return NULL; // Let caller handle allocation failure.
-    }
-    // Otherwise.
-    alloc_buf->set_word_size(gclab_word_size);
-    alloc_buf->set_buf(buf);
-
-    obj = alloc_buf->allocate(word_sz);
-    assert(obj != NULL, "buffer was definitely big enough...");
-  } else {
-    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-  }
-  return obj;
-}
-
-void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-  if (alloc_buffer(purpose)->contains(obj)) {
-    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-           "should contain whole object");
-    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-  } else {
-    CollectedHeap::fill_with_object(obj, word_sz);
-    add_to_undo_waste(word_sz);
-  }
-}
-
-HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = NULL;
-  if (purpose == GCAllocForSurvived) {
-    obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
-  } else {
-    obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
-  }
-  if (obj != NULL) {
-    return obj;
-  }
-  return allocate_slow(purpose, word_sz);
-}
-
-void G1ParScanThreadState::retire_alloc_buffers() {
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    size_t waste = _alloc_buffers[ap]->words_remaining();
-    add_to_alloc_buffer_waste(waste);
-    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                               true /* end_of_gc */,
-                                               false /* retain */);
-  }
-}
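
For reference, a standalone sketch (not part of the changeset) of the PLAB fast/slow path that the removed allocate()/allocate_slow() above implement and that is now delegated to G1ParGCAllocator: serve the request from the thread-local buffer when it fits, refill the buffer only when the request is small relative to a full buffer so the discarded tail stays bounded, and fall back to a direct allocation otherwise. All names and sizes below are illustrative stand-ins, e.g. PLAB_WORDS for desired_plab_sz(), WASTE_PCT for ParallelGCBufferWastePct, and malloc for par_allocate_during_gc().

#include <cstddef>
#include <cstdlib>

static const size_t PLAB_WORDS = 1024;  // stand-in for desired_plab_sz(purpose)
static const size_t WASTE_PCT  = 10;    // stand-in for ParallelGCBufferWastePct

struct Plab {
  size_t* bottom = NULL;
  size_t* top    = NULL;
  size_t* end    = NULL;
  size_t  wasted = 0;

  size_t* allocate(size_t words) {
    if (top == NULL || top + words > end) return NULL;
    size_t* obj = top;
    top += words;
    return obj;
  }
  void retire() {
    if (end != NULL) wasted += (size_t)(end - top);  // count the unused tail as waste
    bottom = top = end = NULL;
  }
  void refill(size_t* buf, size_t words) { bottom = top = buf; end = buf + words; }
};

// Direct allocation from the shared GC alloc region; malloc stands in here.
static size_t* direct_allocate(size_t words) {
  return (size_t*)malloc(words * sizeof(size_t));
}

size_t* plab_allocate(Plab* plab, size_t word_sz) {
  if (size_t* obj = plab->allocate(word_sz)) {
    return obj;                                   // fast path: fits in the current buffer
  }
  if (word_sz * 100 < PLAB_WORDS * WASTE_PCT) {
    // Small request: retire the nearly full buffer and start a fresh one.
    plab->retire();
    size_t* buf = direct_allocate(PLAB_WORDS);
    if (buf == NULL) return NULL;                 // let the caller handle allocation failure
    plab->refill(buf, PLAB_WORDS);
    return plab->allocate(word_sz);
  }
  // Large request: refilling would waste too much of a buffer, allocate directly.
  return direct_allocate(word_sz);
}
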
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -46,9 +46,8 @@
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocator*   _g1_par_allocator;
+
   ageTable            _age_table;
 
   G1ParScanClosure    _scanner;
@@ -78,7 +77,6 @@
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
@@ -104,13 +102,6 @@
 
   ageTable*         age_table()       { return &_age_table;       }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
-  size_t undo_waste() const                      { return _undo_waste; }
-
 #ifdef ASSERT
   bool queue_is_empty() const { return _refs->is_empty(); }
 
@@ -126,12 +117,6 @@
 
   template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
- private:
-
-  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
-  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
-  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
-
  public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
@@ -177,8 +162,6 @@
   }
 
  private:
-  void retire_alloc_buffers();
-
   #define G1_PARTIAL_ARRAY_MASK 0x2
 
   inline bool has_partial_array_mask(oop* ref) const {
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -219,6 +219,7 @@
 
   _in_collection_set = false;
 
+  set_allocation_context(AllocationContext::system());
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
   set_young_type(NotYoung);
@@ -346,9 +347,9 @@
 
 HeapRegion::HeapRegion(uint hrm_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
-                       MemRegion mr) :
+                       MemRegion mr, AllocationContext_t context) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
-    _hrm_index(hrm_index),
+    _hrm_index(hrm_index), _allocation_context(context),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
@@ -715,6 +716,8 @@
 
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
+  st->print("AC%4u", allocation_context());
+
   if (isHumongous()) {
     if (startsHumongous())
       st->print(" HS");
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
 #include "gc_implementation/g1/survRateGroup.hpp"
@@ -236,6 +237,8 @@
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
 
+  AllocationContext_t _allocation_context;
+
   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
@@ -332,7 +335,8 @@
  public:
   HeapRegion(uint hrm_index,
              G1BlockOffsetSharedArray* sharedOffsetArray,
-             MemRegion mr);
+             MemRegion mr,
+             AllocationContext_t context = AllocationContext::system());
 
   // Initializing the HeapRegion not only resets the data structure, but also
   // resets the BOT for that heap region.
@@ -527,6 +531,14 @@
     _next_in_special_set = r;
   }
 
+  void set_allocation_context(AllocationContext_t context) {
+    _allocation_context = context;
+  }
+
+  AllocationContext_t  allocation_context() const {
+    return _allocation_context;
+  }
+
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next and prev fields used to link regions into
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -45,11 +45,13 @@
   nonstatic_field(HeapRegionManager, _regions,          G1HeapRegionTable)    \
   nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
+  nonstatic_field(G1Allocator,     _summary_bytes_used, size_t)               \
+                                                                              \
   nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager)    \
-  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _humongous_set,      HeapRegionSetBase)    \
+  nonstatic_field(G1CollectedHeap, _allocator,          G1Allocator*)         \
                                                                               \
   nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
   nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
@@ -72,14 +74,16 @@
                                                                               \
   declare_type(G1OffsetTableContigSpace, CompactibleSpace)                    \
   declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
-  declare_toplevel_type(HeapRegionManager)                                        \
+  declare_toplevel_type(HeapRegionManager)                                    \
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(HeapRegionSetCount)                                   \
   declare_toplevel_type(G1MonitoringSupport)                                  \
+  declare_toplevel_type(G1Allocator)                                          \
                                                                               \
   declare_toplevel_type(G1CollectedHeap*)                                     \
   declare_toplevel_type(HeapRegion*)                                          \
   declare_toplevel_type(G1MonitoringSupport*)                                 \
+  declare_toplevel_type(G1Allocator*)                                         \
 
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Sep 05 09:49:19 2014 +0200
@@ -45,7 +45,8 @@
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
-  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+
+  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
          "if we get back a result, the pause should have succeeded");
 }
@@ -99,7 +100,7 @@
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                      false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the
@@ -152,7 +153,7 @@
     g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
   if (_pause_succeeded && _word_size > 0) {
     // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                       true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 
 // VM_operations for the G1 collector.
@@ -40,6 +41,7 @@
   size_t    _word_size;
   HeapWord* _result;
   bool      _pause_succeeded;
+  AllocationContext_t _allocation_context;
 
 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
@@ -49,6 +51,8 @@
       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t  allocation_context() { return _allocation_context; }
 };
 
 class VM_G1CollectFull: public VM_GC_Operation {
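
For context, a hypothetical caller sketch (not shown in this hunk) of how the new allocation-context field on VM_G1OperationWithAllocRequest is meant to be used: the requesting thread tags the operation with its own context before handing it to the VM thread, and doit() then passes that context on to satisfy_failed_allocation(). The constructor argument order below is an assumption based on the VM_G1OperationWithAllocRequest base-class constructor.

// Hypothetical caller; not part of this hunk.
VM_G1CollectForAllocation op(gc_count_before, word_size);
op.set_allocation_context(AllocationContext::current());
VMThread::execute(&op);
if (op.pause_succeeded() && op.result() != NULL) {
  // allocation request was satisfied at op.result()
}
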
--- a/src/share/vm/runtime/vm_operations.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/runtime/vm_operations.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -68,6 +68,7 @@
   template(G1CollectFull)                         \
   template(G1CollectForAllocation)                \
   template(G1IncCollectionPause)                  \
+  template(DestroyAllocationContext)              \
   template(EnableBiasedLocking)                   \
   template(RevokeBias)                            \
   template(BulkRevokeBias)                        \