changeset 4079:aaf61e68b255

6818524: G1: use ergonomic resizing of PLABs
Summary: Employ PLABStats instances to record information about survivor and old PLABs, and use the recorded stats to adjust the sizes of survivor and old PLABs.
Reviewed-by: johnc, ysr
Contributed-by: Brandon Mitchell <brandon@twitter.com>
author johnc
date Mon, 06 Aug 2012 12:20:14 -0700
parents ef437ea56651
children eff5d59db7e1
files src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp src/share/vm/gc_implementation/parNew/parNewGeneration.cpp src/share/vm/gc_implementation/parNew/parNewGeneration.hpp src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp src/share/vm/memory/tenuredGeneration.cpp src/share/vm/precompiled/precompiled.hpp
diffstat 10 files changed, 648 insertions(+), 612 deletions(-)
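The change wires a PLABStats feedback loop into G1: during a pause each worker flushes its buffers' allocated/wasted/unused word counts into a shared PLABStats instance (one for survivor PLABs, one for old), and at the end of the pause adjust_desired_plab_sz() turns those sensors into the desired PLAB size for the next pause. A minimal sketch of that computation, assuming the default flag values (TargetPLABWastePct=10, TargetSurvivorRatio=50, PLABWeight=75) and with the AdaptiveWeightedAverage reduced to plain exponential smoothing:

// Sketch only, not code from the patch: the ergonomic resizing loop in
// miniature, with plain constants standing in for the HotSpot flags.
#include <algorithm>
#include <cstddef>

struct MiniPLABStats {
  size_t allocated = 0;    // total words handed to PLABs this pause
  size_t wasted    = 0;    // filler objects (internal fragmentation)
  size_t unused    = 0;    // space left in each thread's last buffer
  double average   = 4096; // smoothed size estimate, in words

  size_t adjust(size_t gc_threads, size_t min_sz, size_t max_sz) {
    if (allocated == 0) allocated = 1;           // guard the division
    double wasted_frac = (double)unused / (double)allocated;
    size_t target_refills =
        std::max<size_t>(1, (size_t)((wasted_frac * 50) / 10));
    size_t used    = allocated - wasted - unused;
    size_t plab_sz = used / (target_refills * gc_threads);
    average = 0.75 * average + 0.25 * (double)plab_sz;  // PLABWeight=75
    allocated = wasted = unused = 0;             // clear the sensors
    return std::min(max_sz, std::max(min_sz, (size_t)average));
  }
};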
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Aug 06 12:20:14 2012 -0700
@@ -1891,6 +1891,8 @@
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _retained_old_gc_alloc_region(NULL),
+  _survivor_plab_stats(YoungPLABSize, PLABWeight),
+  _old_plab_stats(OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
   _surviving_young_words(NULL),
   _old_marking_cycles_started(0),
@@ -4099,17 +4101,22 @@
   size_t gclab_word_size;
   switch (purpose) {
     case GCAllocForSurvived:
-      gclab_word_size = YoungPLABSize;
+      gclab_word_size = _survivor_plab_stats.desired_plab_sz();
       break;
     case GCAllocForTenured:
-      gclab_word_size = OldPLABSize;
+      gclab_word_size = _old_plab_stats.desired_plab_sz();
       break;
     default:
       assert(false, "unknown GCAllocPurpose");
-      gclab_word_size = OldPLABSize;
+      gclab_word_size = _old_plab_stats.desired_plab_sz();
       break;
   }
-  return gclab_word_size;
+
+  // Prevent humongous PLAB sizes for two reasons:
+  // * PLABs are allocated using a similar path to oops, but should
+  //   never be in a humongous region
+  // * Allowing humongous PLABs needlessly churns the region free lists
+  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
 void G1CollectedHeap::init_mutator_alloc_region() {
@@ -4165,6 +4172,11 @@
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
+
+  if (ResizePLAB) {
+    _survivor_plab_stats.adjust_desired_plab_sz();
+    _old_plab_stats.adjust_desired_plab_sz();
+  }
 }
 
 void G1CollectedHeap::abandon_gc_alloc_regions() {
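For scale on the clamp added in desired_plab_sz() above: with G1's default region size the humongous threshold is half a region, so even an aggressively grown PLAB stays well below region size. A hypothetical check of the arithmetic, assuming 1 MB regions on a 64-bit VM (131072 words per region):

// Illustration only: clamp behaviour of desired_plab_sz(), under the
// stated 1 MB region assumption (threshold = half a region in words).
#include <cassert>
#include <cstddef>
int main() {
  const size_t humongous_threshold_in_words = 131072 / 2;  // assumption
  const size_t suggested = 100000;  // what the stats might ask for
  size_t plab = suggested < humongous_threshold_in_words
                    ? suggested : humongous_threshold_in_words;  // MIN2
  assert(plab == 65536);  // clamped; a 4096-word suggestion passes through
  return 0;
}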
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Aug 06 12:20:14 2012 -0700
@@ -33,7 +33,7 @@
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
-#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/sharedHeap.hpp"
@@ -278,10 +278,33 @@
   // survivor objects.
   SurvivorGCAllocRegion _survivor_gc_alloc_region;
 
+  // PLAB sizing policy for survivors.
+  PLABStats _survivor_plab_stats;
+
   // Alloc region used to satisfy allocation requests by the GC for
   // old objects.
   OldGCAllocRegion _old_gc_alloc_region;
 
+  // PLAB sizing policy for tenured objects.
+  PLABStats _old_plab_stats;
+
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
   // The last old region we allocated to during the last GC.
   // Typically, it is not full so we should re-use it during the next GC.
   HeapRegion* _retained_old_gc_alloc_region;
@@ -314,7 +337,7 @@
   G1MonitoringSupport* _g1mm;
 
   // Determines PLAB size for a particular allocation purpose.
-  static size_t desired_plab_sz(GCAllocPurpose purpose);
+  size_t desired_plab_sz(GCAllocPurpose purpose);
 
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region.
@@ -1811,19 +1834,19 @@
   }
 
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      assert(gclab_word_size == alloc_buf->word_sz(),
-             "dynamic resizing is not supported");
       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false, false);
+      alloc_buf->flush_stats_and_retire(_g1h->stats_for_purpose(purpose),
+                                        false /* end_of_gc */,
+                                        false /* retain */);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
       // Otherwise.
+      alloc_buf->set_word_size(gclab_word_size);
       alloc_buf->set_buf(buf);
 
       obj = alloc_buf->allocate(word_sz);
@@ -1908,7 +1931,9 @@
     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
       size_t waste = _alloc_buffers[ap]->words_remaining();
       add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->retire(true, false);
+      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                                 true /* end_of_gc */,
+                                                 false /* retain */);
     }
   }
 
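The predicate in allocate_slow() above is what keeps large objects from churning buffers: the current PLAB is retired and refilled only when the failing allocation is smaller than ParallelGCBufferWastePct percent of the desired PLAB size; larger requests fall through to a direct allocation. A sketch of just that gate, with the flag's documented default hard-coded as an assumption:

#include <cstddef>
// Sketch of the refill gate in allocate_slow(): refill only when the
// object is small relative to the buffer.  With the assumed default
// ParallelGCBufferWastePct=10 and a 4096-word PLAB, a request of 410 or
// more words skips the PLAB and is allocated directly.
bool should_refill_plab(size_t word_sz, size_t gclab_word_size) {
  const size_t parallel_gc_buffer_waste_pct = 10;  // assumed default
  return word_sz * 100 < gclab_word_size * parallel_gc_buffer_waste_pct;
}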
--- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp	Fri Aug 03 13:24:02 2012 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,344 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
-#include "memory/sharedHeap.hpp"
-#include "oops/arrayOop.hpp"
-#include "oops/oop.inline.hpp"
-
-ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
-  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
-  _end(NULL), _hard_end(NULL),
-  _retained(false), _retained_filler(),
-  _allocated(0), _wasted(0)
-{
-  assert (min_size() > AlignmentReserve, "Inconsistency!");
-  // arrayOopDesc::header_size depends on command line initialization.
-  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
-  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
-}
-
-size_t ParGCAllocBuffer::FillerHeaderSize;
-
-// If the minimum object size is greater than MinObjAlignment, we can
-// end up with a shard at the end of the buffer that's smaller than
-// the smallest object.  We can't allow that because the buffer must
-// look like it's full of objects when we retire it, so we make
-// sure we have enough space for a filler int array object.
-size_t ParGCAllocBuffer::AlignmentReserve;
-
-void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
-  assert(!retain || end_of_gc, "Can only retain at GC end.");
-  if (_retained) {
-    // If the buffer had been retained shorten the previous filler object.
-    assert(_retained_filler.end() <= _top, "INVARIANT");
-    CollectedHeap::fill_with_object(_retained_filler);
-    // Wasted space book-keeping, otherwise (normally) done in invalidate()
-    _wasted += _retained_filler.word_size();
-    _retained = false;
-  }
-  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
-  if (_top < _hard_end) {
-    CollectedHeap::fill_with_object(_top, _hard_end);
-    if (!retain) {
-      invalidate();
-    } else {
-      // Is there wasted space we'd like to retain for the next GC?
-      if (pointer_delta(_end, _top) > FillerHeaderSize) {
-        _retained = true;
-        _retained_filler = MemRegion(_top, FillerHeaderSize);
-        _top = _top + FillerHeaderSize;
-      } else {
-        invalidate();
-      }
-    }
-  }
-}
-
-void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
-  assert(ResizePLAB, "Wasted work");
-  stats->add_allocated(_allocated);
-  stats->add_wasted(_wasted);
-  stats->add_unused(pointer_delta(_end, _top));
-}
-
-// Compute desired plab size and latch result for later
-// use. This should be called once at the end of parallel
-// scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz() {
-  assert(ResizePLAB, "Not set");
-  if (_allocated == 0) {
-    assert(_unused == 0, "Inconsistency in PLAB stats");
-    _allocated = 1;
-  }
-  double wasted_frac    = (double)_unused/(double)_allocated;
-  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
-                                   TargetPLABWastePct);
-  if (target_refills == 0) {
-    target_refills = 1;
-  }
-  _used = _allocated - _wasted - _unused;
-  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
-  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
-  // Take historical weighted average
-  _filter.sample(plab_sz);
-  // Clip from above and below, and align to object boundary
-  plab_sz = MAX2(min_size(), (size_t)_filter.average());
-  plab_sz = MIN2(max_size(), plab_sz);
-  plab_sz = align_object_size(plab_sz);
-  // Latch the result
-  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
-  if (ResizePLAB) {
-    _desired_plab_sz = plab_sz;
-  }
-  // Now clear the accumulators for next round:
-  // note this needs to be fixed in the case where we
-  // are retaining across scavenges. FIX ME !!! XXX
-  _allocated = 0;
-  _wasted    = 0;
-  _unused    = 0;
-}
-
-#ifndef PRODUCT
-void ParGCAllocBuffer::print() {
-  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p"
-             "_retained: %c _retained_filler: [%p,%p)\n",
-             _bottom, _top, _end, _hard_end,
-             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
-}
-#endif // !PRODUCT
-
-const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
-MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
-     ((size_t)Generation::GenGrain)/HeapWordSize);
-const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
-MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
-     (size_t)Generation::GenGrain);
-
-ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
-                                                 BlockOffsetSharedArray* bsa) :
-  ParGCAllocBuffer(word_sz),
-  _bsa(bsa),
-  _bt(bsa, MemRegion(_bottom, _hard_end)),
-  _true_end(_hard_end)
-{}
-
-// The buffer comes with its own BOT, with a shared (obviously) underlying
-// BlockOffsetSharedArray. We manipulate this BOT in the normal way
-// as we would for any contiguous space. However, on accasion we
-// need to do some buffer surgery at the extremities before we
-// start using the body of the buffer for allocations. Such surgery
-// (as explained elsewhere) is to prevent allocation on a card that
-// is in the process of being walked concurrently by another GC thread.
-// When such surgery happens at a point that is far removed (to the
-// right of the current allocation point, top), we use the "contig"
-// parameter below to directly manipulate the shared array without
-// modifying the _next_threshold state in the BOT.
-void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
-                                                     bool contig) {
-  CollectedHeap::fill_with_object(mr);
-  if (contig) {
-    _bt.alloc_block(mr.start(), mr.end());
-  } else {
-    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
-  }
-}
-
-HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
-  HeapWord* res = NULL;
-  if (_true_end > _hard_end) {
-    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
-                                      ChunkSizeInBytes) == _hard_end,
-           "or else _true_end should be equal to _hard_end");
-    assert(_retained, "or else _true_end should be equal to _hard_end");
-    assert(_retained_filler.end() <= _top, "INVARIANT");
-    CollectedHeap::fill_with_object(_retained_filler);
-    if (_top < _hard_end) {
-      fill_region_with_block(MemRegion(_top, _hard_end), true);
-    }
-    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
-    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
-    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
-    _top      = _retained_filler.end();
-    _hard_end = next_hard_end;
-    _end      = _hard_end - AlignmentReserve;
-    res       = ParGCAllocBuffer::allocate(word_sz);
-    if (res != NULL) {
-      _bt.alloc_block(res, word_sz);
-    }
-  }
-  return res;
-}
-
-void
-ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
-  ParGCAllocBuffer::undo_allocation(obj, word_sz);
-  // This may back us up beyond the previous threshold, so reset.
-  _bt.set_region(MemRegion(_top, _hard_end));
-  _bt.initialize_threshold();
-}
-
-void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
-  assert(!retain || end_of_gc, "Can only retain at GC end.");
-  if (_retained) {
-    // We're about to make the retained_filler into a block.
-    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
-                                      _retained_filler.end());
-  }
-  // Reset _hard_end to _true_end (and update _end)
-  if (retain && _hard_end != NULL) {
-    assert(_hard_end <= _true_end, "Invariant.");
-    _hard_end = _true_end;
-    _end      = MAX2(_top, _hard_end - AlignmentReserve);
-    assert(_end <= _hard_end, "Invariant.");
-  }
-  _true_end = _hard_end;
-  HeapWord* pre_top = _top;
-
-  ParGCAllocBuffer::retire(end_of_gc, retain);
-  // Now any old _retained_filler is cut back to size, the free part is
-  // filled with a filler object, and top is past the header of that
-  // object.
-
-  if (retain && _top < _end) {
-    assert(end_of_gc && retain, "Or else retain should be false.");
-    // If the lab does not start on a card boundary, we don't want to
-    // allocate onto that card, since that might lead to concurrent
-    // allocation and card scanning, which we don't support.  So we fill
-    // the first card with a garbage object.
-    size_t first_card_index = _bsa->index_for(pre_top);
-    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
-    if (first_card_start < pre_top) {
-      HeapWord* second_card_start =
-        _bsa->inc_by_region_size(first_card_start);
-
-      // Ensure enough room to fill with the smallest block
-      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
-
-      // If the end is already in the first card, don't go beyond it!
-      // Or if the remainder is too small for a filler object, gobble it up.
-      if (_hard_end < second_card_start ||
-          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
-        second_card_start = _hard_end;
-      }
-      if (pre_top < second_card_start) {
-        MemRegion first_card_suffix(pre_top, second_card_start);
-        fill_region_with_block(first_card_suffix, true);
-      }
-      pre_top = second_card_start;
-      _top = pre_top;
-      _end = MAX2(_top, _hard_end - AlignmentReserve);
-    }
-
-    // If the lab does not end on a card boundary, we don't want to
-    // allocate onto that card, since that might lead to concurrent
-    // allocation and card scanning, which we don't support.  So we fill
-    // the last card with a garbage object.
-    size_t last_card_index = _bsa->index_for(_hard_end);
-    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
-    if (last_card_start < _hard_end) {
-
-      // Ensure enough room to fill with the smallest block
-      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
-
-      // If the top is already in the last card, don't go back beyond it!
-      // Or if the remainder is too small for a filler object, gobble it up.
-      if (_top > last_card_start ||
-          pointer_delta(last_card_start, _top) < AlignmentReserve) {
-        last_card_start = _top;
-      }
-      if (last_card_start < _hard_end) {
-        MemRegion last_card_prefix(last_card_start, _hard_end);
-        fill_region_with_block(last_card_prefix, false);
-      }
-      _hard_end = last_card_start;
-      _end      = MAX2(_top, _hard_end - AlignmentReserve);
-      _true_end = _hard_end;
-      assert(_end <= _hard_end, "Invariant.");
-    }
-
-    // At this point:
-    //   1) we had a filler object from the original top to hard_end.
-    //   2) We've filled in any partial cards at the front and back.
-    if (pre_top < _hard_end) {
-      // Now we can reset the _bt to do allocation in the given area.
-      MemRegion new_filler(pre_top, _hard_end);
-      fill_region_with_block(new_filler, false);
-      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
-      // If there's no space left, don't retain.
-      if (_top >= _end) {
-        _retained = false;
-        invalidate();
-        return;
-      }
-      _retained_filler = MemRegion(pre_top, _top);
-      _bt.set_region(MemRegion(_top, _hard_end));
-      _bt.initialize_threshold();
-      assert(_bt.threshold() > _top, "initialize_threshold failed!");
-
-      // There may be other reasons for queries into the middle of the
-      // filler object.  When such queries are done in parallel with
-      // allocation, bad things can happen, if the query involves object
-      // iteration.  So we ensure that such queries do not involve object
-      // iteration, by putting another filler object on the boundaries of
-      // such queries.  One such is the object spanning a parallel card
-      // chunk boundary.
-
-      // "chunk_boundary" is the address of the first chunk boundary less
-      // than "hard_end".
-      HeapWord* chunk_boundary =
-        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
-      assert(chunk_boundary < _hard_end, "Or else above did not work.");
-      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
-             "Consequence of last card handling above.");
-
-      if (_top <= chunk_boundary) {
-        assert(_true_end == _hard_end, "Invariant.");
-        while (_top <= chunk_boundary) {
-          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
-                 "Consequence of last card handling above.");
-          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
-          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
-          _hard_end = chunk_boundary;
-          chunk_boundary -= ChunkSizeInWords;
-        }
-        _end = _hard_end - AlignmentReserve;
-        assert(_top <= _end, "Invariant.");
-        // Now reset the initial filler chunk so it doesn't overlap with
-        // the one(s) inserted above.
-        MemRegion new_filler(pre_top, _hard_end);
-        fill_region_with_block(new_filler, false);
-      }
-    } else {
-      _retained = false;
-      invalidate();
-    }
-  } else {
-    assert(!end_of_gc ||
-           (!_retained && _true_end == _hard_end), "Checking.");
-  }
-  assert(_end <= _hard_end, "Invariant.");
-  assert(_top < _end || _top == _hard_end, "Invariant");
-}
--- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp	Fri Aug 03 13:24:02 2012 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,249 +0,0 @@
-/*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-
-#include "memory/allocation.hpp"
-#include "memory/blockOffsetTable.hpp"
-#include "memory/threadLocalAllocBuffer.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// Forward decl.
-
-class PLABStats;
-
-// A per-thread allocation buffer used during GC.
-class ParGCAllocBuffer: public CHeapObj<mtGC> {
-protected:
-  char head[32];
-  size_t _word_sz;          // in HeapWord units
-  HeapWord* _bottom;
-  HeapWord* _top;
-  HeapWord* _end;       // last allocatable address + 1
-  HeapWord* _hard_end;  // _end + AlignmentReserve
-  bool      _retained;  // whether we hold a _retained_filler
-  MemRegion _retained_filler;
-  // In support of ergonomic sizing of PLAB's
-  size_t    _allocated;     // in HeapWord units
-  size_t    _wasted;        // in HeapWord units
-  char tail[32];
-  static size_t FillerHeaderSize;
-  static size_t AlignmentReserve;
-
-public:
-  // Initializes the buffer to be empty, but with the given "word_sz".
-  // Must get initialized with "set_buf" for an allocation to succeed.
-  ParGCAllocBuffer(size_t word_sz);
-
-  static const size_t min_size() {
-    return ThreadLocalAllocBuffer::min_size();
-  }
-
-  static const size_t max_size() {
-    return ThreadLocalAllocBuffer::max_size();
-  }
-
-  // If an allocation of the given "word_sz" can be satisfied within the
-  // buffer, do the allocation, returning a pointer to the start of the
-  // allocated block.  If the allocation request cannot be satisfied,
-  // return NULL.
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* res = _top;
-    if (pointer_delta(_end, _top) >= word_sz) {
-      _top = _top + word_sz;
-      return res;
-    } else {
-      return NULL;
-    }
-  }
-
-  // Undo the last allocation in the buffer, which is required to be of the
-  // "obj" of the given "word_sz".
-  void undo_allocation(HeapWord* obj, size_t word_sz) {
-    assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
-    assert(pointer_delta(_top, obj)     == word_sz, "Bad undo");
-    _top = obj;
-  }
-
-  // The total (word) size of the buffer, including both allocated and
-  // unallocted space.
-  size_t word_sz() { return _word_sz; }
-
-  // Should only be done if we are about to reset with a new buffer of the
-  // given size.
-  void set_word_size(size_t new_word_sz) {
-    assert(new_word_sz > AlignmentReserve, "Too small");
-    _word_sz = new_word_sz;
-  }
-
-  // The number of words of unallocated space remaining in the buffer.
-  size_t words_remaining() {
-    assert(_end >= _top, "Negative buffer");
-    return pointer_delta(_end, _top, HeapWordSize);
-  }
-
-  bool contains(void* addr) {
-    return (void*)_bottom <= addr && addr < (void*)_hard_end;
-  }
-
-  // Sets the space of the buffer to be [buf, space+word_sz()).
-  void set_buf(HeapWord* buf) {
-    _bottom   = buf;
-    _top      = _bottom;
-    _hard_end = _bottom + word_sz();
-    _end      = _hard_end - AlignmentReserve;
-    assert(_end >= _top, "Negative buffer");
-    // In support of ergonomic sizing
-    _allocated += word_sz();
-  }
-
-  // Flush the stats supporting ergonomic sizing of PLAB's
-  void flush_stats(PLABStats* stats);
-  void flush_stats_and_retire(PLABStats* stats, bool retain) {
-    // We flush the stats first in order to get a reading of
-    // unused space in the last buffer.
-    if (ResizePLAB) {
-      flush_stats(stats);
-    }
-    // Retire the last allocation buffer.
-    retire(true, retain);
-  }
-
-  // Force future allocations to fail and queries for contains()
-  // to return false
-  void invalidate() {
-    assert(!_retained, "Shouldn't retain an invalidated buffer.");
-    _end    = _hard_end;
-    _wasted += pointer_delta(_end, _top);  // unused  space
-    _top    = _end;      // force future allocations to fail
-    _bottom = _end;      // force future contains() queries to return false
-  }
-
-  // Fills in the unallocated portion of the buffer with a garbage object.
-  // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
-  // is true, attempt to re-use the unused portion in the next GC.
-  void retire(bool end_of_gc, bool retain);
-
-  void print() PRODUCT_RETURN;
-};
-
-// PLAB stats book-keeping
-class PLABStats VALUE_OBJ_CLASS_SPEC {
-  size_t _allocated;      // total allocated
-  size_t _wasted;         // of which wasted (internal fragmentation)
-  size_t _unused;         // Unused in last buffer
-  size_t _used;           // derived = allocated - wasted - unused
-  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
-  AdaptiveWeightedAverage
-         _filter;         // integrator with decay
-
- public:
-  PLABStats(size_t desired_plab_sz_, unsigned wt) :
-    _allocated(0),
-    _wasted(0),
-    _unused(0),
-    _used(0),
-    _desired_plab_sz(desired_plab_sz_),
-    _filter(wt)
-  {
-    size_t min_sz = min_size();
-    size_t max_sz = max_size();
-    size_t aligned_min_sz = align_object_size(min_sz);
-    size_t aligned_max_sz = align_object_size(max_sz);
-    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
-           min_sz <= max_sz,
-           "PLAB clipping computation in adjust_desired_plab_sz()"
-           " may be incorrect");
-  }
-
-  static const size_t min_size() {
-    return ParGCAllocBuffer::min_size();
-  }
-
-  static const size_t max_size() {
-    return ParGCAllocBuffer::max_size();
-  }
-
-  size_t desired_plab_sz() {
-    return _desired_plab_sz;
-  }
-
-  void adjust_desired_plab_sz(); // filter computation, latches output to
-                                 // _desired_plab_sz, clears sensor accumulators
-
-  void add_allocated(size_t v) {
-    Atomic::add_ptr(v, &_allocated);
-  }
-
-  void add_unused(size_t v) {
-    Atomic::add_ptr(v, &_unused);
-  }
-
-  void add_wasted(size_t v) {
-    Atomic::add_ptr(v, &_wasted);
-  }
-};
-
-class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
-  BlockOffsetArrayContigSpace _bt;
-  BlockOffsetSharedArray*     _bsa;
-  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
-
-  static const size_t ChunkSizeInWords;
-  static const size_t ChunkSizeInBytes;
-  HeapWord* allocate_slow(size_t word_sz);
-
-  void fill_region_with_block(MemRegion mr, bool contig);
-
-public:
-  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
-
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
-    if (res != NULL) {
-      _bt.alloc_block(res, word_sz);
-    } else {
-      res = allocate_slow(word_sz);
-    }
-    return res;
-  }
-
-  void undo_allocation(HeapWord* obj, size_t word_sz);
-
-  void set_buf(HeapWord* buf_start) {
-    ParGCAllocBuffer::set_buf(buf_start);
-    _true_end = _hard_end;
-    _bt.set_region(MemRegion(buf_start, word_sz()));
-    _bt.initialize_threshold();
-  }
-
-  void retire(bool end_of_gc, bool retain);
-
-  MemRegion range() {
-    return MemRegion(_top, _true_end);
-  }
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Aug 06 12:20:14 2012 -0700
@@ -24,11 +24,11 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
-#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -453,7 +453,8 @@
     // retire the last buffer.
     par_scan_state.to_space_alloc_buffer()->
       flush_stats_and_retire(_gen.plab_stats(),
-                             false /* !retain */);
+                             true /* end_of_gc */,
+                             false /* retain */);
 
     // Every thread has its own age table.  We need to merge
     // them all into one.
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Aug 06 12:20:14 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
 
-#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "utilities/taskqueue.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Mon Aug 06 12:20:14 2012 -0700
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "memory/sharedHeap.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/oop.inline.hpp"
+
+ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
+  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
+  _end(NULL), _hard_end(NULL),
+  _retained(false), _retained_filler(),
+  _allocated(0), _wasted(0)
+{
+  assert (min_size() > AlignmentReserve, "Inconsistency!");
+  // arrayOopDesc::header_size depends on command line initialization.
+  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
+  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
+}
+
+size_t ParGCAllocBuffer::FillerHeaderSize;
+
+// If the minimum object size is greater than MinObjAlignment, we can
+// end up with a shard at the end of the buffer that's smaller than
+// the smallest object.  We can't allow that because the buffer must
+// look like it's full of objects when we retire it, so we make
+// sure we have enough space for a filler int array object.
+size_t ParGCAllocBuffer::AlignmentReserve;
+
+void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
+  assert(!retain || end_of_gc, "Can only retain at GC end.");
+  if (_retained) {
+    // If the buffer had been retained, shorten the previous filler object.
+    assert(_retained_filler.end() <= _top, "INVARIANT");
+    CollectedHeap::fill_with_object(_retained_filler);
+    // Wasted space book-keeping, otherwise (normally) done in invalidate()
+    _wasted += _retained_filler.word_size();
+    _retained = false;
+  }
+  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
+  if (_top < _hard_end) {
+    CollectedHeap::fill_with_object(_top, _hard_end);
+    if (!retain) {
+      invalidate();
+    } else {
+      // Is there wasted space we'd like to retain for the next GC?
+      if (pointer_delta(_end, _top) > FillerHeaderSize) {
+        _retained = true;
+        _retained_filler = MemRegion(_top, FillerHeaderSize);
+        _top = _top + FillerHeaderSize;
+      } else {
+        invalidate();
+      }
+    }
+  }
+}
+
+void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
+  assert(ResizePLAB, "Wasted work");
+  stats->add_allocated(_allocated);
+  stats->add_wasted(_wasted);
+  stats->add_unused(pointer_delta(_end, _top));
+}
+
+// Compute desired plab size and latch result for later
+// use. This should be called once at the end of parallel
+// scavenge; it clears the sensor accumulators.
+void PLABStats::adjust_desired_plab_sz() {
+  assert(ResizePLAB, "Not set");
+  if (_allocated == 0) {
+    assert(_unused == 0, "Inconsistency in PLAB stats");
+    _allocated = 1;
+  }
+  double wasted_frac    = (double)_unused/(double)_allocated;
+  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
+                                   TargetPLABWastePct);
+  if (target_refills == 0) {
+    target_refills = 1;
+  }
+  _used = _allocated - _wasted - _unused;
+  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
+  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
+  // Take historical weighted average
+  _filter.sample(plab_sz);
+  // Clip from above and below, and align to object boundary
+  plab_sz = MAX2(min_size(), (size_t)_filter.average());
+  plab_sz = MIN2(max_size(), plab_sz);
+  plab_sz = align_object_size(plab_sz);
+  // Latch the result
+  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
+  _desired_plab_sz = plab_sz;
+  // Now clear the accumulators for next round:
+  // note this needs to be fixed in the case where we
+  // are retaining across scavenges. FIX ME !!! XXX
+  _allocated = 0;
+  _wasted    = 0;
+  _unused    = 0;
+}
+
+#ifndef PRODUCT
+void ParGCAllocBuffer::print() {
+  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p "
+             "_retained: %c _retained_filler: [%p,%p)\n",
+             _bottom, _top, _end, _hard_end,
+             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
+}
+#endif // !PRODUCT
+
+const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
+MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
+     ((size_t)Generation::GenGrain)/HeapWordSize);
+const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
+MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
+     (size_t)Generation::GenGrain);
+
+ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
+                                                 BlockOffsetSharedArray* bsa) :
+  ParGCAllocBuffer(word_sz),
+  _bsa(bsa),
+  _bt(bsa, MemRegion(_bottom, _hard_end)),
+  _true_end(_hard_end)
+{}
+
+// The buffer comes with its own BOT, with a shared (obviously) underlying
+// BlockOffsetSharedArray. We manipulate this BOT in the normal way
+// as we would for any contiguous space. However, on occasion we
+// need to do some buffer surgery at the extremities before we
+// start using the body of the buffer for allocations. Such surgery
+// (as explained elsewhere) is to prevent allocation on a card that
+// is in the process of being walked concurrently by another GC thread.
+// When such surgery happens at a point that is far removed (to the
+// right of the current allocation point, top), we use the "contig"
+// parameter below to directly manipulate the shared array without
+// modifying the _next_threshold state in the BOT.
+void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
+                                                     bool contig) {
+  CollectedHeap::fill_with_object(mr);
+  if (contig) {
+    _bt.alloc_block(mr.start(), mr.end());
+  } else {
+    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
+  }
+}
+
+HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
+  HeapWord* res = NULL;
+  if (_true_end > _hard_end) {
+    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
+                                      ChunkSizeInBytes) == _hard_end,
+           "or else _true_end should be equal to _hard_end");
+    assert(_retained, "or else _true_end should be equal to _hard_end");
+    assert(_retained_filler.end() <= _top, "INVARIANT");
+    CollectedHeap::fill_with_object(_retained_filler);
+    if (_top < _hard_end) {
+      fill_region_with_block(MemRegion(_top, _hard_end), true);
+    }
+    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
+    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
+    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
+    _top      = _retained_filler.end();
+    _hard_end = next_hard_end;
+    _end      = _hard_end - AlignmentReserve;
+    res       = ParGCAllocBuffer::allocate(word_sz);
+    if (res != NULL) {
+      _bt.alloc_block(res, word_sz);
+    }
+  }
+  return res;
+}
+
+void
+ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
+  ParGCAllocBuffer::undo_allocation(obj, word_sz);
+  // This may back us up beyond the previous threshold, so reset.
+  _bt.set_region(MemRegion(_top, _hard_end));
+  _bt.initialize_threshold();
+}
+
+void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
+  assert(!retain || end_of_gc, "Can only retain at GC end.");
+  if (_retained) {
+    // We're about to make the retained_filler into a block.
+    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
+                                      _retained_filler.end());
+  }
+  // Reset _hard_end to _true_end (and update _end)
+  if (retain && _hard_end != NULL) {
+    assert(_hard_end <= _true_end, "Invariant.");
+    _hard_end = _true_end;
+    _end      = MAX2(_top, _hard_end - AlignmentReserve);
+    assert(_end <= _hard_end, "Invariant.");
+  }
+  _true_end = _hard_end;
+  HeapWord* pre_top = _top;
+
+  ParGCAllocBuffer::retire(end_of_gc, retain);
+  // Now any old _retained_filler is cut back to size, the free part is
+  // filled with a filler object, and top is past the header of that
+  // object.
+
+  if (retain && _top < _end) {
+    assert(end_of_gc && retain, "Or else retain should be false.");
+    // If the lab does not start on a card boundary, we don't want to
+    // allocate onto that card, since that might lead to concurrent
+    // allocation and card scanning, which we don't support.  So we fill
+    // the first card with a garbage object.
+    size_t first_card_index = _bsa->index_for(pre_top);
+    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
+    if (first_card_start < pre_top) {
+      HeapWord* second_card_start =
+        _bsa->inc_by_region_size(first_card_start);
+
+      // Ensure enough room to fill with the smallest block
+      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
+
+      // If the end is already in the first card, don't go beyond it!
+      // Or if the remainder is too small for a filler object, gobble it up.
+      if (_hard_end < second_card_start ||
+          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
+        second_card_start = _hard_end;
+      }
+      if (pre_top < second_card_start) {
+        MemRegion first_card_suffix(pre_top, second_card_start);
+        fill_region_with_block(first_card_suffix, true);
+      }
+      pre_top = second_card_start;
+      _top = pre_top;
+      _end = MAX2(_top, _hard_end - AlignmentReserve);
+    }
+
+    // If the lab does not end on a card boundary, we don't want to
+    // allocate onto that card, since that might lead to concurrent
+    // allocation and card scanning, which we don't support.  So we fill
+    // the last card with a garbage object.
+    size_t last_card_index = _bsa->index_for(_hard_end);
+    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
+    if (last_card_start < _hard_end) {
+
+      // Ensure enough room to fill with the smallest block
+      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
+
+      // If the top is already in the last card, don't go back beyond it!
+      // Or if the remainder is too small for a filler object, gobble it up.
+      if (_top > last_card_start ||
+          pointer_delta(last_card_start, _top) < AlignmentReserve) {
+        last_card_start = _top;
+      }
+      if (last_card_start < _hard_end) {
+        MemRegion last_card_prefix(last_card_start, _hard_end);
+        fill_region_with_block(last_card_prefix, false);
+      }
+      _hard_end = last_card_start;
+      _end      = MAX2(_top, _hard_end - AlignmentReserve);
+      _true_end = _hard_end;
+      assert(_end <= _hard_end, "Invariant.");
+    }
+
+    // At this point:
+    //   1) we had a filler object from the original top to hard_end.
+    //   2) We've filled in any partial cards at the front and back.
+    if (pre_top < _hard_end) {
+      // Now we can reset the _bt to do allocation in the given area.
+      MemRegion new_filler(pre_top, _hard_end);
+      fill_region_with_block(new_filler, false);
+      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
+      // If there's no space left, don't retain.
+      if (_top >= _end) {
+        _retained = false;
+        invalidate();
+        return;
+      }
+      _retained_filler = MemRegion(pre_top, _top);
+      _bt.set_region(MemRegion(_top, _hard_end));
+      _bt.initialize_threshold();
+      assert(_bt.threshold() > _top, "initialize_threshold failed!");
+
+      // There may be other reasons for queries into the middle of the
+      // filler object.  When such queries are done in parallel with
+      // allocation, bad things can happen, if the query involves object
+      // iteration.  So we ensure that such queries do not involve object
+      // iteration, by putting another filler object on the boundaries of
+      // such queries.  One such is the object spanning a parallel card
+      // chunk boundary.
+
+      // "chunk_boundary" is the address of the first chunk boundary less
+      // than "hard_end".
+      HeapWord* chunk_boundary =
+        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
+      assert(chunk_boundary < _hard_end, "Or else above did not work.");
+      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
+             "Consequence of last card handling above.");
+
+      if (_top <= chunk_boundary) {
+        assert(_true_end == _hard_end, "Invariant.");
+        while (_top <= chunk_boundary) {
+          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
+                 "Consequence of last card handling above.");
+          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
+          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
+          _hard_end = chunk_boundary;
+          chunk_boundary -= ChunkSizeInWords;
+        }
+        _end = _hard_end - AlignmentReserve;
+        assert(_top <= _end, "Invariant.");
+        // Now reset the initial filler chunk so it doesn't overlap with
+        // the one(s) inserted above.
+        MemRegion new_filler(pre_top, _hard_end);
+        fill_region_with_block(new_filler, false);
+      }
+    } else {
+      _retained = false;
+      invalidate();
+    }
+  } else {
+    assert(!end_of_gc ||
+           (!_retained && _true_end == _hard_end), "Checking.");
+  }
+  assert(_end <= _hard_end, "Invariant.");
+  assert(_top < _end || _top == _hard_end, "Invariant");
+}
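To make adjust_desired_plab_sz() above concrete, here is one pass through the arithmetic with illustrative (not measured) sensor values and the default flags TargetSurvivorRatio=50, TargetPLABWastePct=10, ParallelGCThreads=4:

#include <cstddef>
// Illustrative sensor values only; flag defaults hard-coded as noted.
size_t next_plab_sz_example() {
  size_t allocated = 100000, wasted = 1000, unused = 5000;      // sensors
  double wasted_frac    = (double)unused / (double)allocated;   // 0.05
  size_t target_refills = (size_t)((wasted_frac * 50) / 10);    // 0 ...
  if (target_refills == 0) target_refills = 1;                  // ... so 1
  size_t used = allocated - wasted - unused;                    // 94000
  return used / (target_refills * 4);                           // 23500
}
// The 23500-word result is then sampled into the decaying average,
// clipped to [min_size(), max_size()], aligned, and latched as
// _desired_plab_sz.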
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Mon Aug 06 12:20:14 2012 -0700
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/blockOffsetTable.hpp"
+#include "memory/threadLocalAllocBuffer.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Forward decl.
+
+class PLABStats;
+
+// A per-thread allocation buffer used during GC.
+class ParGCAllocBuffer: public CHeapObj<mtGC> {
+protected:
+  char head[32];
+  size_t _word_sz;          // in HeapWord units
+  HeapWord* _bottom;
+  HeapWord* _top;
+  HeapWord* _end;       // last allocatable address + 1
+  HeapWord* _hard_end;  // _end + AlignmentReserve
+  bool      _retained;  // whether we hold a _retained_filler
+  MemRegion _retained_filler;
+  // In support of ergonomic sizing of PLAB's
+  size_t    _allocated;     // in HeapWord units
+  size_t    _wasted;        // in HeapWord units
+  char tail[32];
+  static size_t FillerHeaderSize;
+  static size_t AlignmentReserve;
+
+public:
+  // Initializes the buffer to be empty, but with the given "word_sz".
+  // Must get initialized with "set_buf" for an allocation to succeed.
+  ParGCAllocBuffer(size_t word_sz);
+
+  static const size_t min_size() {
+    return ThreadLocalAllocBuffer::min_size();
+  }
+
+  static const size_t max_size() {
+    return ThreadLocalAllocBuffer::max_size();
+  }
+
+  // If an allocation of the given "word_sz" can be satisfied within the
+  // buffer, do the allocation, returning a pointer to the start of the
+  // allocated block.  If the allocation request cannot be satisfied,
+  // return NULL.
+  HeapWord* allocate(size_t word_sz) {
+    HeapWord* res = _top;
+    if (pointer_delta(_end, _top) >= word_sz) {
+      _top = _top + word_sz;
+      return res;
+    } else {
+      return NULL;
+    }
+  }
+
+  // Undo the last allocation in the buffer, which is required to be of the
+  // "obj" of the given "word_sz".
+  void undo_allocation(HeapWord* obj, size_t word_sz) {
+    assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
+    assert(pointer_delta(_top, obj)     == word_sz, "Bad undo");
+    _top = obj;
+  }
+
+  // The total (word) size of the buffer, including both allocated and
+  // unallocated space.
+  size_t word_sz() { return _word_sz; }
+
+  // Should only be done if we are about to reset with a new buffer of the
+  // given size.
+  void set_word_size(size_t new_word_sz) {
+    assert(new_word_sz > AlignmentReserve, "Too small");
+    _word_sz = new_word_sz;
+  }
+
+  // The number of words of unallocated space remaining in the buffer.
+  size_t words_remaining() {
+    assert(_end >= _top, "Negative buffer");
+    return pointer_delta(_end, _top, HeapWordSize);
+  }
+
+  bool contains(void* addr) {
+    return (void*)_bottom <= addr && addr < (void*)_hard_end;
+  }
+
+  // Sets the space of the buffer to be [buf, buf+word_sz()).
+  void set_buf(HeapWord* buf) {
+    _bottom   = buf;
+    _top      = _bottom;
+    _hard_end = _bottom + word_sz();
+    _end      = _hard_end - AlignmentReserve;
+    assert(_end >= _top, "Negative buffer");
+    // In support of ergonomic sizing
+    _allocated += word_sz();
+  }
+
+  // Flush the stats supporting ergonomic sizing of PLAB's
+  void flush_stats(PLABStats* stats);
+  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
+    // We flush the stats first in order to get a reading of
+    // unused space in the last buffer.
+    if (ResizePLAB) {
+      flush_stats(stats);
+    }
+    // Retire the last allocation buffer.
+    retire(end_of_gc, retain);
+  }
+
+  // Force future allocations to fail and queries for contains()
+  // to return false
+  void invalidate() {
+    assert(!_retained, "Shouldn't retain an invalidated buffer.");
+    _end    = _hard_end;
+    _wasted += pointer_delta(_end, _top);  // unused  space
+    _top    = _end;      // force future allocations to fail
+    _bottom = _end;      // force future contains() queries to return false
+  }
+
+  // Fills in the unallocated portion of the buffer with a garbage object.
+  // If "end_of_gc" is TRUE, this is the last use in the GC.  If "retain"
+  // is true, attempt to re-use the unused portion in the next GC.
+  void retire(bool end_of_gc, bool retain);
+
+  void print() PRODUCT_RETURN;
+};
+
+// PLAB stats book-keeping
+class PLABStats VALUE_OBJ_CLASS_SPEC {
+  size_t _allocated;      // total allocated
+  size_t _wasted;         // of which wasted (internal fragmentation)
+  size_t _unused;         // Unused in last buffer
+  size_t _used;           // derived = allocated - wasted - unused
+  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
+  AdaptiveWeightedAverage
+         _filter;         // integrator with decay
+
+ public:
+  PLABStats(size_t desired_plab_sz_, unsigned wt) :
+    _allocated(0),
+    _wasted(0),
+    _unused(0),
+    _used(0),
+    _desired_plab_sz(desired_plab_sz_),
+    _filter(wt)
+  {
+    size_t min_sz = min_size();
+    size_t max_sz = max_size();
+    size_t aligned_min_sz = align_object_size(min_sz);
+    size_t aligned_max_sz = align_object_size(max_sz);
+    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
+           min_sz <= max_sz,
+           "PLAB clipping computation in adjust_desired_plab_sz()"
+           " may be incorrect");
+  }
+
+  static const size_t min_size() {
+    return ParGCAllocBuffer::min_size();
+  }
+
+  static const size_t max_size() {
+    return ParGCAllocBuffer::max_size();
+  }
+
+  size_t desired_plab_sz() {
+    return _desired_plab_sz;
+  }
+
+  void adjust_desired_plab_sz(); // filter computation, latches output to
+                                 // _desired_plab_sz, clears sensor accumulators
+
+  void add_allocated(size_t v) {
+    Atomic::add_ptr(v, &_allocated);
+  }
+
+  void add_unused(size_t v) {
+    Atomic::add_ptr(v, &_unused);
+  }
+
+  void add_wasted(size_t v) {
+    Atomic::add_ptr(v, &_wasted);
+  }
+};
+
+class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
+  BlockOffsetArrayContigSpace _bt;
+  BlockOffsetSharedArray*     _bsa;
+  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
+
+  static const size_t ChunkSizeInWords;
+  static const size_t ChunkSizeInBytes;
+  HeapWord* allocate_slow(size_t word_sz);
+
+  void fill_region_with_block(MemRegion mr, bool contig);
+
+public:
+  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
+
+  HeapWord* allocate(size_t word_sz) {
+    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
+    if (res != NULL) {
+      _bt.alloc_block(res, word_sz);
+    } else {
+      res = allocate_slow(word_sz);
+    }
+    return res;
+  }
+
+  void undo_allocation(HeapWord* obj, size_t word_sz);
+
+  void set_buf(HeapWord* buf_start) {
+    ParGCAllocBuffer::set_buf(buf_start);
+    _true_end = _hard_end;
+    _bt.set_region(MemRegion(buf_start, word_sz()));
+    _bt.initialize_threshold();
+  }
+
+  void retire(bool end_of_gc, bool retain);
+
+  MemRegion range() {
+    return MemRegion(_top, _true_end);
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_HPP
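Taken together, a hypothetical single-threaded caller of the moved class follows the same lifecycle as the G1 and ParNew code in this changeset; plab_space(), stats, and word_sz below are stand-ins for the collector's real allocation path, its PLABStats instance, and the requested object size, none of which appear in the patch:

// Hypothetical driver; plab_space() stands in for the collector's real
// buffer allocation path and is not part of the patch.
HeapWord* copy_one_object(ParGCAllocBuffer& buf, PLABStats& stats,
                          size_t word_sz) {
  HeapWord* obj = buf.allocate(word_sz);          // bump-pointer fast path
  if (obj == NULL) {
    // Mid-pause refill: flush sensors, retire, and restart at the
    // currently desired size -- the resizing this changeset enables.
    buf.flush_stats_and_retire(&stats, false /* end_of_gc */,
                               false /* retain */);
    buf.set_word_size(stats.desired_plab_sz());
    buf.set_buf(plab_space(buf.word_sz()));       // stand-in allocator
    obj = buf.allocate(word_sz);
  }
  return obj;
}
// At the end of the pause each thread then calls
//   buf.flush_stats_and_retire(&stats, true /* end_of_gc */, false /* retain */);
// and, when ResizePLAB is on, stats.adjust_desired_plab_sz() latches the
// size used for the next pause.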
--- a/src/share/vm/memory/tenuredGeneration.cpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Mon Aug 06 12:20:14 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/generation.inline.hpp"
--- a/src/share/vm/precompiled/precompiled.hpp	Fri Aug 03 13:24:02 2012 -0700
+++ b/src/share/vm/precompiled/precompiled.hpp	Mon Aug 06 12:20:14 2012 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -306,7 +306,6 @@
 # include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
 # include "gc_implementation/g1/ptrQueue.hpp"
 # include "gc_implementation/g1/satbQueue.hpp"
-# include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 # include "gc_implementation/parNew/parOopClosures.hpp"
 # include "gc_implementation/parallelScavenge/objectStartArray.hpp"
 # include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
@@ -322,6 +321,7 @@
 # include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 # include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
 # include "gc_implementation/shared/gcPolicyCounters.hpp"
+# include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #endif // SERIALGC
 
 #endif // !DONT_USE_PRECOMPILED_HEADER