changeset 1318:c475b10a610b

6423256: GC stacks should use a better data structure
6942771: SEGV in ParScanThreadState::take_from_overflow_stack
Reviewed-by: apetrusenko, ysr, pbk
author jcoomes
date Tue, 16 Feb 2010 22:09:54 -0800
parents 1753407b5d19
children daafeed82cf5
files
      src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
      src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
      src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
      src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
      src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
      src/share/vm/gc_implementation/includeDB_gc_serial
      src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
      src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
      src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
      src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
      src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
      src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
      src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
      src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
      src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
      src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
      src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
      src/share/vm/gc_implementation/shared/markSweep.cpp
      src/share/vm/gc_implementation/shared/markSweep.hpp
      src/share/vm/gc_implementation/shared/markSweep.inline.hpp
      src/share/vm/includeDB_core
      src/share/vm/memory/allocation.hpp
      src/share/vm/memory/defNewGeneration.cpp
      src/share/vm/memory/defNewGeneration.hpp
      src/share/vm/memory/genMarkSweep.cpp
      src/share/vm/runtime/globals.hpp
      src/share/vm/utilities/stack.hpp
      src/share/vm/utilities/stack.inline.hpp
      src/share/vm/utilities/taskqueue.cpp
      src/share/vm/utilities/taskqueue.hpp
diffstat 30 files changed, 757 insertions(+), 440 deletions(-)
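The theme of this changeset: every GC stack that was a GrowableArray<T>* lazily allocated from the C heap (guarded by NULL checks, indexed with length()/at(), and manually deleted) becomes a value-type Stack<T> member from the new src/share/vm/utilities/stack.hpp and stack.inline.hpp. The real Stack<T> is a segmented, C-heap-backed stack whose implementation is not reproduced on this page; the sketch below is only a stand-in that models the interface shape the updated call sites rely on, with std::vector assumed as backing storage.

    // Illustrative stand-in only -- NOT the HotSpot Stack<T> from stack.hpp,
    // which is segmented and C-heap backed.  This models just the interface
    // the call sites in this changeset use.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    template <class E>
    class Stack {
     public:
      size_t size() const     { return _elems.size(); }
      bool   is_empty() const { return _elems.empty(); }
      void   push(E item)     { _elems.push_back(item); }
      E pop() {
        assert(!_elems.empty());
        E e = _elems.back();
        _elems.pop_back();
        return e;
      }
      // clear(true) in the real class also frees cached segments; releasing
      // the vector's capacity approximates that here.
      void clear(bool clear_cache = false) {
        _elems.clear();
        if (clear_cache) { std::vector<E>().swap(_elems); }
      }
     private:
      std::vector<E> _elems;
    };

Because the stacks are now value members that default-construct to empty, the lazy-allocation and NULL-check code removed throughout the hunks below (in the various allocate_stacks() and preserve-mark paths) simply falls away: an unused stack is just an empty one.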
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -540,8 +540,6 @@
   _is_alive_closure(_span, &_markBitMap),
   _restart_addr(NULL),
   _overflow_list(NULL),
-  _preserved_oop_stack(NULL),
-  _preserved_mark_stack(NULL),
   _stats(cmsGen),
   _eden_chunk_array(NULL),     // may be set in ctor body
   _eden_chunk_capacity(0),     // -- ditto --
@@ -8815,23 +8813,10 @@
 // failures where possible, thus, incrementally hardening the VM
 // in such low resource situations.
 void CMSCollector::preserve_mark_work(oop p, markOop m) {
-  if (_preserved_oop_stack == NULL) {
-    assert(_preserved_mark_stack == NULL,
-           "bijection with preserved_oop_stack");
-    // Allocate the stacks
-    _preserved_oop_stack  = new (ResourceObj::C_HEAP)
-      GrowableArray<oop>(PreserveMarkStackSize, true);
-    _preserved_mark_stack = new (ResourceObj::C_HEAP)
-      GrowableArray<markOop>(PreserveMarkStackSize, true);
-    if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
-      vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
-                            "Preserved Mark/Oop Stack for CMS (C-heap)");
-    }
-  }
-  _preserved_oop_stack->push(p);
-  _preserved_mark_stack->push(m);
+  _preserved_oop_stack.push(p);
+  _preserved_mark_stack.push(m);
   assert(m == p->mark(), "Mark word changed");
-  assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "bijection");
 }
 
@@ -8873,42 +8858,30 @@
 // effect on performance so great that this will
 // likely just be in the noise anyway.
 void CMSCollector::restore_preserved_marks_if_any() {
-  if (_preserved_oop_stack == NULL) {
-    assert(_preserved_mark_stack == NULL,
-           "bijection with preserved_oop_stack");
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
   assert(Thread::current()->is_ConcurrentGC_thread() ||
          Thread::current()->is_VM_thread(),
          "should be single-threaded");
-
-  int length = _preserved_oop_stack->length();
-  assert(_preserved_mark_stack->length() == length, "bijection");
-  for (int i = 0; i < length; i++) {
-    oop p = _preserved_oop_stack->at(i);
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
+         "bijection");
+
+  while (!_preserved_oop_stack.is_empty()) {
+    oop p = _preserved_oop_stack.pop();
     assert(p->is_oop(), "Should be an oop");
     assert(_span.contains(p), "oop should be in _span");
     assert(p->mark() == markOopDesc::prototype(),
            "Set when taken from overflow list");
-    markOop m = _preserved_mark_stack->at(i);
+    markOop m = _preserved_mark_stack.pop();
     p->set_mark(m);
   }
-  _preserved_mark_stack->clear();
-  _preserved_oop_stack->clear();
-  assert(_preserved_mark_stack->is_empty() &&
-         _preserved_oop_stack->is_empty(),
+  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
          "stacks were cleared above");
 }
 
 #ifndef PRODUCT
 bool CMSCollector::no_preserved_marks() const {
-  return (   (   _preserved_mark_stack == NULL
-              && _preserved_oop_stack == NULL)
-          || (   _preserved_mark_stack->is_empty()
-              && _preserved_oop_stack->is_empty()));
+  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
 }
 #endif
 
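A compressed sketch of the pattern the CMS hunks above converge on: preserved (oop, mark) pairs live on two parallel stacks kept the same size (the "bijection" asserts), and restoration pops both until empty, so the old is-the-stack-allocated guard disappears. std::stack and a plain struct stand in for the HotSpot types.

    #include <cassert>
    #include <cstdint>
    #include <stack>

    struct Obj { std::uintptr_t mark; };               // stand-in for oopDesc

    static std::stack<Obj*>           preserved_oops;  // _preserved_oop_stack
    static std::stack<std::uintptr_t> preserved_marks; // _preserved_mark_stack

    void preserve_mark_work(Obj* p, std::uintptr_t m) {
      preserved_oops.push(p);
      preserved_marks.push(m);
      assert(preserved_oops.size() == preserved_marks.size()); // "bijection"
    }

    void restore_preserved_marks_if_any() {
      assert(preserved_oops.size() == preserved_marks.size());
      while (!preserved_oops.empty()) {   // nothing preserved => no-op,
        Obj* p = preserved_oops.top();    // so no NULL/allocated guard needed
        preserved_oops.pop();
        p->mark = preserved_marks.top();
        preserved_marks.pop();
      }
    }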
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -522,8 +522,8 @@
   // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
-  GrowableArray<oop>*     _preserved_oop_stack;
-  GrowableArray<markOop>* _preserved_mark_stack;
+  Stack<oop>     _preserved_oop_stack;
+  Stack<markOop> _preserved_mark_stack;
 
   int*             _hash_seed;
 
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -96,20 +96,6 @@
   GenMarkSweep::_preserved_count_max = 0;
   GenMarkSweep::_preserved_marks = NULL;
   GenMarkSweep::_preserved_count = 0;
-  GenMarkSweep::_preserved_mark_stack = NULL;
-  GenMarkSweep::_preserved_oop_stack = NULL;
-
-  GenMarkSweep::_marking_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
-
-  int size = SystemDictionary::number_of_classes() * 2;
-  GenMarkSweep::_revisit_klass_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
-  // for now until we have a chance to work out a more optimal setting.
-  GenMarkSweep::_revisit_mdo_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
-
 }
 
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
@@ -138,7 +124,7 @@
 
   // Follow system dictionary roots and unload classes
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack->is_empty(),
+  assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");
 
   // Follow code cache roots (has to be done after system dictionary,
@@ -150,19 +136,19 @@
 
   // Update subklass/sibling/implementor links of live klasses
   GenMarkSweep::follow_weak_klass_links();
-  assert(GenMarkSweep::_marking_stack->is_empty(),
+  assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");
 
   // Visit memoized MDO's and clear any unmarked weak refs
   GenMarkSweep::follow_mdo_weak_refs();
-  assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");
+  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
 
 
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&GenMarkSweep::is_alive);
   StringTable::unlink(&GenMarkSweep::is_alive);
 
-  assert(GenMarkSweep::_marking_stack->is_empty(),
+  assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");
 }
 
--- a/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Tue Feb 16 22:09:54 2010 -0800
@@ -165,6 +165,7 @@
 concurrentMarkSweepGeneration.hpp       generation.hpp
 concurrentMarkSweepGeneration.hpp       generationCounters.hpp
 concurrentMarkSweepGeneration.hpp       mutexLocker.hpp
+concurrentMarkSweepGeneration.hpp       stack.inline.hpp
 concurrentMarkSweepGeneration.hpp       taskqueue.hpp
 concurrentMarkSweepGeneration.hpp       virtualspace.hpp
 concurrentMarkSweepGeneration.hpp       yieldingWorkgroup.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Tue Feb 16 22:09:54 2010 -0800
@@ -184,9 +184,11 @@
 psCompactionManager.cpp                 psParallelCompact.hpp
 psCompactionManager.cpp                 psCompactionManager.hpp
 psCompactionManager.cpp                 psOldGen.hpp
+psCompactionManager.cpp                 stack.inline.hpp
 psCompactionManager.cpp                 systemDictionary.hpp
 
 psCompactionManager.hpp                 allocation.hpp
+psCompactionManager.hpp                 stack.hpp
 psCompactionManager.hpp                 taskqueue.hpp
 
 psGCAdaptivePolicyCounters.hpp		gcAdaptivePolicyCounters.hpp
@@ -226,12 +228,14 @@
 psMarkSweep.cpp                         referenceProcessor.hpp
 psMarkSweep.cpp                         safepoint.hpp
 psMarkSweep.cpp                         spaceDecorator.hpp
+psMarkSweep.cpp                         stack.inline.hpp
 psMarkSweep.cpp                         symbolTable.hpp
 psMarkSweep.cpp                         systemDictionary.hpp
 psMarkSweep.cpp                         vmThread.hpp
 
 psMarkSweep.hpp                         markSweep.inline.hpp
 psMarkSweep.hpp                         collectorCounters.hpp
+psMarkSweep.hpp                         stack.hpp
 
 psMarkSweepDecorator.cpp                liveRange.hpp
 psMarkSweepDecorator.cpp                markSweep.inline.hpp
@@ -272,6 +276,7 @@
 psParallelCompact.cpp			referencePolicy.hpp
 psParallelCompact.cpp			referenceProcessor.hpp
 psParallelCompact.cpp			safepoint.hpp
+psParallelCompact.cpp			stack.inline.hpp
 psParallelCompact.cpp			symbolTable.hpp
 psParallelCompact.cpp			systemDictionary.hpp
 psParallelCompact.cpp			vmThread.hpp
@@ -358,6 +363,7 @@
 psScavenge.cpp                          referenceProcessor.hpp
 psScavenge.cpp                          resourceArea.hpp
 psScavenge.cpp                          spaceDecorator.hpp
+psScavenge.cpp                          stack.inline.hpp
 psScavenge.cpp                          threadCritical.hpp
 psScavenge.cpp                          vmThread.hpp
 psScavenge.cpp                          vm_operations.hpp
@@ -367,6 +373,7 @@
 psScavenge.hpp                          collectorCounters.hpp
 psScavenge.hpp                          oop.hpp
 psScavenge.hpp                          psVirtualspace.hpp
+psScavenge.hpp                          stack.hpp
 
 psScavenge.inline.hpp                   cardTableExtension.hpp
 psScavenge.inline.hpp                   parallelScavengeHeap.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_serial	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_serial	Tue Feb 16 22:09:54 2010 -0800
@@ -92,11 +92,13 @@
 markSweep.hpp                           growableArray.hpp
 markSweep.hpp                           markOop.hpp
 markSweep.hpp                           oop.hpp
+markSweep.hpp                           stack.hpp
 markSweep.hpp                           timer.hpp
 markSweep.hpp                           universe.hpp
 
 markSweep.inline.hpp                    collectedHeap.hpp
 markSweep.inline.hpp                    markSweep.hpp
+markSweep.inline.hpp                    stack.inline.hpp
 
 mutableSpace.hpp                        immutableSpace.hpp
 mutableSpace.hpp                        memRegion.hpp
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -34,12 +34,12 @@
                                        Generation* old_gen_,
                                        int thread_num_,
                                        ObjToScanQueueSet* work_queue_set_,
-                                       GrowableArray<oop>**  overflow_stack_set_,
+                                       Stack<oop>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
-  _overflow_stack(overflow_stack_set_[thread_num_]),
+  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
   _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
@@ -156,11 +156,12 @@
   assert(ParGCUseLocalOverflow, "Else should not call");
   assert(young_gen()->overflow_list() == NULL, "Error");
   ObjToScanQueue* queue = work_queue();
-  GrowableArray<oop>* of_stack = overflow_stack();
-  uint num_overflow_elems = of_stack->length();
-  uint num_take_elems     = MIN2(MIN2((queue->max_elems() - queue->size())/4,
-                                      (juint)ParGCDesiredObjsFromOverflowList),
-                                 num_overflow_elems);
+  Stack<oop>* const of_stack = overflow_stack();
+  const size_t num_overflow_elems = of_stack->size();
+  const size_t space_available = queue->max_elems() - queue->size();
+  const size_t num_take_elems = MIN3(space_available / 4,
+                                     ParGCDesiredObjsFromOverflowList,
+                                     num_overflow_elems);
   // Transfer the most recent num_take_elems from the overflow
   // stack to our work queue.
   for (size_t i = 0; i != num_take_elems; i++) {
@@ -268,7 +269,7 @@
                         ParNewGeneration&       gen,
                         Generation&             old_gen,
                         ObjToScanQueueSet&      queue_set,
-                        GrowableArray<oop>**    overflow_stacks_,
+                        Stack<oop>*             overflow_stacks_,
                         size_t                  desired_plab_sz,
                         ParallelTaskTerminator& term);
   inline ParScanThreadState& thread_state(int i);
@@ -291,18 +292,20 @@
 ParScanThreadStateSet::ParScanThreadStateSet(
   int num_threads, Space& to_space, ParNewGeneration& gen,
   Generation& old_gen, ObjToScanQueueSet& queue_set,
-  GrowableArray<oop>** overflow_stack_set_,
+  Stack<oop>* overflow_stacks,
   size_t desired_plab_sz, ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
     _gen(gen), _next_gen(old_gen), _term(term),
     _pushes(0), _pops(0), _steals(0)
 {
   assert(num_threads > 0, "sanity check!");
+  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
+         "overflow_stack allocation mismatch");
   // Initialize states.
   for (int i = 0; i < num_threads; ++i) {
     new ((ParScanThreadState*)_data + i)
         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
-                           overflow_stack_set_, desired_plab_sz, term);
+                           overflow_stacks, desired_plab_sz, term);
   }
 }
 
@@ -548,14 +551,11 @@
   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
     _task_queues->queue(i2)->initialize();
 
-  _overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray<oop>*, ParallelGCThreads);
-  guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure");
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    if (ParGCUseLocalOverflow) {
-      _overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
-      guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure.");
-    } else {
-      _overflow_stacks[i] = NULL;
+  _overflow_stacks = NULL;
+  if (ParGCUseLocalOverflow) {
+    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
+    for (size_t i = 0; i < ParallelGCThreads; ++i) {
+      new (_overflow_stacks + i) Stack<oop>();
     }
   }
 
@@ -896,12 +896,9 @@
   } else {
     assert(HandlePromotionFailure,
       "Should only be here if promotion failure handling is on");
-    if (_promo_failure_scan_stack != NULL) {
-      // Can be non-null because of reference processing.
-      // Free stack with its elements.
-      delete _promo_failure_scan_stack;
-      _promo_failure_scan_stack = NULL;
-    }
+    assert(_promo_failure_scan_stack.is_empty(), "post condition");
+    _promo_failure_scan_stack.clear(true); // Clear cached segments.
+
     remove_forwarding_pointers();
     if (PrintGCDetails) {
       gclog_or_tty->print(" (promotion failed)");
@@ -1353,8 +1350,8 @@
   size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                  (size_t)ParGCDesiredObjsFromOverflowList);
 
+  assert(!UseCompressedOops, "Error");
   assert(par_scan_state->overflow_stack() == NULL, "Error");
-  assert(!UseCompressedOops, "Error");
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
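The _overflow_stacks hunk above is the one place where the value-type switch still needs explicit construction: NEW_C_HEAP_ARRAY returns raw storage, so each Stack<oop> is built in place with placement new, and the array is only allocated at all when ParGCUseLocalOverflow is on (hence the new mismatch assert in ParScanThreadStateSet). Per-thread state then addresses its stack as overflow_stacks_ + thread_num_. A self-contained sketch, with malloc standing in for NEW_C_HEAP_ARRAY and std::stack for Stack<oop>:

    #include <cstdlib>
    #include <new>
    #include <stack>

    typedef std::stack<void*> OopStack;          // stand-in for Stack<oop>

    OopStack* allocate_overflow_stacks(std::size_t n_threads) {
      // Raw, uninitialized bytes (the real code uses NEW_C_HEAP_ARRAY).
      OopStack* stacks =
          static_cast<OopStack*>(std::malloc(n_threads * sizeof(OopStack)));
      for (std::size_t i = 0; i < n_threads; ++i) {
        new (stacks + i) OopStack();             // construct in place
      }
      return stacks;                             // thread t uses stacks + t
    }

(Teardown would require explicit ~OopStack() calls before freeing the storage; the VM keeps these stacks for its lifetime, so none appears in the diff.)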
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -55,7 +55,7 @@
   friend class ParScanThreadStateSet;
  private:
   ObjToScanQueue *_work_queue;
-  GrowableArray<oop>* _overflow_stack;
+  Stack<oop>* const _overflow_stack;
 
   ParGCAllocBuffer _to_space_alloc_buffer;
 
@@ -120,7 +120,7 @@
   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                      Generation* old_gen_, int thread_num_,
                      ObjToScanQueueSet* work_queue_set_,
-                     GrowableArray<oop>** overflow_stack_set_,
+                     Stack<oop>* overflow_stacks_,
                      size_t desired_plab_sz_,
                      ParallelTaskTerminator& term_);
 
@@ -144,7 +144,7 @@
   void trim_queues(int max_size);
 
   // Private overflow stack usage
-  GrowableArray<oop>* overflow_stack() { return _overflow_stack; }
+  Stack<oop>* overflow_stack() { return _overflow_stack; }
   bool take_from_overflow_stack();
   void push_on_overflow_stack(oop p);
 
@@ -314,7 +314,7 @@
   ObjToScanQueueSet* _task_queues;
 
   // Per-worker-thread local overflow stacks
-  GrowableArray<oop>** _overflow_stacks;
+  Stack<oop>* _overflow_stacks;
 
   // Desired size of survivor space plab's
   PLABStats _plab_stats;
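take_from_overflow_stack(), declared above and reworked in the .cpp hunks earlier, now bounds the transfer with a single MIN3 over three size_t values instead of nested MIN2s with juint casts: take no more than a quarter of the work queue's free space, no more than ParGCDesiredObjsFromOverflowList, and no more than the overflow stack holds. A sketch of just that computation, assuming a min3 helper equivalent to HotSpot's MIN3 macro:

    #include <algorithm>
    #include <cstddef>

    // Equivalent of HotSpot's MIN3 macro.
    template <class T>
    T min3(T a, T b, T c) { return std::min(a, std::min(b, c)); }

    std::size_t num_to_take(std::size_t queue_max_elems,
                            std::size_t queue_size,
                            std::size_t desired_from_overflow,
                            std::size_t overflow_size) {
      const std::size_t space_available = queue_max_elems - queue_size;
      return min3(space_available / 4,    // don't swamp the work queue
                  desired_from_overflow,  // ParGCDesiredObjsFromOverflowList
                  overflow_size);         // can't take more than is there
    }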
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -59,8 +59,6 @@
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  assert(cm->stacks_have_been_allocated(),
-         "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
 
   switch (_root_type) {
@@ -119,7 +117,6 @@
 
   // Do the real work
   cm->drain_marking_stacks(&mark_and_push_closure);
-  // cm->deallocate_stacks();
 }
 
 
@@ -135,8 +132,6 @@
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  assert(cm->stacks_have_been_allocated(),
-         "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -41,39 +41,8 @@
   _old_gen = heap->old_gen();
   _start_array = old_gen()->start_array();
 
-
   marking_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-#ifdef USE_RegionTaskQueueWithOverflow
-  region_stack()->initialize();
-#else
   region_stack()->initialize();
-
-  // We want the overflow stack to be permanent
-  _region_overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
-#endif
-
-  // Note that _revisit_klass_stack is allocated out of the
-  // C heap (as opposed to out of ResourceArena).
-  int size =
-    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
-  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
-  // have to do for now until we are able to investigate a more optimal setting.
-  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
-
-}
-
-ParCompactionManager::~ParCompactionManager() {
-  delete _overflow_stack;
-  delete _revisit_klass_stack;
-  delete _revisit_mdo_stack;
-  // _manager_array and _stack_array are statics
-  // shared with all instances of ParCompactionManager
-  // should not be deallocated.
 }
 
 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
@@ -197,9 +166,9 @@
 }
 
 void ParCompactionManager::reset() {
-  for(uint i=0; i<ParallelGCThreads+1; i++) {
-    manager_array(i)->revisit_klass_stack()->clear();
-    manager_array(i)->revisit_mdo_stack()->clear();
+  for(uint i = 0; i < ParallelGCThreads + 1; i++) {
+    assert(manager_array(i)->revisit_klass_stack()->is_empty(), "sanity");
+    assert(manager_array(i)->revisit_mdo_stack()->is_empty(), "sanity");
   }
 }
 
@@ -229,10 +198,10 @@
       // pop, but they can come from anywhere, unfortunately.
       obj->follow_contents(this);
     }
-  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));
+  } while(marking_stack()->size() != 0 || !overflow_stack()->is_empty());
 
   assert(marking_stack()->size() == 0, "Sanity");
-  assert(overflow_stack()->length() == 0, "Sanity");
+  assert(overflow_stack()->is_empty(), "Sanity");
 }
 
 void ParCompactionManager::drain_region_overflow_stack() {
@@ -281,15 +250,14 @@
       // pop, but they can come from anywhere, unfortunately.
       PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while((region_stack()->size() != 0) ||
-          (region_overflow_stack()->length() != 0));
+  } while (region_stack()->size() != 0 || !region_overflow_stack()->is_empty());
 #endif
 
 #ifdef USE_RegionTaskQueueWithOverflow
   assert(region_stack()->is_empty(), "Sanity");
 #else
   assert(region_stack()->size() == 0, "Sanity");
-  assert(region_overflow_stack()->length() == 0, "Sanity");
+  assert(region_overflow_stack()->is_empty(), "Sanity");
 #endif
 #else
   oop obj;
@@ -298,10 +266,3 @@
   }
 #endif
 }
-
-#ifdef ASSERT
-bool ParCompactionManager::stacks_have_been_allocated() {
-  return (revisit_klass_stack()->data_addr() != NULL &&
-          revisit_mdo_stack()->data_addr() != NULL);
-}
-#endif
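The drain loops above keep their shape but test the overflow side with is_empty() instead of length() != 0: process work from the bounded task queue, fall back to the overflow stack, and loop until both are observed empty in the same pass. A rough sketch of that shape only (the full HotSpot loop bodies are not shown in this diff), with simple containers standing in for OopTaskQueue and Stack<oop>:

    #include <cstdio>
    #include <stack>
    #include <vector>

    typedef int Task;

    static std::vector<Task> marking_stack;  // stand-in for bounded OopTaskQueue
    static std::stack<Task>  overflow_stack; // stand-in for Stack<oop>

    void process(Task t) { std::printf("task %d\n", t); } // follow_contents()

    void drain_marking_stacks() {
      do {
        // Entries land on the overflow stack only when the task queue is full.
        while (!overflow_stack.empty()) {
          Task t = overflow_stack.top();
          overflow_stack.pop();
          process(t);
        }
        while (!marking_stack.empty()) {
          Task t = marking_stack.back();
          marking_stack.pop_back();
          process(t);
        }
        // In the real collector follow_contents() can push new tasks onto
        // either stack, which is why the outer condition re-tests both.
      } while (!marking_stack.empty() || !overflow_stack.empty());
    }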
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -79,7 +79,7 @@
   static PSOldGen*              _old_gen;
 
   OopTaskQueue                  _marking_stack;
-  GrowableArray<oop>*           _overflow_stack;
+  Stack<oop>                    _overflow_stack;
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.
@@ -88,13 +88,12 @@
   RegionTaskQueueWithOverflow   _region_stack;
 #else
   RegionTaskQueue               _region_stack;
-  GrowableArray<size_t>*        _region_overflow_stack;
+  Stack<size_t>                 _region_overflow_stack;
 #endif
 
-#if 1  // does this happen enough to need a per thread stack?
-  GrowableArray<Klass*>*        _revisit_klass_stack;
-  GrowableArray<DataLayout*>*   _revisit_mdo_stack;
-#endif
+  Stack<Klass*>                 _revisit_klass_stack;
+  Stack<DataLayout*>            _revisit_mdo_stack;
+
   static ParMarkBitMap* _mark_bitmap;
 
   Action _action;
@@ -109,14 +108,12 @@
   // Array of tasks.  Needed by the ParallelTaskTerminator.
   static RegionTaskQueueSet* region_array()      { return _region_array; }
   OopTaskQueue*  marking_stack()                 { return &_marking_stack; }
-  GrowableArray<oop>* overflow_stack()           { return _overflow_stack; }
+  Stack<oop>* overflow_stack()                   { return &_overflow_stack; }
 #ifdef USE_RegionTaskQueueWithOverflow
   RegionTaskQueueWithOverflow* region_stack()    { return &_region_stack; }
 #else
   RegionTaskQueue*  region_stack()               { return &_region_stack; }
-  GrowableArray<size_t>* region_overflow_stack() {
-    return _region_overflow_stack;
-  }
+  Stack<size_t>* region_overflow_stack() { return &_region_overflow_stack; }
 #endif
 
   // Pushes onto the marking stack.  If the marking stack is full,
@@ -136,10 +133,7 @@
   inline static ParCompactionManager* manager_array(int index);
 
   ParCompactionManager();
-  ~ParCompactionManager();
 
-  void allocate_stacks();
-  void deallocate_stacks();
   ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
 
   // Take actions in preparation for a compaction.
@@ -152,11 +146,8 @@
   bool should_verify_only();
   bool should_reset_only();
 
-#if 1
-  // Probably stays as a growable array
-  GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
-  GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
-#endif
+  Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
+  Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }
 
   // Save oop for later processing.  Must not fail.
   void save_for_scanning(oop m);
@@ -187,11 +178,6 @@
 
   // Process tasks remaining on any stack
   void drain_region_overflow_stack();
-
-  // Debugging support
-#ifdef ASSERT
-  bool stacks_have_been_allocated();
-#endif
 };
 
 inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -474,31 +474,15 @@
   _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
   // Now divide by the size of a PreservedMark
   _preserved_count_max /= sizeof(PreservedMark);
-
-  _preserved_mark_stack = NULL;
-  _preserved_oop_stack = NULL;
-
-  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
-
-  int size = SystemDictionary::number_of_classes() * 2;
-  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
-  // now until we investigate a more optimal setting.
-  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }
 
 
 void PSMarkSweep::deallocate_stacks() {
-  if (_preserved_oop_stack) {
-    delete _preserved_mark_stack;
-    _preserved_mark_stack = NULL;
-    delete _preserved_oop_stack;
-    _preserved_oop_stack = NULL;
-  }
-
-  delete _marking_stack;
-  delete _revisit_klass_stack;
-  delete _revisit_mdo_stack;
+  _preserved_mark_stack.clear(true);
+  _preserved_oop_stack.clear(true);
+  _marking_stack.clear();
+  _revisit_klass_stack.clear(true);
+  _revisit_mdo_stack.clear(true);
 }
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
@@ -548,17 +532,17 @@
 
   // Update subklass/sibling/implementor links of live klasses
   follow_weak_klass_links();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");
 
   // Visit memoized mdo's and clear unmarked weak refs
   follow_mdo_weak_refs();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");
 
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());
 
-  assert(_marking_stack->is_empty(), "stack should be empty by now");
+  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
 
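deallocate_stacks() above shows the new lifetime model: the stacks are static value objects that persist across collections, so "deallocation" means clearing elements, and, for the stacks passed clear(true), releasing cached backing segments as well. Judging by which stacks get the true argument, the marking stack keeps its cache for reuse every GC while the rarely-used preserved/revisit stacks give theirs back. std::vector models the distinction in this sketch (the real Stack<T> frees whole segments rather than shrinking a contiguous buffer):

    #include <cassert>
    #include <vector>

    static std::vector<int> marking_stack;   // reused every GC: keep capacity
    static std::vector<int> preserved_marks; // grows only on promotion failure

    void deallocate_stacks() {
      marking_stack.clear();                    // _marking_stack.clear()
      preserved_marks.clear();                  // _preserved_mark_stack.clear(true):
      std::vector<int>().swap(preserved_marks); // drop cached capacity too
      assert(marking_stack.empty() && preserved_marks.empty());
    }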
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -2172,6 +2172,16 @@
     }
   }
 
+#ifdef ASSERT
+  for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
+    ParCompactionManager* const cm =
+      ParCompactionManager::manager_array(int(i));
+    assert(cm->overflow_stack()->is_empty(),        "should be empty");
+    assert(cm->region_stack()->overflow_is_empty(), "should be empty");
+    assert(cm->revisit_klass_stack()->is_empty(),   "should be empty");
+  }
+#endif // ASSERT
+
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     gclog_or_tty->print(" VerifyAfterGC:");
@@ -2730,21 +2740,22 @@
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
   }
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
     KeepAliveClosure keep_alive_closure(cm);
-    int length = cm->revisit_klass_stack()->length();
+    Stack<Klass*>* const rks = cm->revisit_klass_stack();
     if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
+      gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
+                             i, rks->size());
     }
-    for (int j = 0; j < length; j++) {
-      cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
-        is_alive_closure(),
-        &keep_alive_closure);
+    while (!rks->is_empty()) {
+      Klass* const k = rks->pop();
+      k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
     }
-    // revisit_klass_stack is cleared in reset()
+
     follow_stack(cm);
   }
 }
@@ -2763,19 +2774,20 @@
   // we can visit and clear any weak references from MDO's which
   // we memoized during the strong marking phase.
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
   }
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
-    int length = rms->length();
+    Stack<DataLayout*>* rms = cm->revisit_mdo_stack();
     if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
+      gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
+                             i, rms->size());
     }
-    for (int j = 0; j < length; j++) {
-      rms->at(j)->follow_weak_refs(is_alive_closure());
+    while (!rms->is_empty()) {
+      rms->pop()->follow_weak_refs(is_alive_closure());
     }
-    // revisit_mdo_stack is cleared in reset()
+
     follow_stack(cm);
   }
 }
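The two revisit loops above switch from indexed GrowableArray iteration, with a later clear() in reset(), to destructive pops; that is why reset() in psCompactionManager.cpp now merely asserts the stacks are empty. A sketch of the consuming loop, with hypothetical Klass and closure stand-ins:

    #include <stack>

    struct IsAlive {};
    struct KeepAlive {};

    struct Klass {
      // Hypothetical stand-in for Klass::follow_weak_klass_links().
      void follow_weak_klass_links(IsAlive*, KeepAlive*) {}
    };

    void follow_weak_klass_links(std::stack<Klass*>* rks,
                                 IsAlive* is_alive, KeepAlive* keep_alive) {
      while (!rks->empty()) {          // consumes the stack, so nothing is
        Klass* k = rks->top();         // left for reset() to clear
        rks->pop();
        k->follow_weak_klass_links(is_alive, keep_alive);
      }
    }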
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -101,10 +101,10 @@
     // have a better idea of what went wrong
     if (i < ParallelGCThreads) {
       guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
+                 manager->overflow_stack_depth()->is_empty()),
                 "promotion manager overflow stack must be empty");
       guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
+                 manager->overflow_stack_breadth()->is_empty()),
                 "promotion manager overflow stack must be empty");
 
       guarantee((!UseDepthFirstScavengeOrder ||
@@ -115,11 +115,11 @@
                 "promotion manager claimed stack must be empty");
     } else {
       guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
+                 manager->overflow_stack_depth()->is_empty()),
                 "VM Thread promotion manager overflow stack "
                 "must be empty");
       guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
+                 manager->overflow_stack_breadth()->is_empty()),
                 "VM Thread promotion manager overflow stack "
                 "must be empty");
 
@@ -181,15 +181,9 @@
   if (depth_first()) {
     claimed_stack_depth()->initialize();
     queue_size = claimed_stack_depth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
-    _overflow_stack_breadth = NULL;
   } else {
     claimed_stack_breadth()->initialize();
     queue_size = claimed_stack_breadth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-    _overflow_stack_depth = NULL;
   }
 
   _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
@@ -276,19 +270,17 @@
         process_popped_location_depth(p);
       }
     }
-  } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
-           (overflow_stack_depth()->length() > 0) );
+  } while((totally_drain && claimed_stack_depth()->size() > 0) ||
+          !overflow_stack_depth()->is_empty());
 
   assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_depth()->size() <= _target_stack_size,
+  assert(totally_drain || claimed_stack_depth()->size() <= _target_stack_size,
          "Sanity");
   assert(overflow_stack_empty(), "Sanity");
 }
 
 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
   assert(!depth_first(), "invariant");
-  assert(overflow_stack_breadth() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;
 
 #ifdef ASSERT
@@ -328,11 +320,11 @@
 
     // If we could not find any other work, flush the prefetch queue
     if (claimed_stack_breadth()->size() == 0 &&
-        (overflow_stack_breadth()->length() == 0)) {
+        overflow_stack_breadth()->is_empty()) {
       flush_prefetch_queue();
     }
   } while((totally_drain && claimed_stack_breadth()->size() > 0) ||
-          (overflow_stack_breadth()->length() > 0));
+          !overflow_stack_breadth()->is_empty());
 
   assert(!totally_drain || claimed_stack_empty(), "Sanity");
   assert(totally_drain ||
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -59,7 +59,7 @@
   uint                                _masked_pushes;
 
   uint                                _overflow_pushes;
-  uint                                _max_overflow_length;
+  uint                                _max_overflow_size;
 
   uint                                _arrays_chunked;
   uint                                _array_chunks_processed;
@@ -78,9 +78,9 @@
   PrefetchQueue                       _prefetch_queue;
 
   OopStarTaskQueue                    _claimed_stack_depth;
-  GrowableArray<StarTask>*            _overflow_stack_depth;
+  Stack<StarTask>                     _overflow_stack_depth;
   OopTaskQueue                        _claimed_stack_breadth;
-  GrowableArray<oop>*                 _overflow_stack_breadth;
+  Stack<oop>                          _overflow_stack_breadth;
 
   bool                                _depth_first;
   bool                                _totally_drain;
@@ -97,8 +97,8 @@
   template <class T> inline void claim_or_forward_internal_depth(T* p);
   template <class T> inline void claim_or_forward_internal_breadth(T* p);
 
-  GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
-  GrowableArray<oop>*  overflow_stack_breadth()   { return _overflow_stack_breadth; }
+  Stack<StarTask>* overflow_stack_depth()   { return &_overflow_stack_depth; }
+  Stack<oop>*      overflow_stack_breadth() { return &_overflow_stack_breadth; }
 
   // On the task queues we push reference locations as well as
   // partially-scanned arrays (in the latter case, we push an oop to
@@ -157,9 +157,9 @@
       overflow_stack_depth()->push(p);
 #if PS_PM_STATS
       ++_overflow_pushes;
-      uint stack_length = (uint) overflow_stack_depth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
+      uint stack_size = (uint) overflow_stack_depth()->size();
+      if (stack_size > _max_overflow_size) {
+        _max_overflow_size = stack_size;
       }
 #endif // PS_PM_STATS
     }
@@ -176,9 +176,9 @@
       overflow_stack_breadth()->push(o);
 #if PS_PM_STATS
       ++_overflow_pushes;
-      uint stack_length = (uint) overflow_stack_breadth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
+      uint stack_size = (uint) overflow_stack_breadth()->size();
+      if (stack_size > _max_overflow_size) {
+        _max_overflow_size = stack_size;
       }
 #endif // PS_PM_STATS
     }
@@ -248,16 +248,16 @@
 
   bool claimed_stack_empty() {
     if (depth_first()) {
-      return claimed_stack_depth()->size() <= 0;
+      return claimed_stack_depth()->size() == 0;
     } else {
-      return claimed_stack_breadth()->size() <= 0;
+      return claimed_stack_breadth()->size() == 0;
     }
   }
   bool overflow_stack_empty() {
     if (depth_first()) {
-      return overflow_stack_depth()->length() <= 0;
+      return overflow_stack_depth()->is_empty();
     } else {
-      return overflow_stack_breadth()->length() <= 0;
+      return overflow_stack_breadth()->is_empty();
     }
   }
   bool stacks_empty() {
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -34,9 +34,10 @@
 int                        PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
 elapsedTimer               PSScavenge::_accumulated_time;
-GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;
-GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;
+Stack<markOop>             PSScavenge::_preserved_mark_stack;
+Stack<oop>                 PSScavenge::_preserved_oop_stack;
 CollectorCounters*         PSScavenge::_counters = NULL;
+bool                       PSScavenge::_promotion_failed = false;
 
 // Define before use
 class PSIsAliveClosure: public BoolObjectClosure {
@@ -230,6 +231,9 @@
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 
+  assert(_preserved_mark_stack.is_empty(), "should be empty");
+  assert(_preserved_oop_stack.is_empty(), "should be empty");
+
   TimeStamp scavenge_entry;
   TimeStamp scavenge_midpoint;
   TimeStamp scavenge_exit;
@@ -642,24 +646,20 @@
     young_gen->object_iterate(&unforward_closure);
 
     if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Restoring %d marks",
-                              _preserved_oop_stack->length());
+      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
     }
 
     // Restore any saved marks.
-    for (int i=0; i < _preserved_oop_stack->length(); i++) {
-      oop obj       = _preserved_oop_stack->at(i);
-      markOop mark  = _preserved_mark_stack->at(i);
+    while (!_preserved_oop_stack.is_empty()) {
+      oop obj      = _preserved_oop_stack.pop();
+      markOop mark = _preserved_mark_stack.pop();
       obj->set_mark(mark);
     }
 
-    // Deallocate the preserved mark and oop stacks.
-    // The stacks were allocated as CHeap objects, so
-    // we must call delete to prevent mem leaks.
-    delete _preserved_mark_stack;
-    _preserved_mark_stack = NULL;
-    delete _preserved_oop_stack;
-    _preserved_oop_stack = NULL;
+    // Clear the preserved mark and oop stack caches.
+    _preserved_mark_stack.clear(true);
+    _preserved_oop_stack.clear(true);
+    _promotion_failed = false;
   }
 
   // Reset the PromotionFailureALot counters.
@@ -667,27 +667,16 @@
 }
 
 // This method is called whenever an attempt to promote an object
-// fails. Some markOops will need preserving, some will not. Note
+// fails. Some markOops will need preservation, some will not. Note
 // that the entire eden is traversed after a failed promotion, with
 // all forwarded headers replaced by the default markOop. This means
 // it is not necessary to preserve most markOops.
 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
-  if (_preserved_mark_stack == NULL) {
-    ThreadCritical tc; // Lock and retest
-    if (_preserved_mark_stack == NULL) {
-      assert(_preserved_oop_stack == NULL, "Sanity");
-      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
-      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
-    }
-  }
-
-  // Because we must hold the ThreadCritical lock before using
-  // the stacks, we should be safe from observing partial allocations,
-  // which are also guarded by the ThreadCritical lock.
+  _promotion_failed = true;
   if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
     ThreadCritical tc;
-    _preserved_oop_stack->push(obj);
-    _preserved_mark_stack->push(obj_mark);
+    _preserved_oop_stack.push(obj);
+    _preserved_mark_stack.push(obj_mark);
   }
 }
 
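Two things change in psScavenge.cpp above: promotion failure is now recorded in an explicit _promotion_failed flag (the old code inferred it from a non-NULL _preserved_mark_stack, which no longer exists as a pointer), and the double-checked lazy allocation under ThreadCritical disappears because the stacks always exist. A sketch of the resulting oop_promotion_failed(), with std::mutex standing in for ThreadCritical, std::atomic for the flag (the real code uses a plain bool read only at a safepoint), and a hypothetical must_be_preserved() predicate in place of must_be_preserved_for_promotion_failure():

    #include <atomic>
    #include <cstdint>
    #include <mutex>
    #include <stack>

    struct Obj { std::uintptr_t mark; };

    static std::atomic<bool>          promotion_failed(false); // _promotion_failed
    static std::mutex                 critical;                // ThreadCritical
    static std::stack<Obj*>           preserved_oops;
    static std::stack<std::uintptr_t> preserved_marks;

    // Hypothetical predicate; the real check inspects the markOop contents.
    bool must_be_preserved(std::uintptr_t mark) { return (mark & 0x3) != 0x1; }

    void oop_promotion_failed(Obj* obj, std::uintptr_t obj_mark) {
      promotion_failed.store(true, std::memory_order_relaxed);
      if (must_be_preserved(obj_mark)) {
        std::lock_guard<std::mutex> tc(critical); // multiple GC threads can
        preserved_oops.push(obj);                 // fail promotion at once
        preserved_marks.push(obj_mark);
      }
    }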
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -61,9 +61,10 @@
   static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
                                                          // This is used to decide if an oop should be scavenged,
                                                          // cards should be marked, etc.
-  static GrowableArray<markOop>* _preserved_mark_stack; // List of marks to be restored after failed promotion
-  static GrowableArray<oop>*     _preserved_oop_stack;  // List of oops that need their mark restored.
+  static Stack<markOop>          _preserved_mark_stack; // List of marks to be restored after failed promotion
+  static Stack<oop>              _preserved_oop_stack;  // List of oops that need their mark restored.
   static CollectorCounters*      _counters;         // collector performance counters
+  static bool                    _promotion_failed;
 
   static void clean_up_failed_promotion();
 
@@ -79,8 +80,7 @@
   // Accessors
   static int              tenuring_threshold()  { return _tenuring_threshold; }
   static elapsedTimer*    accumulated_time()    { return &_accumulated_time; }
-  static bool             promotion_failed()
-    { return _preserved_mark_stack != NULL; }
+  static bool             promotion_failed()    { return _promotion_failed; }
   static int              consecutive_skipped_scavenges()
     { return _consecutive_skipped_scavenges; }
 
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -25,12 +25,12 @@
 #include "incls/_precompiled.incl"
 #include "incls/_markSweep.cpp.incl"
 
-GrowableArray<oop>*     MarkSweep::_marking_stack       = NULL;
-GrowableArray<Klass*>*  MarkSweep::_revisit_klass_stack = NULL;
-GrowableArray<DataLayout*>*  MarkSweep::_revisit_mdo_stack = NULL;
+Stack<oop>              MarkSweep::_marking_stack;
+Stack<Klass*>           MarkSweep::_revisit_klass_stack;
+Stack<DataLayout*>      MarkSweep::_revisit_mdo_stack;
 
-GrowableArray<oop>*     MarkSweep::_preserved_oop_stack = NULL;
-GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL;
+Stack<oop>              MarkSweep::_preserved_oop_stack;
+Stack<markOop>          MarkSweep::_preserved_mark_stack;
 size_t                  MarkSweep::_preserved_count = 0;
 size_t                  MarkSweep::_preserved_count_max = 0;
 PreservedMark*          MarkSweep::_preserved_marks = NULL;
@@ -57,37 +57,42 @@
 #endif
 
 void MarkSweep::revisit_weak_klass_link(Klass* k) {
-  _revisit_klass_stack->push(k);
+  _revisit_klass_stack.push(k);
 }
 
 void MarkSweep::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
-    gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit klass stack size = " SIZE_FORMAT,
+                           _revisit_klass_stack.size());
   }
-  for (int i = 0; i < _revisit_klass_stack->length(); i++) {
-    _revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive);
+  while (!_revisit_klass_stack.is_empty()) {
+    Klass* const k = _revisit_klass_stack.pop();
+    k->follow_weak_klass_links(&is_alive, &keep_alive);
   }
   follow_stack();
 }
 
 void MarkSweep::revisit_mdo(DataLayout* p) {
-  _revisit_mdo_stack->push(p);
+  _revisit_mdo_stack.push(p);
 }
 
 void MarkSweep::follow_mdo_weak_refs() {
   // All strongly reachable oops have been marked at this point;
   // we can visit and clear any weak references from MDO's which
   // we memoized during the strong marking phase.
-  assert(_marking_stack->is_empty(), "Marking stack should be empty");
+  assert(_marking_stack.is_empty(), "Marking stack should be empty");
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
-    gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit MDO stack size = " SIZE_FORMAT,
+                           _revisit_mdo_stack.size());
   }
-  for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
-    _revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
+  while (!_revisit_mdo_stack.is_empty()) {
+    _revisit_mdo_stack.pop()->follow_weak_refs(&is_alive);
   }
   follow_stack();
 }
@@ -104,8 +109,8 @@
 void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
 
 void MarkSweep::follow_stack() {
-  while (!_marking_stack->is_empty()) {
-    oop obj = _marking_stack->pop();
+  while (!_marking_stack.is_empty()) {
+    oop obj = _marking_stack.pop();
     assert (obj->is_gc_marked(), "p must be marked");
     obj->follow_contents();
   }
@@ -115,23 +120,19 @@
 
 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
 
-// We preserve the mark which should be replaced at the end and the location that it
-// will go.  Note that the object that this markOop belongs to isn't currently at that
-// address but it will be after phase4
+// We preserve the mark which should be replaced at the end and the location
+// that it will go.  Note that the object that this markOop belongs to isn't
+// currently at that address but it will be after phase4
 void MarkSweep::preserve_mark(oop obj, markOop mark) {
-  // we try to store preserved marks in the to space of the new generation since this
-  // is storage which should be available.  Most of the time this should be sufficient
-  // space for the marks we need to preserve but if it isn't we fall back in using
-  // GrowableArrays to keep track of the overflow.
+  // We try to store preserved marks in the to space of the new generation since
+  // this is storage which should be available.  Most of the time this should be
+  // sufficient space for the marks we need to preserve but if it isn't we fall
+  // back on using Stacks to keep track of the overflow.
   if (_preserved_count < _preserved_count_max) {
     _preserved_marks[_preserved_count++].init(obj, mark);
   } else {
-    if (_preserved_mark_stack == NULL) {
-      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
-      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
-    }
-    _preserved_mark_stack->push(mark);
-    _preserved_oop_stack->push(obj);
+    _preserved_mark_stack.push(mark);
+    _preserved_oop_stack.push(obj);
   }
 }
 
@@ -142,8 +143,7 @@
 void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
 
 void MarkSweep::adjust_marks() {
-  assert(_preserved_oop_stack == NULL ||
-         _preserved_oop_stack->length() == _preserved_mark_stack->length(),
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "inconsistent preserved oop stacks");
 
   // adjust the oops we saved earlier
@@ -152,21 +152,19 @@
   }
 
   // deal with the overflow stack
-  if (_preserved_oop_stack) {
-    for (int i = 0; i < _preserved_oop_stack->length(); i++) {
-      oop* p = _preserved_oop_stack->adr_at(i);
-      adjust_pointer(p);
-    }
+  StackIterator<oop> iter(_preserved_oop_stack);
+  while (!iter.is_empty()) {
+    oop* p = iter.next_addr();
+    adjust_pointer(p);
   }
 }
 
 void MarkSweep::restore_marks() {
-  assert(_preserved_oop_stack == NULL ||
-         _preserved_oop_stack->length() == _preserved_mark_stack->length(),
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "inconsistent preserved oop stacks");
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Restoring %d marks", _preserved_count +
-                  (_preserved_oop_stack ? _preserved_oop_stack->length() : 0));
+    gclog_or_tty->print_cr("Restoring %d marks",
+                           _preserved_count + _preserved_oop_stack.size());
   }
 
   // restore the marks we saved earlier
@@ -175,12 +173,10 @@
   }
 
   // deal with the overflow
-  if (_preserved_oop_stack) {
-    for (int i = 0; i < _preserved_oop_stack->length(); i++) {
-      oop obj       = _preserved_oop_stack->at(i);
-      markOop mark  = _preserved_mark_stack->at(i);
-      obj->set_mark(mark);
-    }
+  while (!_preserved_oop_stack.is_empty()) {
+    oop obj       = _preserved_oop_stack.pop();
+    markOop mark  = _preserved_mark_stack.pop();
+    obj->set_mark(mark);
   }
 }
 
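adjust_marks() above is the one consumer that cannot pop: the preserved oops must be rewritten in place during pointer adjustment and then consumed later by restore_marks(), hence the new StackIterator<oop>, which walks element addresses without removing them. A sketch of the same shape over a vector stand-in, with a hypothetical new_location() in place of the forwarding-pointer lookup:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef std::uintptr_t Addr;

    static std::vector<Addr> preserved_oops;  // stand-in for Stack<oop>

    // Hypothetical relocation; the real code reads the forwarding pointer
    // installed during the earlier compaction phases.
    Addr new_location(Addr old_addr) { return old_addr + 64; }

    void adjust_marks() {
      // Visit each slot by address and rewrite it in place.  Nothing is
      // popped, so restore_marks() can still consume (oop, mark) pairs later.
      for (std::size_t i = 0; i < preserved_oops.size(); ++i) {
        Addr* p = &preserved_oops[i];         // StackIterator::next_addr()
        *p = new_location(*p);
      }
    }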
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -104,22 +104,21 @@
   friend class KeepAliveClosure;
   friend class VM_MarkSweep;
   friend void marksweep_init();
-  friend class DataLayout;
 
   //
   // Vars
   //
  protected:
   // Traversal stack used during phase1
-  static GrowableArray<oop>*             _marking_stack;
+  static Stack<oop>                      _marking_stack;
   // Stack for live klasses to revisit at end of marking phase
-  static GrowableArray<Klass*>*          _revisit_klass_stack;
+  static Stack<Klass*>                   _revisit_klass_stack;
   // Set (stack) of MDO's to revisit at end of marking phase
-  static GrowableArray<DataLayout*>*    _revisit_mdo_stack;
+  static Stack<DataLayout*>              _revisit_mdo_stack;
 
   // Space for storing/restoring mark word
-  static GrowableArray<markOop>*         _preserved_mark_stack;
-  static GrowableArray<oop>*             _preserved_oop_stack;
+  static Stack<markOop>                  _preserved_mark_stack;
+  static Stack<oop>                      _preserved_oop_stack;
   static size_t                          _preserved_count;
   static size_t                          _preserved_count_max;
   static PreservedMark*                  _preserved_marks;
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -72,7 +72,7 @@
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
-      _marking_stack->push(obj);
+      _marking_stack.push(obj);
     }
   }
 }
--- a/src/share/vm/includeDB_core	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/includeDB_core	Tue Feb 16 22:09:54 2010 -0800
@@ -1448,12 +1448,14 @@
 defNewGeneration.cpp                    referencePolicy.hpp
 defNewGeneration.cpp                    space.inline.hpp
 defNewGeneration.cpp                    spaceDecorator.hpp
+defNewGeneration.cpp                    stack.inline.hpp
 defNewGeneration.cpp                    thread_<os_family>.inline.hpp
 
 defNewGeneration.hpp                    ageTable.hpp
 defNewGeneration.hpp                    cSpaceCounters.hpp
 defNewGeneration.hpp                    generation.inline.hpp
 defNewGeneration.hpp                    generationCounters.hpp
+defNewGeneration.hpp                    stack.hpp
 
 defNewGeneration.inline.hpp             cardTableRS.hpp
 defNewGeneration.inline.hpp             defNewGeneration.hpp
@@ -3855,6 +3857,10 @@
 
 specialized_oop_closures.hpp            atomic.hpp
 
+stack.hpp                               allocation.inline.hpp
+
+stack.inline.hpp                        stack.hpp
+
 stackMapFrame.cpp                       globalDefinitions.hpp
 stackMapFrame.cpp                       handles.inline.hpp
 stackMapFrame.cpp                       oop.inline.hpp
@@ -4097,6 +4103,7 @@
 
 taskqueue.cpp                           debug.hpp
 taskqueue.cpp                           os.hpp
+taskqueue.cpp                           stack.inline.hpp
 taskqueue.cpp                           taskqueue.hpp
 taskqueue.cpp                           thread_<os_family>.inline.hpp
 
@@ -4104,6 +4111,7 @@
 taskqueue.hpp                           allocation.inline.hpp
 taskqueue.hpp                           mutex.hpp
 taskqueue.hpp                           orderAccess_<os_arch>.inline.hpp
+taskqueue.hpp				stack.hpp
 
 templateInterpreter.cpp                 interpreter.hpp
 templateInterpreter.cpp                 interpreterGenerator.hpp
--- a/src/share/vm/memory/allocation.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/memory/allocation.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -288,16 +288,17 @@
 
 // One of the following macros must be used when allocating
 // an array or object from an arena
-#define NEW_ARENA_ARRAY(arena, type, size)\
-  (type*) arena->Amalloc((size) * sizeof(type))
+#define NEW_ARENA_ARRAY(arena, type, size) \
+  (type*) (arena)->Amalloc((size) * sizeof(type))
 
-#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)\
-  (type*) arena->Arealloc((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
+#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
+  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
+                            (new_size) * sizeof(type))
 
-#define FREE_ARENA_ARRAY(arena, type, old, size)\
-  arena->Afree((char*)(old), (size) * sizeof(type))
+#define FREE_ARENA_ARRAY(arena, type, old, size) \
+  (arena)->Afree((char*)(old), (size) * sizeof(type))
 
-#define NEW_ARENA_OBJ(arena, type)\
+#define NEW_ARENA_OBJ(arena, type) \
   NEW_ARENA_ARRAY(arena, type, 1)
 
 
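The extra parentheses around (arena) added above are not cosmetic: without them, an argument that is itself an expression mis-parses, because both a cast and -> bind tighter than ?:. A minimal standalone sketch (not HotSpot code; Arena here is a stand-in for the real class):

    #include <cstddef>
    #include <cstdlib>

    struct Arena {
      char* Amalloc(size_t bytes) { return (char*) malloc(bytes); }
    };

    #define NEW_ARENA_ARRAY(arena, type, size) \
      (type*) (arena)->Amalloc((size) * sizeof(type))

    int main() {
      Arena a, b;
      bool use_a = true;
      // The old, unparenthesized expansion would parse as
      //   ((int*) use_a) ? &a : (&b)->Amalloc(...)
      // which is not the intended expression at all.  The parenthesized
      // form expands to
      //   (int*) (use_a ? &a : &b)->Amalloc((10) * sizeof(int))
      int* p = NEW_ARENA_ARRAY(use_a ? &a : &b, int, 10);
      free(p);
      return 0;
    }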
--- a/src/share/vm/memory/defNewGeneration.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -87,9 +87,7 @@
     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                        _scan_older);
   } while (!_gch->no_allocs_since_save_marks(_level));
-  guarantee(_gen->promo_failure_scan_stack() == NULL
-            || _gen->promo_failure_scan_stack()->length() == 0,
-            "Failed to finish scan");
+  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@@ -130,9 +128,6 @@
                                    int level,
                                    const char* policy)
   : Generation(rs, initial_size, level),
-    _objs_with_preserved_marks(NULL),
-    _preserved_marks_of_objs(NULL),
-    _promo_failure_scan_stack(NULL),
     _promo_failure_drain_in_progress(false),
     _should_allocate_from_space(false)
 {
@@ -600,12 +595,8 @@
   } else {
     assert(HandlePromotionFailure,
       "Should not be here unless promotion failure handling is on");
-    assert(_promo_failure_scan_stack != NULL &&
-      _promo_failure_scan_stack->length() == 0, "post condition");
-
-    // deallocate stack and it's elements
-    delete _promo_failure_scan_stack;
-    _promo_failure_scan_stack = NULL;
+    assert(_promo_failure_scan_stack.is_empty(), "post condition");
+    _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
     remove_forwarding_pointers();
     if (PrintGCDetails) {
@@ -616,7 +607,7 @@
     // case there can be live objects in to-space
     // as a result of a partial evacuation of eden
     // and from-space.
-    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
+    swap_spaces();   // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
     gch->set_incremental_collection_will_fail();
 
@@ -649,34 +640,23 @@
   RemoveForwardPointerClosure rspc;
   eden()->object_iterate(&rspc);
   from()->object_iterate(&rspc);
+
   // Now restore saved marks, if any.
-  if (_objs_with_preserved_marks != NULL) {
-    assert(_preserved_marks_of_objs != NULL, "Both or none.");
-    assert(_objs_with_preserved_marks->length() ==
-           _preserved_marks_of_objs->length(), "Both or none.");
-    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
-      oop obj   = _objs_with_preserved_marks->at(i);
-      markOop m = _preserved_marks_of_objs->at(i);
-      obj->set_mark(m);
-    }
-    delete _objs_with_preserved_marks;
-    delete _preserved_marks_of_objs;
-    _objs_with_preserved_marks = NULL;
-    _preserved_marks_of_objs = NULL;
+  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
+         "should be the same");
+  while (!_objs_with_preserved_marks.is_empty()) {
+    oop obj   = _objs_with_preserved_marks.pop();
+    markOop m = _preserved_marks_of_objs.pop();
+    obj->set_mark(m);
   }
+  _objs_with_preserved_marks.clear(true);
+  _preserved_marks_of_objs.clear(true);
 }
 
 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
   if (m->must_be_preserved_for_promotion_failure(obj)) {
-    if (_objs_with_preserved_marks == NULL) {
-      assert(_preserved_marks_of_objs == NULL, "Both or none.");
-      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
-        GrowableArray<oop>(PreserveMarkStackSize, true);
-      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
-        GrowableArray<markOop>(PreserveMarkStackSize, true);
-    }
-    _objs_with_preserved_marks->push(obj);
-    _preserved_marks_of_objs->push(m);
+    _objs_with_preserved_marks.push(obj);
+    _preserved_marks_of_objs.push(m);
   }
 }
 
@@ -691,7 +671,7 @@
   old->forward_to(old);
   _promotion_failed = true;
 
-  push_on_promo_failure_scan_stack(old);
+  _promo_failure_scan_stack.push(old);
 
   if (!_promo_failure_drain_in_progress) {
     // prevent recursion in copy_to_survivor_space()
@@ -744,20 +724,9 @@
   return obj;
 }
 
-void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
-  if (_promo_failure_scan_stack == NULL) {
-    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
-                                    GrowableArray<oop>(40, true);
-  }
-
-  _promo_failure_scan_stack->push(obj);
-}
-
 void DefNewGeneration::drain_promo_failure_scan_stack() {
-  assert(_promo_failure_scan_stack != NULL, "precondition");
-
-  while (_promo_failure_scan_stack->length() > 0) {
-     oop obj = _promo_failure_scan_stack->pop();
+  while (!_promo_failure_scan_stack.is_empty()) {
+     oop obj = _promo_failure_scan_stack.pop();
      obj->oop_iterate(_promo_failure_scan_stack_closure);
   }
 }
--- a/src/share/vm/memory/defNewGeneration.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/memory/defNewGeneration.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -77,10 +77,10 @@
   // word being overwritten with a self-forwarding-pointer.
   void   preserve_mark_if_necessary(oop obj, markOop m);
 
-  // When one is non-null, so is the other.  Together, they each pair is
-  // an object with a preserved mark, and its mark value.
-  GrowableArray<oop>*     _objs_with_preserved_marks;
-  GrowableArray<markOop>* _preserved_marks_of_objs;
+  // Together, these keep <object with a preserved mark, mark value> pairs.
+  // They should always contain the same number of elements.
+  Stack<oop>     _objs_with_preserved_marks;
+  Stack<markOop> _preserved_marks_of_objs;
 
   // Returns true if the collection can be safely attempted.
   // If this method returns false, a collection is not
@@ -94,11 +94,7 @@
     _promo_failure_scan_stack_closure = scan_stack_closure;
   }
 
-  GrowableArray<oop>* _promo_failure_scan_stack;
-  GrowableArray<oop>* promo_failure_scan_stack() const {
-    return _promo_failure_scan_stack;
-  }
-  void push_on_promo_failure_scan_stack(oop);
+  Stack<oop> _promo_failure_scan_stack;
   void drain_promo_failure_scan_stack(void);
   bool _promo_failure_drain_in_progress;
 
@@ -184,8 +180,6 @@
     void do_void();
   };
 
-  class FastEvacuateFollowersClosure;
-  friend class FastEvacuateFollowersClosure;
   class FastEvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
     int _level;
@@ -336,6 +330,10 @@
 
   void verify(bool allow_dirty);
 
+  bool promo_failure_scan_is_complete() const {
+    return _promo_failure_scan_stack.is_empty();
+  }
+
  protected:
   // If clear_space is true, clear the survivor spaces.  Eden is
   // cleared if the minimum size of eden is 0.  If mangle_space
--- a/src/share/vm/memory/genMarkSweep.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -155,16 +155,6 @@
 
   _preserved_marks = (PreservedMark*)scratch;
   _preserved_count = 0;
-  _preserved_mark_stack = NULL;
-  _preserved_oop_stack = NULL;
-
-  _marking_stack       = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
-
-  int size = SystemDictionary::number_of_classes() * 2;
-  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have to do for
-  // now until we have had a chance to investigate a more optimal setting.
-  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(2*size, true);
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -200,16 +190,11 @@
     gch->release_scratch();
   }
 
-  if (_preserved_oop_stack) {
-    delete _preserved_mark_stack;
-    _preserved_mark_stack = NULL;
-    delete _preserved_oop_stack;
-    _preserved_oop_stack = NULL;
-  }
-
-  delete _marking_stack;
-  delete _revisit_klass_stack;
-  delete _revisit_mdo_stack;
+  _preserved_mark_stack.clear(true);
+  _preserved_oop_stack.clear(true);
+  _marking_stack.clear();
+  _revisit_klass_stack.clear(true);
+  _revisit_mdo_stack.clear(true);
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -267,17 +252,17 @@
 
   // Update subklass/sibling/implementor links of live klasses
   follow_weak_klass_links();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");
 
   // Visit memoized MDO's and clear any unmarked weak refs
   follow_mdo_weak_refs();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");
 
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&is_alive);
   StringTable::unlink(&is_alive);
 
-  assert(_marking_stack->is_empty(), "stack should be empty by now");
+  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
 
--- a/src/share/vm/runtime/globals.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -631,6 +631,9 @@
   develop(bool, ZapJNIHandleArea, trueInDebug,                              \
           "Zap freed JNI handle space with 0xFEFEFEFE")                     \
                                                                             \
+  notproduct(bool, ZapStackSegments, trueInDebug,                           \
+             "Zap allocated/freed Stack segments with 0xFADFADED")          \
+                                                                            \
   develop(bool, ZapUnusedHeapArea, trueInDebug,                             \
           "Zap unused heap space with 0xBAADBABE")                          \
                                                                             \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/stack.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Class Stack (below) grows and shrinks by linking together "segments" which
+// are allocated on demand.  Segments are arrays of the element type (E) plus an
+// extra pointer-sized field to store the segment link.  Recently emptied
+// segments are kept in a cache and reused.
+//
+// Notes/caveats:
+//
+// The size of an element must either evenly divide the size of a pointer or be
+// a multiple of the size of a pointer.
+//
+// Destructors are not called for elements popped off the stack, so element
+// types which rely on destructors for things like reference counting will not
+// work properly.
+//
+// Class Stack allocates segments from the C heap.  However, two protected
+// virtual methods are used to alloc/free memory which subclasses can override:
+//
+//      virtual void* alloc(size_t bytes);
+//      virtual void  free(void* addr, size_t bytes);
+//
+// The alloc() method must return storage aligned for any use.  The
+// implementation in class Stack assumes that alloc() will terminate the process
+// if the allocation fails.
+
+template <class E> class StackIterator;
+
+// StackBase holds common data/methods that don't depend on the element type,
+// factored out to reduce template code duplication.
+class StackBase
+{
+public:
+  size_t segment_size()   const { return _seg_size; } // Elements per segment.
+  size_t max_size()       const { return _max_size; } // Max elements allowed.
+  size_t max_cache_size() const { return _max_cache_size; } // Max segments
+                                                            // allowed in cache.
+
+  size_t cache_size() const { return _cache_size; }   // Segments in the cache.
+
+protected:
+  // The ctor arguments correspond to the like-named functions above.
+  // segment_size:    number of items per segment
+  // max_cache_size:  maximum number of *segments* to cache
+  // max_size:        maximum number of items allowed, rounded to a multiple of
+  //                  the segment size (0 == unlimited)
+  inline StackBase(size_t segment_size, size_t max_cache_size, size_t max_size);
+
+  // Round max_size to a multiple of the segment size.  Treat 0 as unlimited.
+  static inline size_t adjust_max_size(size_t max_size, size_t seg_size);
+
+protected:
+  const size_t _seg_size;       // Number of items per segment.
+  const size_t _max_size;       // Maximum number of items allowed in the stack.
+  const size_t _max_cache_size; // Maximum number of segments to cache.
+  size_t       _cur_seg_size;   // Number of items in the current segment.
+  size_t       _full_seg_size;  // Number of items in already-filled segments.
+  size_t       _cache_size;     // Number of segments in the cache.
+};
+
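+// With gcc, strip the inline keyword from the declarations below (their
+// definitions live in stack.inline.hpp); the matching #undef is at the end
+// of this file.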
+#ifdef __GNUC__
+#define inline
+#endif // __GNUC__
+
+template <class E>
+class Stack:  public StackBase
+{
+public:
+  friend class StackIterator<E>;
+
+  // segment_size:    number of items per segment
+  // max_cache_size:  maximum number of *segments* to cache
+  // max_size:        maximum number of items allowed, rounded to a multiple of
+  //                  the segment size (0 == unlimited)
+  inline Stack(size_t segment_size = default_segment_size(),
+               size_t max_cache_size = 4, size_t max_size = 0);
+  inline ~Stack() { clear(true); }
+
+  inline bool is_empty() const { return _cur_seg == NULL; }
+  inline bool is_full()  const { return _full_seg_size >= max_size(); }
+
+  // Performance sensitive code should use is_empty() instead of size() == 0 and
+  // is_full() instead of size() == max_size().  Using a conditional here allows
+  // just one var to be updated when pushing/popping elements instead of two;
+  // _full_seg_size is updated only when pushing/popping segments.
+  inline size_t size() const {
+    return is_empty() ? 0 : _full_seg_size + _cur_seg_size;
+  }
+
+  inline void push(E elem);
+  inline E    pop();
+
+  // Clear everything from the stack, releasing the associated memory.  If
+  // clear_cache is true, also release any cached segments.
+  void clear(bool clear_cache = false);
+
+  static inline size_t default_segment_size();
+
+protected:
+  // Each segment includes space for _seg_size elements followed by a link
+  // (pointer) to the previous segment; the space is allocated as a single block
+  // of size segment_bytes().  _seg_size is rounded up if necessary so the link
+  // is properly aligned.  The C struct for the layout would be:
+  //
+  // struct segment {
+  //   E     elements[_seg_size];
+  //   E*    link;
+  // };
+
+  // Round up seg_size to keep the link field aligned.
+  static inline size_t adjust_segment_size(size_t seg_size);
+
+  // Methods for allocation size and getting/setting the link.
+  inline size_t link_offset() const;              // Byte offset of link field.
+  inline size_t segment_bytes() const;            // Segment size in bytes.
+  inline E**    link_addr(E* seg) const;          // Address of the link field.
+  inline E*     get_link(E* seg) const;           // Extract the link from seg.
+  inline E*     set_link(E* new_seg, E* old_seg); // new_seg.link = old_seg.
+
+  virtual E*    alloc(size_t bytes);
+  virtual void  free(E* addr, size_t bytes);
+
+  void push_segment();
+  void pop_segment();
+
+  void free_segments(E* seg);          // Free all segments in the list.
+  inline void reset(bool reset_cache); // Reset all data fields.
+
+  DEBUG_ONLY(void verify(bool at_empty_transition) const;)
+  DEBUG_ONLY(void zap_segment(E* seg, bool zap_link_field) const;)
+
+private:
+  E* _cur_seg;    // Current segment.
+  E* _cache;      // Segment cache to avoid ping-ponging.
+};
+
+template <class E> class ResourceStack:  public Stack<E>, public ResourceObj
+{
+public:
+  // If this class becomes widely used, it may make sense to save the Thread
+  // and use it when allocating segments.
+  ResourceStack(size_t segment_size = Stack<E>::default_segment_size()):
+    Stack<E>(segment_size, max_uintx)
+    { }
+
+  // Set the segment pointers to NULL so the parent dtor does not free them;
+  // that must be done by the ResourceMark code.
+  ~ResourceStack() { Stack<E>::reset(true); }
+
+protected:
+  virtual E*   alloc(size_t bytes);
+  virtual void free(E* addr, size_t bytes);
+
+private:
+  void clear(bool clear_cache = false);
+};
+
+template <class E>
+class StackIterator: public StackObj
+{
+public:
+  StackIterator(Stack<E>& stack): _stack(stack) { sync(); }
+
+  Stack<E>& stack() const { return _stack; }
+
+  bool is_empty() const { return _cur_seg == NULL; }
+
+  E  next() { return *next_addr(); }
+  E* next_addr();
+
+  void sync(); // Sync the iterator's state to the stack's current state.
+
+private:
+  Stack<E>& _stack;
+  size_t    _cur_seg_size;
+  E*        _cur_seg;
+  size_t    _full_seg_size;
+};
+
+#ifdef __GNUC__
+#undef inline
+#endif // __GNUC__
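A minimal usage sketch of the interface declared above, assuming the stack.hpp/stack.inline.hpp pair introduced by this change; have_work(), next_oop(), and process() are hypothetical helpers declared here only to keep the sketch self-contained:

    #include "utilities/stack.inline.hpp"

    extern bool have_work();     // hypothetical
    extern oop  next_oop();      // hypothetical
    extern void process(oop o);  // hypothetical

    void drain_example() {
      Stack<oop> s;            // default segment size; caches up to 4 segments
      while (have_work()) {
        s.push(next_oop());    // links a new segment when the current one fills
      }
      while (!s.is_empty()) {
        process(s.pop());      // emptied segments are cached, up to max_cache_size
      }
      s.clear(true);           // true: also free the cached segments
    }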
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/stack.inline.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+StackBase::StackBase(size_t segment_size, size_t max_cache_size,
+                     size_t max_size):
+  _seg_size(segment_size),
+  _max_cache_size(max_cache_size),
+  _max_size(adjust_max_size(max_size, segment_size))
+{
+  assert(_max_size % _seg_size == 0, "not a multiple");
+}
+
+size_t StackBase::adjust_max_size(size_t max_size, size_t seg_size)
+{
+  assert(seg_size > 0, "cannot be 0");
+  assert(max_size >= seg_size || max_size == 0, "max_size too small");
+  const size_t limit = max_uintx - (seg_size - 1);
+  if (max_size == 0 || max_size > limit) {
+    max_size = limit;
+  }
+  return (max_size + seg_size - 1) / seg_size * seg_size;
+}
+
+template <class E>
+Stack<E>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
+  StackBase(adjust_segment_size(segment_size), max_cache_size, max_size)
+{
+  reset(true);
+}
+
+template <class E>
+void Stack<E>::push(E item)
+{
+  assert(!is_full(), "pushing onto a full stack");
+  if (_cur_seg_size == _seg_size) {
+    push_segment();
+  }
+  _cur_seg[_cur_seg_size] = item;
+  ++_cur_seg_size;
+}
+
+template <class E>
+E Stack<E>::pop()
+{
+  assert(!is_empty(), "popping from an empty stack");
+  if (_cur_seg_size == 1) {
+    E tmp = _cur_seg[--_cur_seg_size];
+    pop_segment();
+    return tmp;
+  }
+  return _cur_seg[--_cur_seg_size];
+}
+
+template <class E>
+void Stack<E>::clear(bool clear_cache)
+{
+  free_segments(_cur_seg);
+  if (clear_cache) free_segments(_cache);
+  reset(clear_cache);
+}
+
+template <class E>
+size_t Stack<E>::default_segment_size()
+{
+  // Number of elements that fit in 4K bytes minus the size of two pointers
+  // (link field and malloc header).
+  return (4096 - 2 * sizeof(E*)) / sizeof(E);
+}
+
+template <class E>
+size_t Stack<E>::adjust_segment_size(size_t seg_size)
+{
+  const size_t elem_sz = sizeof(E);
+  const size_t ptr_sz = sizeof(E*);
+  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
+  if (elem_sz < ptr_sz) {
+    return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
+  }
+  return seg_size;
+}
+
+template <class E>
+size_t Stack<E>::link_offset() const
+{
+  return align_size_up(_seg_size * sizeof(E), sizeof(E*));
+}
+
+template <class E>
+size_t Stack<E>::segment_bytes() const
+{
+  return link_offset() + sizeof(E*);
+}
+
+template <class E>
+E** Stack<E>::link_addr(E* seg) const
+{
+  return (E**) ((char*)seg + link_offset());
+}
+
+template <class E>
+E* Stack<E>::get_link(E* seg) const
+{
+  return *link_addr(seg);
+}
+
+template <class E>
+E* Stack<E>::set_link(E* new_seg, E* old_seg)
+{
+  *link_addr(new_seg) = old_seg;
+  return new_seg;
+}
+
+template <class E>
+E* Stack<E>::alloc(size_t bytes)
+{
+  return (E*) NEW_C_HEAP_ARRAY(char, bytes);
+}
+
+template <class E>
+void Stack<E>::free(E* addr, size_t bytes)
+{
+  FREE_C_HEAP_ARRAY(char, (char*) addr);
+}
+
+template <class E>
+void Stack<E>::push_segment()
+{
+  assert(_cur_seg_size == _seg_size, "current segment is not full");
+  E* next;
+  if (_cache_size > 0) {
+    // Use a cached segment.
+    next = _cache;
+    _cache = get_link(_cache);
+    --_cache_size;
+  } else {
+    next = alloc(segment_bytes());
+    DEBUG_ONLY(zap_segment(next, true);)
+  }
+  const bool at_empty_transition = is_empty();
+  _cur_seg = set_link(next, _cur_seg);
+  _cur_seg_size = 0;
+  _full_seg_size += at_empty_transition ? 0 : _seg_size;
+  DEBUG_ONLY(verify(at_empty_transition);)
+}
+
+template <class E>
+void Stack<E>::pop_segment()
+{
+  assert(_cur_seg_size == 0, "current segment is not empty");
+  E* const prev = get_link(_cur_seg);
+  if (_cache_size < _max_cache_size) {
+    // Add the current segment to the cache.
+    DEBUG_ONLY(zap_segment(_cur_seg, false);)
+    _cache = set_link(_cur_seg, _cache);
+    ++_cache_size;
+  } else {
+    DEBUG_ONLY(zap_segment(_cur_seg, true);)
+    free(_cur_seg, segment_bytes());
+  }
+  const bool at_empty_transition = prev == NULL;
+  _cur_seg = prev;
+  _cur_seg_size = _seg_size;
+  _full_seg_size -= at_empty_transition ? 0 : _seg_size;
+  DEBUG_ONLY(verify(at_empty_transition);)
+}
+
+template <class E>
+void Stack<E>::free_segments(E* seg)
+{
+  const size_t bytes = segment_bytes();
+  while (seg != NULL) {
+    E* const prev = get_link(seg);
+    free(seg, bytes);
+    seg = prev;
+  }
+}
+
+template <class E>
+void Stack<E>::reset(bool reset_cache)
+{
+  _cur_seg_size = _seg_size; // So push() will alloc a new segment.
+  _full_seg_size = 0;
+  _cur_seg = NULL;
+  if (reset_cache) {
+    _cache_size = 0;
+    _cache = NULL;
+  }
+}
+
+#ifdef ASSERT
+template <class E>
+void Stack<E>::verify(bool at_empty_transition) const
+{
+  assert(size() <= max_size(), "stack exceeded bounds");
+  assert(cache_size() <= max_cache_size(), "cache exceeded bounds");
+  assert(_cur_seg_size <= segment_size(), "segment index exceeded bounds");
+
+  assert(_full_seg_size % _seg_size == 0, "not a multiple");
+  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
+  assert((_cache == NULL) == (cache_size() == 0), "mismatch");
+
+  if (is_empty()) {
+    assert(_cur_seg_size == segment_size(), "sanity");
+  }
+}
+
+template <class E>
+void Stack<E>::zap_segment(E* seg, bool zap_link_field) const
+{
+  if (!ZapStackSegments) return;
+  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
+  uint32_t* cur = (uint32_t*)seg;
+  const uint32_t* end = cur + zap_bytes / sizeof(uint32_t);
+  while (cur < end) {
+    *cur++ = 0xfadfaded;
+  }
+}
+#endif
+
+template <class E>
+E* ResourceStack<E>::alloc(size_t bytes)
+{
+  return (E*) resource_allocate_bytes(bytes);
+}
+
+template <class E>
+void ResourceStack<E>::free(E* addr, size_t bytes)
+{
+  resource_free_bytes((char*) addr, bytes);
+}
+
+template <class E>
+void StackIterator<E>::sync()
+{
+  _full_seg_size = _stack._full_seg_size;
+  _cur_seg_size = _stack._cur_seg_size;
+  _cur_seg = _stack._cur_seg;
+}
+
+template <class E>
+E* StackIterator<E>::next_addr()
+{
+  assert(!is_empty(), "no items left");
+  if (_cur_seg_size == 1) {
+    E* addr = _cur_seg;
+    _cur_seg = _stack.get_link(_cur_seg);
+    _cur_seg_size = _stack.segment_size();
+    _full_seg_size -= _stack.segment_size();
+    return addr;
+  }
+  return _cur_seg + --_cur_seg_size;
+}
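Worked numbers for the layout arithmetic above, assuming a 64-bit target where sizeof(oop) == sizeof(oop*) == 8 (illustrative only; nothing in the changeset asserts these values):

    // default_segment_size()  = (4096 - 2 * 8) / 8         = 510 elements
    // adjust_segment_size(510): sizeof(E) >= sizeof(E*), so unchanged
    // link_offset()           = align_size_up(510 * 8, 8)  = 4080 bytes
    // segment_bytes()         = 4080 + sizeof(E*)          = 4088 bytes
    //
    // Each segment is thus a single 4088-byte C-heap block: 510 oops plus the
    // link to the previous segment, leaving room for the malloc header within
    // a 4K page.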
--- a/src/share/vm/utilities/taskqueue.cpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/utilities/taskqueue.cpp	Tue Feb 16 22:09:54 2010 -0800
@@ -180,32 +180,12 @@
   }
 }
 
-bool RegionTaskQueueWithOverflow::is_empty() {
-  return (_region_queue.size() == 0) &&
-         (_overflow_stack->length() == 0);
-}
-
-bool RegionTaskQueueWithOverflow::stealable_is_empty() {
-  return _region_queue.size() == 0;
-}
-
-bool RegionTaskQueueWithOverflow::overflow_is_empty() {
-  return _overflow_stack->length() == 0;
-}
-
-void RegionTaskQueueWithOverflow::initialize() {
-  _region_queue.initialize();
-  assert(_overflow_stack == 0, "Creating memory leak");
-  _overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
-}
-
 void RegionTaskQueueWithOverflow::save(RegionTask t) {
   if (TraceRegionTasksQueuing && Verbose) {
     gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
   }
   if(!_region_queue.push(t)) {
-    _overflow_stack->push(t);
+    _overflow_stack.push(t);
   }
 }
 
@@ -237,8 +217,8 @@
 bool
 RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
   bool result;
-  if (!_overflow_stack->is_empty()) {
-    region_task = _overflow_stack->pop();
+  if (!_overflow_stack.is_empty()) {
+    region_task = _overflow_stack.pop();
     result = true;
   } else {
     region_task = (RegionTask) NULL;
--- a/src/share/vm/utilities/taskqueue.hpp	Fri Apr 16 12:50:31 2010 -0700
+++ b/src/share/vm/utilities/taskqueue.hpp	Tue Feb 16 22:09:54 2010 -0800
@@ -557,13 +557,13 @@
 
 class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
-  RegionTaskQueue              _region_queue;
-  GrowableArray<RegionTask>*   _overflow_stack;
+  RegionTaskQueue   _region_queue;
+  Stack<RegionTask> _overflow_stack;
 
  public:
-  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
+  RegionTaskQueueWithOverflow() { }
   // Initialize both stealable queue and overflow
-  void initialize();
+  inline void initialize();
   // Save first to stealable queue and then to overflow
   void save(RegionTask t);
   // Retrieve first from overflow and then from stealable queue
@@ -572,11 +572,27 @@
   bool retrieve_from_stealable_queue(RegionTask& region_index);
   // Retrieve from overflow
   bool retrieve_from_overflow(RegionTask& region_index);
-  bool is_empty();
-  bool stealable_is_empty();
-  bool overflow_is_empty();
+  inline bool is_empty();
+  inline bool stealable_is_empty();
+  inline bool overflow_is_empty();
   uint stealable_size() { return _region_queue.size(); }
   RegionTaskQueue* task_queue() { return &_region_queue; }
 };
 
+void RegionTaskQueueWithOverflow::initialize() {
+  _region_queue.initialize();
+}
+
+bool RegionTaskQueueWithOverflow::is_empty() {
+  return _region_queue.size() == 0 && _overflow_stack.is_empty();
+}
+
+bool RegionTaskQueueWithOverflow::stealable_is_empty() {
+  return _region_queue.size() == 0;
+}
+
+bool RegionTaskQueueWithOverflow::overflow_is_empty() {
+  return _overflow_stack.is_empty();
+}
+
 #define USE_RegionTaskQueueWithOverflow
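A sketch of the save/retrieve pattern the class supports, using only the methods declared above (initialize() is assumed to have been called once); do_region() is a hypothetical consumer:

    #include "utilities/taskqueue.hpp"

    extern void do_region(RegionTask t);  // hypothetical

    void example(RegionTaskQueueWithOverflow& q, RegionTask t) {
      q.save(t);                // stealable queue first; overflow stack if the push fails
      RegionTask task;
      while (q.retrieve_from_overflow(task) ||
             q.retrieve_from_stealable_queue(task)) {
        do_region(task);        // drain the overflow before the stealable queue
      }
    }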