changeset 2261:048f98400b8e

Merge
author jcoomes
date Fri, 18 Mar 2011 09:03:43 -0700
parents fc5ebbb2d1a8 (current diff) 92da084fefc9 (diff)
children e97ad5d5c990 d673ef06fe96
files src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp
diffstat 39 files changed, 676 insertions(+), 712 deletions(-)
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@
 define_pd_global(intx, OptoLoopAlignment,     16);  // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode,       1500);
+
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize,       1024);
@@ -71,4 +72,6 @@
 
 define_pd_global(bool, UseMembar,            false);
 
+// GC Ergo Flags
+define_pd_global(intx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
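The new CMSYoungGenPerWorker default feeds the CMS sizing ergonomics: the preferred young generation size is capped per parallel GC worker thread (16*M on SPARC above, 64*M on x86 below), so the cap grows with the worker count. A minimal sketch of that rule, with an illustrative function name (the consuming logic lives in src/share/vm/runtime/arguments.cpp, one of the files in this changeset):

    // Illustrative only: the young gen cap scales with the GC worker count.
    size_t preferred_max_new_size(size_t parallel_gc_threads,
                                  size_t cms_young_gen_per_worker) {
      size_t workers = parallel_gc_threads == 0 ? 1 : parallel_gc_threads;
      return workers * cms_young_gen_per_worker;  // e.g. 4 workers * 64*M = 256*M
    }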
--- a/src/cpu/x86/vm/globals_x86.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,4 +72,6 @@
 
 define_pd_global(bool, UseMembar,            false);
 
+// GC Ergo Flags
+define_pd_global(intx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
 #endif // CPU_X86_VM_GLOBALS_X86_HPP
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -292,13 +292,15 @@
 void CMSCollector::ref_processor_init() {
   if (_ref_processor == NULL) {
     // Allocate and initialize a reference processor
-    _ref_processor = ReferenceProcessor::create_ref_processor(
-        _span,                               // span
-        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
-        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
-        &_is_alive_closure,
-        ParallelGCThreads,
-        ParallelRefProcEnabled);
+    _ref_processor =
+      new ReferenceProcessor(_span,                               // span
+                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
+                             (int) ParallelGCThreads,             // mt processing degree
+                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
+                             (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
+                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
+                             &_is_alive_closure,                  // closure for liveness info
+                             false);                              // next field updates do not need write barrier
     // Initialize the _ref_processor field of CMSGen
     _cmsGen->set_ref_processor(_ref_processor);
 
@@ -641,7 +643,7 @@
   }
 
   // Support for multi-threaded concurrent phases
-  if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) {
+  if (CMSConcurrentMTEnabled) {
     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
@@ -1689,6 +1691,8 @@
     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
     _full_gc_requested = true;
     CGC_lock->notify();   // nudge CMS thread
+  } else {
+    assert(gc_count > full_gc_count, "Error: causal loop");
   }
 }
 
@@ -1988,17 +1992,16 @@
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
-  ReferenceProcessorSpanMutator x(ref_processor(), new_span);
-
+  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
   // Temporarily, clear the "is_alive_non_header" field of the
   // reference processor.
-  ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
-
+  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
   // Temporarily make reference _processing_ single threaded (non-MT).
-  ReferenceProcessorMTProcMutator z(ref_processor(), false);
-
+  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
   // Temporarily make refs discovery atomic
-  ReferenceProcessorAtomicMutator w(ref_processor(), true);
+  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
+  // Temporarily make reference _discovery_ single threaded (non-MT)
+  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
 
   ref_processor()->set_enqueuing_is_done(false);
   ref_processor()->enable_discovery();
@@ -4263,9 +4266,7 @@
 
   // Refs discovery is already non-atomic.
   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
-  // Mutate the Refs discovery so it is MT during the
-  // multi-threaded marking phase.
-  ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
+  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   conc_workers()->start_task(&tsk);
   while (tsk.yielded()) {
@@ -4318,6 +4319,8 @@
   ResourceMark rm;
   HandleMark   hm;
 
+  // Temporarily make refs discovery single threaded (non-MT)
+  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
     &_markStack, &_revisitStack, CMSYield && asynch);
   // the last argument to iterate indicates whether the iteration
@@ -4356,10 +4359,6 @@
   verify_overflow_empty();
   _abort_preclean = false;
   if (CMSPrecleaningEnabled) {
-    // Precleaning is currently not MT but the reference processor
-    // may be set for MT.  Disable it temporarily here.
-    ReferenceProcessor* rp = ref_processor();
-    ReferenceProcessorMTProcMutator z(rp, false);
     _eden_chunk_index = 0;
     size_t used = get_eden_used();
     size_t capacity = get_eden_capacity();
@@ -4502,11 +4501,16 @@
          _collectorState == AbortablePreclean, "incorrect state");
   ResourceMark rm;
   HandleMark   hm;
+
+  // Precleaning is currently not MT but the reference processor
+  // may be set for MT.  Disable it temporarily here.
+  ReferenceProcessor* rp = ref_processor();
+  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
+
   // Do one pass of scrubbing the discovered reference lists
   // to remove any reference objects with strongly-reachable
   // referents.
   if (clean_refs) {
-    ReferenceProcessor* rp = ref_processor();
     CMSPrecleanRefsYieldClosure yield_cl(this);
     assert(rp->span().equals(_span), "Spans should be equal");
     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
@@ -5576,8 +5580,10 @@
   // in the multi-threaded case, but we special-case n=1 here to get
   // repeatable measurements of the 1-thread overhead of the parallel code.
   if (n_workers > 1) {
-    // Make refs discovery MT-safe
-    ReferenceProcessorMTMutator mt(ref_processor(), true);
+    // Make refs discovery MT-safe, if it isn't already: it may not
+    // necessarily be so, since it's possible that we are doing
+    // ST marking.
+    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
     GenCollectedHeap::StrongRootsScope srs(gch);
     workers->run_task(&tsk);
   } else {
@@ -5703,14 +5709,19 @@
                       CMSBitMap*       mark_bit_map,
                       AbstractWorkGang* workers,
                       OopTaskQueueSet* task_queues):
+    // XXX Should superclass AGTWOQ also know about AWG since it knows
+    // about the task_queues used by the AWG? Then it could initialize
+    // the terminator() object. See 6984287. The set_for_termination()
+    // below is a temporary band-aid for the regression in 6984287.
     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
       task_queues),
     _task(task),
     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
-    {
-      assert(_collector->_span.equals(_span) && !_span.is_empty(),
-             "Inconsistency in _span");
-    }
+  {
+    assert(_collector->_span.equals(_span) && !_span.is_empty(),
+           "Inconsistency in _span");
+    set_for_termination(workers->active_workers());
+  }
 
   OopTaskQueueSet* task_queues() { return queues(); }
 
@@ -5872,8 +5883,7 @@
       // That is OK as long as the Reference lists are balanced (see
       // balance_all_queues() and balance_queues()).
 
-
-      rp->set_mt_degree(ParallelGCThreads);
+      rp->set_active_mt_degree(ParallelGCThreads);
       CMSRefProcTaskExecutor task_executor(*this);
       rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
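This file's first hunk replaces the ReferenceProcessor::create_ref_processor() factory with direct construction, which makes the two kinds of parallelism explicit: reference processing runs at the stop-the-world worker degree, while reference discovery must be wide enough for whichever of the concurrent or STW marker pools is larger. Condensed from the call site above (a sketch of the degree choices, not a complete call):

    // Degrees as chosen at the CMS construction site above.
    bool mt_processing     = (ParallelGCThreads > 1) && ParallelRefProcEnabled;
    int  processing_degree = (int) ParallelGCThreads;
    bool mt_discovery      = _cmsGen->refs_discovery_is_mt();
    int  discovery_degree  = (int) MAX2(ConcGCThreads, ParallelGCThreads);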
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1133,7 +1133,7 @@
     // rare that the cost of the CAS's involved is in the
     // noise. That's a measurement that should be done, and
     // the code simplified if that turns out to be the case.
-    return false;
+    return ConcGCThreads > 1;
   }
 
   // Override
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
 volatile jint ConcurrentMarkSweepThread::_pending_yields      = 0;
 volatile jint ConcurrentMarkSweepThread::_pending_decrements  = 0;
 
-volatile bool ConcurrentMarkSweepThread::_icms_enabled   = false;
+volatile jint ConcurrentMarkSweepThread::_icms_disabled   = 0;
 volatile bool ConcurrentMarkSweepThread::_should_run     = false;
 // When icms is enabled, the icms thread is stopped until explicitly
 // started.
@@ -84,7 +84,7 @@
     }
   }
   _sltMonitor = SLT_lock;
-  set_icms_enabled(CMSIncrementalMode);
+  assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
 }
 
 void ConcurrentMarkSweepThread::run() {
@@ -341,11 +341,11 @@
 
 void ConcurrentMarkSweepThread::icms_wait() {
   assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  if (_should_stop && icms_enabled()) {
+  if (_should_stop && icms_is_enabled()) {
     MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
     trace_state("pause_icms");
     _collector->stats().stop_cms_timer();
-    while(!_should_run && icms_enabled()) {
+    while(!_should_run && icms_is_enabled()) {
       iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
     }
     _collector->stats().start_cms_timer();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
 class ConcurrentMarkSweepGeneration;
 class CMSCollector;
 
-// The Concurrent Mark Sweep GC Thread (could be several in the future).
+// The Concurrent Mark Sweep GC Thread
 class ConcurrentMarkSweepThread: public ConcurrentGCThread {
   friend class VMStructs;
   friend class ConcurrentMarkSweepGeneration;   // XXX should remove friendship
@@ -55,8 +55,6 @@
   static SurrogateLockerThread::SLT_msg_type _sltBuffer;
   static Monitor*                       _sltMonitor;
 
-  ConcurrentMarkSweepThread*            _next;
-
   static bool _should_terminate;
 
   enum CMS_flag_type {
@@ -84,7 +82,7 @@
   // Tracing messages, enabled by CMSTraceThreadState.
   static inline void trace_state(const char* desc);
 
-  static volatile bool _icms_enabled;   // iCMS enabled?
+  static volatile int _icms_disabled;   // a counter to track #iCMS disable & enable
   static volatile bool _should_run;     // iCMS may run
   static volatile bool _should_stop;    // iCMS should stop
 
@@ -214,10 +212,25 @@
 
   // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
   // must also be enabled/disabled dynamically to allow foreground collections.
-  static inline void enable_icms()              { _icms_enabled = true; }
-  static inline void disable_icms()             { _icms_enabled = false; }
-  static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
-  static inline bool icms_enabled()             { return _icms_enabled; }
+#define ICMS_ENABLING_ASSERT                                                      \
+          assert((CMSIncrementalMode  && _icms_disabled >= 0) ||                  \
+                 (!CMSIncrementalMode && _icms_disabled <= 0), "Error")
+
+  static inline void enable_icms() {
+    ICMS_ENABLING_ASSERT;
+    Atomic::dec(&_icms_disabled);
+  }
+  static inline void disable_icms() {
+    ICMS_ENABLING_ASSERT;
+    Atomic::inc(&_icms_disabled);
+  }
+  static inline bool icms_is_disabled() {
+    ICMS_ENABLING_ASSERT;
+    return _icms_disabled > 0;
+  }
+  static inline bool icms_is_enabled() {
+    return !icms_is_disabled();
+  }
 };
 
 inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
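Turning the _icms_enabled boolean into the _icms_disabled counter lets disable/enable pairs from different threads nest correctly: iCMS counts as enabled only while no disables are outstanding. The same pattern in a self-contained sketch using std::atomic (HotSpot uses its own Atomic::inc/dec, as above):

    #include <atomic>

    std::atomic<int> icms_disabled{0};   // number of outstanding disables

    void disable_icms() { icms_disabled.fetch_add(1); }   // nests safely
    void enable_icms()  { icms_disabled.fetch_sub(1); }   // must pair a disable
    bool icms_is_enabled() { return icms_disabled.load() <= 0; }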
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -192,14 +192,18 @@
          "total_collections() should be monotonically increasing");
 
   MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
   if (gch->total_full_collections() == _full_gc_count_before) {
-    // Disable iCMS until the full collection is done.
+    // Disable iCMS until the full collection is done, and
+    // remember that we did so.
     CMSCollector::disable_icms();
+    _disabled_icms = true;
     // In case CMS thread was in icms_wait(), wake it up.
     CMSCollector::start_icms();
     // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before);
   } else {
+    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
   }
 }
@@ -259,6 +263,8 @@
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
-  // Enable iCMS back.
-  CMSCollector::enable_icms();
+  // Enable iCMS back if we disabled it earlier.
+  if (_disabled_icms) {
+    CMSCollector::enable_icms();
+  }
 }
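The new _disabled_icms flag pairs the disable in doit() with the enable in doit_epilogue(), so an operation that never disabled iCMS cannot drive the counter negative. The same pairing could be written as an RAII guard (illustrative only; the VM operation spans two methods, hence the explicit flag):

    // Illustrative guard, not HotSpot code.
    class ICMSDisabler {
      bool _disabled;
     public:
      ICMSDisabler() : _disabled(false) { }
      void disable()  { CMSCollector::disable_icms(); _disabled = true; }
      ~ICMSDisabler() { if (_disabled) CMSCollector::enable_icms(); }
    };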
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -128,11 +128,14 @@
 // VM operation to invoke a concurrent collection of the heap as a
 // GenCollectedHeap heap.
 class VM_GenCollectFullConcurrent: public VM_GC_Operation {
+  bool _disabled_icms;
  public:
   VM_GenCollectFullConcurrent(unsigned int gc_count_before,
                               unsigned int full_gc_count_before,
                               GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */) {
+    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
+      _disabled_icms(false)
+  {
     assert(FullGCCount_lock != NULL, "Error");
     assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
   }
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -373,7 +373,7 @@
             // RSet updating while within an evacuation pause.
             // In this case worker_i should be the id of a GC worker thread
             assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-            assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "incorrect worker id");
+            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
             into_cset_dcq->enqueue(entry);
           }
         }
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1828,7 +1828,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   _cleanup_list.verify_optional();
-  FreeRegionList local_free_list("Local Cleanup List");
+  FreeRegionList tmp_free_list("Tmp Free List");
 
   if (G1ConcRegionFreeingVerbose) {
     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
@@ -1842,7 +1842,7 @@
     HeapRegion* hr = _cleanup_list.remove_head();
     assert(hr != NULL, "the list was not empty");
     hr->rem_set()->clear();
-    local_free_list.add_as_tail(hr);
+    tmp_free_list.add_as_tail(hr);
 
     // Instead of adding one region at a time to the secondary_free_list,
     // we accumulate them in the local list and move them a few at a
@@ -1850,20 +1850,20 @@
     // we do during this process. We'll also append the local list when
     // _cleanup_list is empty (which means we just removed the last
     // region from the _cleanup_list).
-    if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
+    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
         _cleanup_list.is_empty()) {
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                                "appending "SIZE_FORMAT" entries to the "
                                "secondary_free_list, clean list still has "
                                SIZE_FORMAT" entries",
-                               local_free_list.length(),
+                               tmp_free_list.length(),
                                _cleanup_list.length());
       }
 
       {
         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-        g1h->secondary_free_list_add_as_tail(&local_free_list);
+        g1h->secondary_free_list_add_as_tail(&tmp_free_list);
         SecondaryFreeList_lock->notify_all();
       }
 
@@ -1874,7 +1874,7 @@
       }
     }
   }
-  assert(local_free_list.is_empty(), "post-condition");
+  assert(tmp_free_list.is_empty(), "post-condition");
 }
 
 // Support closures for reference processing in G1
@@ -2141,21 +2141,22 @@
   G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
   G1CMDrainMarkingStackClosure
     g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
-
   // We use the work gang from the G1CollectedHeap and we utilize all
   // the worker threads.
-  int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1);
+  int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
+  active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
 
   G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
                                           g1h->workers(), active_workers);
 
+
   if (rp->processing_is_mt()) {
     // Set the degree of MT here.  If the discovery is done MT, there
     // may have been a different number of threads doing the discovery
     // and a different number of discovered lists may have Ref objects.
     // That is OK as long as the Reference lists are balanced (see
     // balance_all_queues() and balance_queues()).
-    rp->set_mt_degree(active_workers);
+    rp->set_active_mt_degree(active_workers);
 
     rp->process_discovered_references(&g1_is_alive,
                                       &g1_keep_alive,
@@ -3182,7 +3183,7 @@
 
   template <class T> void do_oop_work(T* p) {
     assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-    assert(!_g1h->is_on_free_list(
+    assert(!_g1h->is_on_master_free_list(
                     _g1h->heap_region_containing((HeapWord*) p)), "invariant");
 
     oop obj = oopDesc::load_decode_heap_oop(p);
@@ -3403,7 +3404,7 @@
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->is_on_free_list(
+  assert(!_g1h->is_on_master_free_list(
               _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
   assert(!_g1h->is_obj_ill(obj), "invariant");
   assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
@@ -3649,7 +3650,7 @@
                                (void*) obj);
 
       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
-      assert(!_g1h->is_on_free_list(
+      assert(!_g1h->is_on_master_free_list(
                   _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
 
       scan_object(obj);
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -237,9 +237,9 @@
         // The following will finish freeing up any regions that we
         // found to be empty during cleanup. We'll do this part
         // without joining the suspendible set. If an evacuation pause
-        // takes places, then we would carry on freeing regions in
+        // takes place, then we would carry on freeing regions in
         // case they are needed by the pause. If a Full GC takes
-        // places, it would wait for us to process the regions
+        // place, it would wait for us to process the regions
         // reclaimed by cleanup.
 
         double cleanup_start_sec = os::elapsedTime();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -479,7 +479,7 @@
 // Private methods.
 
 HeapRegion*
-G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
+G1CollectedHeap::new_region_try_secondary_free_list() {
   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
     if (!_secondary_free_list.is_empty()) {
@@ -531,7 +531,7 @@
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "forced to look at the secondary_free_list");
       }
-      res = new_region_try_secondary_free_list(word_size);
+      res = new_region_try_secondary_free_list();
       if (res != NULL) {
         return res;
       }
@@ -543,7 +543,7 @@
       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                              "res == NULL, trying the secondary_free_list");
     }
-    res = new_region_try_secondary_free_list(word_size);
+    res = new_region_try_secondary_free_list();
   }
   if (res == NULL && do_expand) {
     if (expand(word_size * HeapWordSize)) {
@@ -579,6 +579,9 @@
 
 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
                                                        size_t word_size) {
+  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
   int first = -1;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
@@ -600,7 +603,7 @@
     // request. If we are only allocating one region we use the common
     // region allocation code (see above).
     wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty();
+    append_secondary_free_list_if_not_empty_with_lock();
 
     if (free_regions() >= num_regions) {
       first = _hrs->find_contiguous(num_regions);
@@ -608,7 +611,7 @@
         for (int i = first; i < first + (int) num_regions; ++i) {
           HeapRegion* hr = _hrs->at(i);
           assert(hr->is_empty(), "sanity");
-          assert(is_on_free_list(hr), "sanity");
+          assert(is_on_master_free_list(hr), "sanity");
           hr->set_pending_removal(true);
         }
         _free_list.remove_all_pending(num_regions);
@@ -618,6 +621,126 @@
   return first;
 }
 
+HeapWord*
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+                                                           size_t num_regions,
+                                                           size_t word_size) {
+  assert(first != -1, "pre-condition");
+  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
+  // Index of last region in the series + 1.
+  int last = first + (int) num_regions;
+
+  // We need to initialize the region(s) we just discovered. This is
+  // a bit tricky given that it can happen concurrently with
+  // refinement threads refining cards on these regions and
+  // potentially wanting to refine the BOT as they are scanning
+  // those cards (this can happen shortly after a cleanup; see CR
+  // 6991377). So we have to set up the region(s) carefully and in
+  // a specific order.
+
+  // The word size sum of all the regions we will allocate.
+  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+  assert(word_size <= word_size_sum, "sanity");
+
+  // This will be the "starts humongous" region.
+  HeapRegion* first_hr = _hrs->at(first);
+  // The header of the new object will be placed at the bottom of
+  // the first region.
+  HeapWord* new_obj = first_hr->bottom();
+  // This will be the new end of the first region in the series that
+  // should also match the end of the last region in the seriers.
+  HeapWord* new_end = new_obj + word_size_sum;
+  // This will be the new top of the first region that will reflect
+  // this allocation.
+  HeapWord* new_top = new_obj + word_size;
+
+  // First, we need to zero the header of the space that we will be
+  // allocating. When we update top further down, some refinement
+  // threads might try to scan the region. By zeroing the header we
+  // ensure that any thread that will try to scan the region will
+  // come across the zero klass word and bail out.
+  //
+  // NOTE: It would not have been correct to have used
+  // CollectedHeap::fill_with_object() and make the space look like
+  // an int array. The thread that is doing the allocation will
+  // later update the object header to a potentially different array
+  // type and, for a very short period of time, the klass and length
+  // fields will be inconsistent. This could cause a refinement
+  // thread to calculate the object size incorrectly.
+  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+  // We will set up the first region as "starts humongous". This
+  // will also update the BOT covering all the regions to reflect
+  // that there is a single object that starts at the bottom of the
+  // first region.
+  first_hr->set_startsHumongous(new_top, new_end);
+
+  // Then, if there are any, we will set up the "continues
+  // humongous" regions.
+  HeapRegion* hr = NULL;
+  for (int i = first + 1; i < last; ++i) {
+    hr = _hrs->at(i);
+    hr->set_continuesHumongous(first_hr);
+  }
+  // If we have "continues humongous" regions (hr != NULL), then the
+  // end of the last one should match new_end.
+  assert(hr == NULL || hr->end() == new_end, "sanity");
+
+  // Up to this point no concurrent thread would have been able to
+  // do any scanning on any region in this series. All the top
+  // fields still point to bottom, so the intersection between
+  // [bottom,top] and [card_start,card_end] will be empty. Before we
+  // update the top fields, we'll do a storestore to make sure that
+  // no thread sees the update to top before the zeroing of the
+  // object header and the BOT initialization.
+  OrderAccess::storestore();
+
+  // Now that the BOT and the object header have been initialized,
+  // we can update top of the "starts humongous" region.
+  assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
+         "new_top should be in this region");
+  first_hr->set_top(new_top);
+
+  // Now, we will update the top fields of the "continues humongous"
+  // regions. The reason we need to do this is that, otherwise,
+  // these regions would look empty and this will confuse parts of
+  // G1. For example, the code that looks for a consecutive number
+  // of empty regions will consider them empty and try to
+  // re-allocate them. We can extend is_empty() to also include
+  // !continuesHumongous(), but it is easier to just update the top
+  // fields here. The way we set top for all regions (i.e., top ==
+  // end for all regions but the last one, top == new_top for the
+  // last one) is actually used when we will free up the humongous
+  // region in free_humongous_region().
+  hr = NULL;
+  for (int i = first + 1; i < last; ++i) {
+    hr = _hrs->at(i);
+    if ((i + 1) == last) {
+      // last continues humongous region
+      assert(hr->bottom() < new_top && new_top <= hr->end(),
+             "new_top should fall on this region");
+      hr->set_top(new_top);
+    } else {
+      // not last one
+      assert(new_top > hr->end(), "new_top should be above this region");
+      hr->set_top(hr->end());
+    }
+  }
+  // If we have continues humongous regions (hr != NULL), then the
+  // end of the last one should match new_end and its top should
+  // match new_top.
+  assert(hr == NULL ||
+         (hr->end() == new_end && hr->top() == new_top), "sanity");
+
+  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
+  _summary_bytes_used += first_hr->used();
+  _humongous_set.add(first_hr);
+
+  return new_obj;
+}
+
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
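The comments in the new humongous_obj_allocate_initialize_regions() describe a publication protocol: zero the object's klass word and set up the BOT first, issue a storestore barrier, and only then update the top fields, so a concurrent refinement thread can never observe top advanced past an uninitialized header. The same release-publication idiom in a portable sketch (HotSpot uses OrderAccess::storestore(); the types here are illustrative):

    #include <atomic>

    struct Region {
      long*              bottom;   // header word lives at bottom[0]
      std::atomic<long*> top;      // read concurrently by refinement threads
    };

    void publish(Region& r, long* new_top) {
      r.bottom[0] = 0;                                      // zeroed header makes scanners bail out
      std::atomic_thread_fence(std::memory_order_release);  // ~ OrderAccess::storestore()
      r.top.store(new_top, std::memory_order_relaxed);      // publish top last
    }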
@@ -653,121 +776,16 @@
     }
   }
 
+  HeapWord* result = NULL;
   if (first != -1) {
-    // Index of last region in the series + 1.
-    int last = first + (int) num_regions;
-
-    // We need to initialize the region(s) we just discovered. This is
-    // a bit tricky given that it can happen concurrently with
-    // refinement threads refining cards on these regions and
-    // potentially wanting to refine the BOT as they are scanning
-    // those cards (this can happen shortly after a cleanup; see CR
-    // 6991377). So we have to set up the region(s) carefully and in
-    // a specific order.
-
-    // The word size sum of all the regions we will allocate.
-    size_t word_size_sum = num_regions * HeapRegion::GrainWords;
-    assert(word_size <= word_size_sum, "sanity");
-
-    // This will be the "starts humongous" region.
-    HeapRegion* first_hr = _hrs->at(first);
-    // The header of the new object will be placed at the bottom of
-    // the first region.
-    HeapWord* new_obj = first_hr->bottom();
-    // This will be the new end of the first region in the series that
-    // should also match the end of the last region in the series.
-    HeapWord* new_end = new_obj + word_size_sum;
-    // This will be the new top of the first region that will reflect
-    // this allocation.
-    HeapWord* new_top = new_obj + word_size;
-
-    // First, we need to zero the header of the space that we will be
-    // allocating. When we update top further down, some refinement
-    // threads might try to scan the region. By zeroing the header we
-    // ensure that any thread that will try to scan the region will
-    // come across the zero klass word and bail out.
-    //
-    // NOTE: It would not have been correct to have used
-    // CollectedHeap::fill_with_object() and make the space look like
-    // an int array. The thread that is doing the allocation will
-    // later update the object header to a potentially different array
-    // type and, for a very short period of time, the klass and length
-    // fields will be inconsistent. This could cause a refinement
-    // thread to calculate the object size incorrectly.
-    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
-
-    // We will set up the first region as "starts humongous". This
-    // will also update the BOT covering all the regions to reflect
-    // that there is a single object that starts at the bottom of the
-    // first region.
-    first_hr->set_startsHumongous(new_top, new_end);
-
-    // Then, if there are any, we will set up the "continues
-    // humongous" regions.
-    HeapRegion* hr = NULL;
-    for (int i = first + 1; i < last; ++i) {
-      hr = _hrs->at(i);
-      hr->set_continuesHumongous(first_hr);
-    }
-    // If we have "continues humongous" regions (hr != NULL), then the
-    // end of the last one should match new_end.
-    assert(hr == NULL || hr->end() == new_end, "sanity");
-
-    // Up to this point no concurrent thread would have been able to
-    // do any scanning on any region in this series. All the top
-    // fields still point to bottom, so the intersection between
-    // [bottom,top] and [card_start,card_end] will be empty. Before we
-    // update the top fields, we'll do a storestore to make sure that
-    // no thread sees the update to top before the zeroing of the
-    // object header and the BOT initialization.
-    OrderAccess::storestore();
-
-    // Now that the BOT and the object header have been initialized,
-    // we can update top of the "starts humongous" region.
-    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
-           "new_top should be in this region");
-    first_hr->set_top(new_top);
-
-    // Now, we will update the top fields of the "continues humongous"
-    // regions. The reason we need to do this is that, otherwise,
-    // these regions would look empty and this will confuse parts of
-    // G1. For example, the code that looks for a consecutive number
-    // of empty regions will consider them empty and try to
-    // re-allocate them. We can extend is_empty() to also include
-    // !continuesHumongous(), but it is easier to just update the top
-    // fields here. The way we set top for all regions (i.e., top ==
-    // end for all regions but the last one, top == new_top for the
-    // last one) is actually used when we will free up the humongous
-    // region in free_humongous_region().
-    hr = NULL;
-    for (int i = first + 1; i < last; ++i) {
-      hr = _hrs->at(i);
-      if ((i + 1) == last) {
-        // last continues humongous region
-        assert(hr->bottom() < new_top && new_top <= hr->end(),
-               "new_top should fall on this region");
-        hr->set_top(new_top);
-      } else {
-        // not last one
-        assert(new_top > hr->end(), "new_top should be above this region");
-        hr->set_top(hr->end());
-      }
-    }
-    // If we have continues humongous regions (hr != NULL), then the
-    // end of the last one should match new_end and its top should
-    // match new_top.
-    assert(hr == NULL ||
-           (hr->end() == new_end && hr->top() == new_top), "sanity");
-
-    assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-    _summary_bytes_used += first_hr->used();
-    _humongous_set.add(first_hr);
-
-    return new_obj;
+    result =
+      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
+    assert(result != NULL, "it should always return a valid result");
   }
 
   verify_region_sets_optional();
-  return NULL;
+
+  return result;
 }
 
 void
@@ -1389,7 +1407,7 @@
     g1_policy()->record_full_collection_start();
 
     wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty();
+    append_secondary_free_list_if_not_empty_with_lock();
 
     gc_prologue(true);
     increment_total_collections(true /* full gc */);
@@ -1444,7 +1462,7 @@
     // how reference processing currently works in G1.
 
     // Temporarily make reference _discovery_ single threaded (non-MT).
-    ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
+    ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
 
     // Temporarily make refs discovery atomic
     ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
@@ -2201,16 +2219,16 @@
 
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
-  _ref_processor = ReferenceProcessor::create_ref_processor(
-                                         mr,    // span
-                                         false, // Reference discovery is not atomic
-                                         true,  // mt_discovery
-                                         &_is_alive_closure, // is alive closure
-                                                             // for efficiency
-                                         ParallelGCThreads,
-                                         ParallelRefProcEnabled,
-                                         true); // Setting next fields of discovered
-                                                // lists requires a barrier.
+  _ref_processor =
+    new ReferenceProcessor(mr,    // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1),    // mt processing
+                           (int) ParallelGCThreads,   // degree of mt processing
+                           ParallelGCThreads > 1 || ConcGCThreads > 1,  // mt discovery
+                           (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
+                           false,                     // Reference discovery is not atomic
+                           &_is_alive_closure,        // is alive closure for efficiency
+                           true);                     // Setting next fields of discovered
+                                                      // lists requires a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -3377,15 +3395,14 @@
 
     TraceMemoryManagerStats tms(false /* fullGC */);
 
-    // If there are any free regions available on the secondary_free_list
-    // make sure we append them to the free_list. However, we don't
-    // have to wait for the rest of the cleanup operation to
-    // finish. If it's still going on that's OK. If we run out of
-    // regions, the region allocation code will check the
-    // secondary_free_list and potentially wait if more free regions
-    // are coming (see new_region_try_secondary_free_list()).
+    // If the secondary_free_list is not empty, append it to the
+    // free_list. No need to wait for the cleanup operation to finish;
+    // the region allocation code will check the secondary_free_list
+    // and wait if necessary. If the G1StressConcRegionFreeing flag is
+    // set, skip this step so that the region allocation code has to
+    // get entries from the secondary_free_list.
     if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty();
+      append_secondary_free_list_if_not_empty_with_lock();
     }
 
     increment_gc_time_stamp();
@@ -5199,7 +5216,7 @@
   size_t rs_lengths = 0;
 
   while (cur != NULL) {
-    assert(!is_on_free_list(cur), "sanity");
+    assert(!is_on_master_free_list(cur), "sanity");
 
     if (non_young) {
       if (cur->is_young()) {
@@ -5543,13 +5560,10 @@
     return;
   }
 
-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    // Make sure we append the secondary_free_list on the free_list so
-    // that all free regions we will come across can be safely
-    // attributed to the free_list.
-    append_secondary_free_list();
-  }
+  // Make sure we append the secondary_free_list on the free_list so
+  // that all free regions we will come across can be safely
+  // attributed to the free_list.
+  append_secondary_free_list_if_not_empty_with_lock();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -56,7 +56,6 @@
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
-class ConcurrentZFThread;
 
 typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
@@ -64,12 +63,6 @@
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
-enum G1GCThreadGroups {
-  G1CRGroup = 0,
-  G1ZFGroup = 1,
-  G1CMGroup = 2
-};
-
 enum GCAllocPurpose {
   GCAllocForTenured,
   GCAllocForSurvived,
@@ -294,9 +287,9 @@
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
-#define heap_locking_asserts_err_msg(__extra_message)                         \
+#define heap_locking_asserts_err_msg(_extra_message_)                         \
   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
-          (__extra_message),                                                  \
+          (_extra_message_),                                                  \
           BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
           BOOL_TO_STR(Thread::current()->is_VM_thread()))
@@ -307,11 +300,11 @@
            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
   } while (0)
 
-#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread)             \
+#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
   do {                                                                        \
     assert(Heap_lock->owned_by_self() ||                                      \
            (SafepointSynchronize::is_at_safepoint() &&                        \
-             ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
+             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                         "should be at a safepoint"));         \
   } while (0)
@@ -338,10 +331,10 @@
                                    "should not be at a safepoint"));          \
   } while (0)
 
-#define assert_at_safepoint(__should_be_vm_thread)                            \
+#define assert_at_safepoint(_should_be_vm_thread_)                            \
   do {                                                                        \
     assert(SafepointSynchronize::is_at_safepoint() &&                         \
-              ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
+              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
            heap_locking_asserts_err_msg("should be at a safepoint"));         \
   } while (0)
 
@@ -371,35 +364,40 @@
   // will check whether there's anything available in the
   // secondary_free_list and/or wait for more regions to appear in that
   // list, if _free_regions_coming is set.
-  HeapRegion* new_region_try_secondary_free_list(size_t word_size);
+  HeapRegion* new_region_try_secondary_free_list();
 
-  // It will try to allocate a single non-humongous HeapRegion
-  // sufficient for an allocation of the given word_size.  If
-  // do_expand is true, it will attempt to expand the heap if
-  // necessary to satisfy the allocation request. Note that word_size
-  // is only used to make sure that we expand sufficiently but, given
-  // that the allocation request is assumed not to be humongous,
-  // having word_size is not strictly necessary (expanding by a single
-  // region will always be sufficient). But let's keep that parameter
-  // in case we need it in the future.
+  // Try to allocate a single non-humongous HeapRegion sufficient for
+  // an allocation of the given word_size. If do_expand is true,
+  // attempt to expand the heap if necessary to satisfy the allocation
+  // request.
   HeapRegion* new_region_work(size_t word_size, bool do_expand);
 
-  // It will try to allocate a new region to be used for allocation by
-  // mutator threads. It will not try to expand the heap if not region
-  // is available.
+  // Try to allocate a new region to be used for allocation by a
+  // mutator thread. Attempt to expand the heap if no region is
+  // available.
   HeapRegion* new_alloc_region(size_t word_size) {
     return new_region_work(word_size, false /* do_expand */);
   }
 
-  // It will try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
+  // Try to allocate a new region to be used for allocation by a GC
+  // thread. Attempt to expand the heap if no region is available.
   HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
 
+  // Attempt to satisfy a humongous allocation request of the given
+  // size by finding a contiguous set of free regions of num_regions
+  // length and removing them from the master free list. Return the
+  // index of the first region or -1 if the search was unsuccessful.
   int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
 
-  // Attempt to allocate an object of the given (very large) "word_size".
-  // Returns "NULL" on failure.
+  // Initialize a contiguous set of free regions of length num_regions
+  // and starting at index first so that they appear as a single
+  // humongous region.
+  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+                                                      size_t num_regions,
+                                                      size_t word_size);
+
+  // Attempt to allocate a humongous object of the given size. Return
+  // NULL if unsuccessful.
   HeapWord* humongous_obj_allocate(size_t word_size);
 
   // The following two methods, allocate_new_tlab() and
@@ -776,7 +774,7 @@
   // Invoke "save_marks" on all heap regions.
   void save_marks();
 
-  // It frees a non-humongous region by initializing its contents and
+  // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
@@ -787,13 +785,13 @@
                    FreeRegionList* free_list,
                    bool par);
 
-  // It frees a humongous region by collapsing it into individual
-  // regions and calling free_region() for each of them. The freed
-  // regions will be added to the free list that's passed as a parameter
-  // (this is usually a local list which will be appended to the
-  // master free list later). The used bytes of freed regions are
-  // accumulated in pre_used. If par is true, the region's RSet will
-  // not be freed up. The assumption is that this will be done later.
+  // Frees a humongous region by collapsing it into individual regions
+  // and calling free_region() for each of them. The freed regions
+  // will be added to the free list that's passed as a parameter (this
+  // is usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
   void free_humongous_region(HeapRegion* hr,
                              size_t* pre_used,
                              FreeRegionList* free_list,
@@ -1046,13 +1044,13 @@
 #endif // HEAP_REGION_SET_FORCE_VERIFY
 
 #ifdef ASSERT
-  bool is_on_free_list(HeapRegion* hr) {
+  bool is_on_master_free_list(HeapRegion* hr) {
     return hr->containing_set() == &_free_list;
   }
 
-  bool is_on_humongous_set(HeapRegion* hr) {
+  bool is_in_humongous_set(HeapRegion* hr) {
     return hr->containing_set() == &_humongous_set;
-}
+  }
 #endif // ASSERT
 
   // Wrapper for the region list operations that can be called from
@@ -1066,7 +1064,9 @@
     _free_list.add_as_tail(&_secondary_free_list);
   }
 
-  void append_secondary_free_list_if_not_empty() {
+  void append_secondary_free_list_if_not_empty_with_lock() {
+    // If the secondary free list looks empty there's no reason to
+    // take the lock and then try to append it.
     if (!_secondary_free_list.is_empty()) {
       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
       append_secondary_free_list();
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -81,6 +81,57 @@
 
 // </NEW PREDICTION>
 
+// Helper class for avoiding interleaved logging
+class LineBuffer: public StackObj {
+
+private:
+  static const int BUFFER_LEN = 1024;
+  static const int INDENT_CHARS = 3;
+  char _buffer[BUFFER_LEN];
+  int _indent_level;
+  int _cur;
+
+  void vappend(const char* format, va_list ap) {
+    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
+    if (res != -1) {
+      _cur += res;
+    } else {
+      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
+      _buffer[BUFFER_LEN - 1] = 0;
+      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
+    }
+  }
+
+public:
+  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
+    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
+      _buffer[_cur] = ' ';
+    }
+  }
+
+#ifndef PRODUCT
+  ~LineBuffer() {
+    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
+  }
+#endif
+
+  void append(const char* format, ...) {
+    va_list ap;
+    va_start(ap, format);
+    vappend(format, ap);
+    va_end(ap);
+  }
+
+  void append_and_print_cr(const char* format, ...) {
+    va_list ap;
+    va_start(ap, format);
+    vappend(format, ap);
+    va_end(ap);
+    gclog_or_tty->print_cr("%s", _buffer);
+    _cur = _indent_level * INDENT_CHARS;
+  }
+};
+
 G1CollectorPolicy::G1CollectorPolicy() :
   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
     ? ParallelGCThreads : 1),
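LineBuffer cures interleaved parallel GC log output by accumulating a whole line in a stack buffer and emitting it with a single gclog_or_tty->print_cr() call. A short usage sketch matching the converted call sites below:

    LineBuffer buf(level);           // pre-indents level * 3 spaces
    buf.append("[%s (ms):", str);    // build the line piece by piece
    for (uint i = 0; i < ParallelGCThreads; ++i) {
      buf.append("  %3.1lf", data[i]);
    }
    buf.append_and_print_cr("]");    // one atomic print, then reset to the indent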
@@ -1016,10 +1067,8 @@
                                          bool summary) {
   double min = data[0], max = data[0];
   double total = 0.0;
-  int j;
-  for (j = 0; j < level; ++j)
-    gclog_or_tty->print("   ");
-  gclog_or_tty->print("[%s (ms):", str);
+  LineBuffer buf(level);
+  buf.append("[%s (ms):", str);
   for (uint i = 0; i < ParallelGCThreads; ++i) {
     double val = data[i];
     if (val < min)
@@ -1027,18 +1076,16 @@
     if (val > max)
       max = val;
     total += val;
-    gclog_or_tty->print("  %3.1lf", val);
+    buf.append("  %3.1lf", val);
   }
   if (summary) {
-    gclog_or_tty->print_cr("");
+    buf.append_and_print_cr("");
     double avg = total / (double) ParallelGCThreads;
-    gclog_or_tty->print(" ");
-    for (j = 0; j < level; ++j)
-      gclog_or_tty->print("   ");
-    gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
+    buf.append(" ");
+    buf.append("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
                         avg, min, max);
   }
-  gclog_or_tty->print_cr("]");
+  buf.append_and_print_cr("]");
 }
 
 void G1CollectorPolicy::print_par_sizes(int level,
@@ -1047,10 +1094,8 @@
                                         bool summary) {
   double min = data[0], max = data[0];
   double total = 0.0;
-  int j;
-  for (j = 0; j < level; ++j)
-    gclog_or_tty->print("   ");
-  gclog_or_tty->print("[%s :", str);
+  LineBuffer buf(level);
+  buf.append("[%s :", str);
   for (uint i = 0; i < ParallelGCThreads; ++i) {
     double val = data[i];
     if (val < min)
@@ -1058,34 +1103,28 @@
     if (val > max)
       max = val;
     total += val;
-    gclog_or_tty->print(" %d", (int) val);
+    buf.append(" %d", (int) val);
   }
   if (summary) {
-    gclog_or_tty->print_cr("");
+    buf.append_and_print_cr("");
     double avg = total / (double) ParallelGCThreads;
-    gclog_or_tty->print(" ");
-    for (j = 0; j < level; ++j)
-      gclog_or_tty->print("   ");
-    gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
+    buf.append(" ");
+    buf.append("Sum: %d, Avg: %d, Min: %d, Max: %d",
                (int)total, (int)avg, (int)min, (int)max);
   }
-  gclog_or_tty->print_cr("]");
+  buf.append_and_print_cr("]");
 }
 
 void G1CollectorPolicy::print_stats (int level,
                                      const char* str,
                                      double value) {
-  for (int j = 0; j < level; ++j)
-    gclog_or_tty->print("   ");
-  gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
+  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
 }
 
 void G1CollectorPolicy::print_stats (int level,
                                      const char* str,
                                      int value) {
-  for (int j = 0; j < level; ++j)
-    gclog_or_tty->print("   ");
-  gclog_or_tty->print_cr("[%s: %d]", str, value);
+  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
 }
 
 double G1CollectorPolicy::avg_value (double* data) {
@@ -2060,17 +2099,11 @@
   _g1->collection_set_iterate(&cs_closure);
 }
 
-static void print_indent(int level) {
-  for (int j = 0; j < level+1; ++j)
-    gclog_or_tty->print("   ");
-}
-
 void G1CollectorPolicy::print_summary (int level,
                                        const char* str,
                                        NumberSeq* seq) const {
   double sum = seq->sum();
-  print_indent(level);
-  gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
+  LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
                 str, sum / 1000.0, seq->avg());
 }
 
@@ -2078,8 +2111,7 @@
                                           const char* str,
                                           NumberSeq* seq) const {
   print_summary(level, str, seq);
-  print_indent(level + 5);
-  gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
+  LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                 seq->num(), seq->sd(), seq->maximum());
 }
 
@@ -2087,6 +2119,7 @@
                                         NumberSeq* other_times_ms,
                                         NumberSeq* calc_other_times_ms) const {
   bool should_print = false;
+  LineBuffer buf(level + 2);
 
   double max_sum = MAX2(fabs(other_times_ms->sum()),
                         fabs(calc_other_times_ms->sum()));
@@ -2095,8 +2128,7 @@
   double sum_ratio = max_sum / min_sum;
   if (sum_ratio > 1.1) {
     should_print = true;
-    print_indent(level + 1);
-    gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
+    buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
   }
 
   double max_avg = MAX2(fabs(other_times_ms->avg()),
@@ -2106,30 +2138,25 @@
   double avg_ratio = max_avg / min_avg;
   if (avg_ratio > 1.1) {
     should_print = true;
-    print_indent(level + 1);
-    gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
+    buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
   }
 
   if (other_times_ms->sum() < -0.01) {
-    print_indent(level + 1);
-    gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
+    buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
   }
 
   if (other_times_ms->avg() < -0.01) {
-    print_indent(level + 1);
-    gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
+    buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
   }
 
   if (calc_other_times_ms->sum() < -0.01) {
     should_print = true;
-    print_indent(level + 1);
-    gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
+    buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
   }
 
   if (calc_other_times_ms->avg() < -0.01) {
     should_print = true;
-    print_indent(level + 1);
-    gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
+    buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
   }
 
   if (should_print)
@@ -2210,10 +2237,9 @@
       }
     }
   } else {
-    print_indent(0);
-    gclog_or_tty->print_cr("none");
+    LineBuffer(1).append_and_print_cr("none");
   }
-  gclog_or_tty->print_cr("");
+  LineBuffer(0).append_and_print_cr("");
 }
 
 void G1CollectorPolicy::print_tracing_info() const {
@@ -2532,7 +2558,7 @@
     jint regions_added = parKnownGarbageCl.marked_regions_added();
     _hrSorted->incNumMarkedHeapRegions(regions_added);
     if (G1PrintParCleanupStats) {
-      gclog_or_tty->print("     Thread %d called %d times, added %d regions to list.\n",
+      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
                  i, parKnownGarbageCl.invokes(), regions_added);
     }
   }
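
The hunks above replace hand-rolled indent loops and piecemeal gclog_or_tty->print() calls with a LineBuffer that accumulates a whole log line (indentation included) and emits it in one call, so lines logged by concurrent GC threads can no longer be interleaved mid-line. Below is a minimal sketch of the interface the call sites rely on; the real class is defined elsewhere in this changeset, and the buffer size and vsnprintf-based formatting here are assumptions, not the actual implementation:

    #include <cstdarg>
    #include <cstdio>

    // Sketch only: fixed-size line accumulator with printf-style appends.
    class LineBuffer {
      static const int BUFSZ = 1024;
      char _buf[BUFSZ];
      int  _pos;

      void vappend(const char* fmt, va_list ap) {
        if (_pos >= BUFSZ - 1) return;              // full: drop the rest (sketch)
        int n = std::vsnprintf(_buf + _pos, BUFSZ - _pos, fmt, ap);
        if (n > 0) _pos = (_pos + n < BUFSZ) ? _pos + n : BUFSZ - 1;
      }
    public:
      explicit LineBuffer(int indent_level) : _pos(0) {
        _buf[0] = '\0';
        for (int i = 0; i < indent_level; ++i) append("   ");  // 3 spaces/level
      }
      void append(const char* fmt, ...) {
        va_list ap; va_start(ap, fmt); vappend(fmt, ap); va_end(ap);
      }
      void append_and_print_cr(const char* fmt, ...) {
        va_list ap; va_start(ap, fmt); vappend(fmt, ap); va_end(ap);
        std::printf("%s\n", _buf);                  // whole line goes out at once
        _pos = 0; _buf[0] = '\0';
      }
    };

With this shape, LineBuffer(level).append_and_print_cr("[%s: %d]", str, value) reproduces the old indent-loop-then-print_cr sequence as a single expression.
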
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -185,22 +185,22 @@
   G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mrbs;
   CompactPoint _cp;
-  size_t _pre_used;
-  FreeRegionList _free_list;
   HumongousRegionSet _humongous_proxy_set;
 
   void free_humongous_region(HeapRegion* hr) {
     HeapWord* end = hr->end();
+    size_t dummy_pre_used;
+    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
     assert(hr->startsHumongous(),
            "Only the start of a humongous region should be freed.");
-    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
+    _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
                                 &_humongous_proxy_set, false /* par */);
-    // Do we also need to do this for the continues humongous regions
-    // we just collapsed?
     hr->prepare_for_compaction(&_cp);
     // Also clear the part of the card table that will be unused after
     // compaction.
     _mrbs->clear(MemRegion(hr->compaction_top(), end));
+    dummy_free_list.remove_all();
   }
 
 public:
@@ -208,8 +208,6 @@
   : _g1h(G1CollectedHeap::heap()),
     _mrbs(G1CollectedHeap::heap()->mr_bs()),
     _cp(NULL, cs, cs->initialize_threshold()),
-    _pre_used(0),
-    _free_list("Local Free List for G1MarkSweep"),
     _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
 
   void update_sets() {
@@ -219,7 +217,6 @@
                                             NULL, /* free_list */
                                             &_humongous_proxy_set,
                                             false /* par */);
-    _free_list.remove_all();
   }
 
   bool doHeapRegion(HeapRegion* hr) {
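
In the g1MarkSweep.cpp change above, the closure no longer carries _pre_used/_free_list across regions; free_humongous_region() hands the callee throwaway stack-local accumulators and unlinks the dummy list before returning, since a full GC rebuilds the free list wholesale and the statistics are not needed here. A toy illustration of the pattern, with invented names and types:

    #include <cstddef>
    #include <vector>

    // The callee insists on reporting what it freed; this caller only wants
    // the side effect, so it supplies locals it will discard.
    static void free_region(int region, size_t* pre_used,
                            std::vector<int>* free_list) {
      *pre_used += 1024;                 // pretend bytes reclaimed
      free_list->push_back(region);
    }

    static void compact_one(int region) {
      size_t dummy_pre_used = 0;         // discarded on return
      std::vector<int> dummy_free_list;  // ditto
      free_region(region, &dummy_pre_used, &dummy_free_list);
      dummy_free_list.clear();           // mirrors dummy_free_list.remove_all()
    }
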
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -86,28 +86,6 @@
   bool idempotent() { return true; }
 };
 
-class IntoCSRegionClosure: public HeapRegionClosure {
-  IntoCSOopClosure _blk;
-  G1CollectedHeap* _g1;
-public:
-  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
-    _g1(g1), _blk(g1, blk) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->in_collection_set()) {
-      _blk.set_region(r);
-      if (r->isHumongous()) {
-        if (r->startsHumongous()) {
-          oop obj = oop(r->bottom());
-          obj->oop_iterate(&_blk);
-        }
-      } else {
-        r->oop_before_save_marks_iterate(&_blk);
-      }
-    }
-    return false;
-  }
-};
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
@@ -329,7 +307,7 @@
     // is during RSet updating within an evacuation pause.
     // In this case worker_i should be the id of a GC worker thread.
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-    assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker");
+    assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
 
     if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
       // 'card_ptr' contains references that point into the collection
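
The assertion above now bounds worker_i by the effective GC worker count rather than by DirtyCardQueueSet::num_par_ids(): with ParallelGCThreads == 0 the collector still runs one (serial) worker, so the bound is max(1, ParallelGCThreads). A hedged sketch of the check; the helper name is invented for the example:

    #include <cassert>

    // ParallelGCThreads == 0 means the serial case, which still uses one
    // worker id, so the effective count is never zero.
    static unsigned effective_gc_workers(unsigned parallel_gc_threads) {
      return parallel_gc_threads == 0 ? 1 : parallel_gc_threads;
    }

    static void refine_card_for(int worker_i, unsigned parallel_gc_threads) {
      assert(worker_i >= 0 &&
             (unsigned) worker_i < effective_gc_workers(parallel_gc_threads) &&
             "should be a GC worker");
      // ... refine the card on behalf of 'worker_i' ...
    }
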
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -53,8 +53,8 @@
 class HeapRegionSetBase;
 
 #define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
-                               (__hr)->top(), (__hr)->end()
+#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
+                               (_hr_)->top(), (_hr_)->end()
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -518,13 +518,13 @@
                    containing_set, _containing_set));
 
     _containing_set = containing_set;
-}
+  }
 
   HeapRegionSetBase* containing_set() { return _containing_set; }
 #else // ASSERT
   void set_containing_set(HeapRegionSetBase* containing_set) { }
 
-  // containing_set() is only used in asserts so there's not reason
+  // containing_set() is only used in asserts so there's no reason
   // to provide a dummy version of it.
 #endif // ASSERT
 
@@ -535,14 +535,15 @@
   bool pending_removal() { return _pending_removal; }
 
   void set_pending_removal(bool pending_removal) {
-    // We can only set pending_removal to true, if it's false and the
-    // region belongs to a set.
-    assert(!pending_removal ||
-           (!_pending_removal && containing_set() != NULL), "pre-condition");
-    // We can only set pending_removal to false, if it's true and the
-    // region does not belong to a set.
-    assert( pending_removal ||
-           ( _pending_removal && containing_set() == NULL), "pre-condition");
+    if (pending_removal) {
+      assert(!_pending_removal && containing_set() != NULL,
+             "can only set pending removal to true if it's false and "
+             "the region belongs to a region set");
+    } else {
+      assert( _pending_removal && containing_set() == NULL,
+              "can only set pending removal to false if it's true and "
+              "the region does not belong to a region set");
+    }
 
     _pending_removal = pending_removal;
   }
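
The HR_FORMAT_PARAMS parameter rename from __hr to _hr_ above is not cosmetic: C++ reserves all identifiers containing a double underscore (and those beginning with an underscore followed by an uppercase letter) for the implementation, so __hr risks colliding with compiler or library internals. A self-contained toy showing the same macro shape with a safe parameter name:

    #include <cstdio>

    #define POINT_FORMAT "(%d,%d)"
    #define POINT_FORMAT_PARAMS(_p_) (_p_).x, (_p_).y   // _p_, never __p

    struct Point { int x; int y; };

    int main() {
      Point p = { 3, 4 };
      std::printf("p = " POINT_FORMAT "\n", POINT_FORMAT_PARAMS(p));
      return 0;
    }
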
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -165,7 +165,7 @@
 
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
-    // we find enough space for the humongous object
+    // we found enough space for the humongous object
     assert(from <= first && first < _regions.length(), "post-condition");
     assert(first < curr && (curr - first) == (int) num, "post-condition");
     for (int i = first; i < first + (int) num; ++i) {
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -76,7 +76,8 @@
   // that are available for allocation.
   size_t free_suffix();
 
-  // Finds a contiguous set of empty regions of length num.
+  // Find a contiguous set of empty regions of length num and return
+  // the index of the first region or -1 if the search was unsuccessful.
   int find_contiguous(size_t num);
 
   // Apply the "doHeapRegion" method of "blk" to all regions in "this",
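
The reworded comment now states the full contract: the index of the first region of the run on success, -1 on failure. An illustrative implementation of that contract over a plain emptiness bitmap (the real HeapRegionSeq version walks its region table, not a vector):

    #include <vector>

    // Returns the index of the first region of a contiguous empty run of
    // length num (num >= 1 assumed), or -1 if no such run exists.
    static int find_contiguous(const std::vector<bool>& is_empty, size_t num) {
      size_t run = 0;
      for (size_t i = 0; i < is_empty.size(); ++i) {
        run = is_empty[i] ? run + 1 : 0;
        if (run == num) {
          return (int)(i - num + 1);     // first region of the run
        }
      }
      return -1;                         // search was unsuccessful
    }
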
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -42,7 +42,7 @@
   return region_num;
 }
 
-void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
+void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
   msg->append("[%s] %s "
               "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
               "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
@@ -109,30 +109,30 @@
   // for the verification calls. If we do verification without the
   // appropriate locks and the set changes underneath our feet
   // verification might fail and send us on a wild goose chase.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
 
   guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
               total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
             (!is_empty() && length() >= 0 && region_num() >= 0 &&
               total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));
 
   guarantee((!regions_humongous() && region_num() == length()) ||
             ( regions_humongous() && region_num() >= length()),
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));
 
   guarantee(!regions_empty() || total_used_bytes() == 0,
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));
 
   guarantee(total_used_bytes() <= total_capacity_bytes(),
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));
 }
 
 void HeapRegionSetBase::verify_start() {
   // See comment in verify() about MT safety and verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert(!_verify_in_progress,
-         hrl_ext_msg(this, "verification should not be in progress"));
+         hrs_ext_msg(this, "verification should not be in progress"));
 
   // Do the basic verification first before we do the checks over the regions.
   HeapRegionSetBase::verify();
@@ -146,11 +146,11 @@
 
 void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
   // See comment in verify() about MT safety and verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert(_verify_in_progress,
-         hrl_ext_msg(this, "verification should be in progress"));
+         hrs_ext_msg(this, "verification should be in progress"));
 
-  guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
+  guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification"));
 
   _calc_length               += 1;
   if (!hr->isHumongous()) {
@@ -164,28 +164,28 @@
 
 void HeapRegionSetBase::verify_end() {
   // See comment in verify() about MT safety and verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert(_verify_in_progress,
-         hrl_ext_msg(this, "verification should be in progress"));
+         hrs_ext_msg(this, "verification should be in progress"));
 
   guarantee(length() == _calc_length,
-            hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
                         "calc length: "SIZE_FORMAT,
                         name(), length(), _calc_length));
 
   guarantee(region_num() == _calc_region_num,
-            hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
                         "calc region num: "SIZE_FORMAT,
                         name(), region_num(), _calc_region_num));
 
   guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
-            hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
                         "calc capacity bytes: "SIZE_FORMAT,
                         name(),
                         total_capacity_bytes(), _calc_total_capacity_bytes));
 
   guarantee(total_used_bytes() == _calc_total_used_bytes,
-            hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
                         "calc used bytes: "SIZE_FORMAT,
                         name(), total_used_bytes(), _calc_total_used_bytes));
 
@@ -221,9 +221,9 @@
 //////////////////// HeapRegionSet ////////////////////
 
 void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
-  hrl_assert_mt_safety_ok(this);
-  hrl_assert_mt_safety_ok(proxy_set);
-  hrl_assert_sets_match(this, proxy_set);
+  hrs_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(proxy_set);
+  hrs_assert_sets_match(this, proxy_set);
 
   verify_optional();
   proxy_set->verify_optional();
@@ -231,19 +231,19 @@
   if (proxy_set->is_empty()) return;
 
   assert(proxy_set->length() <= _length,
-         hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
                      "should be <= length: "SIZE_FORMAT,
                      name(), proxy_set->length(), _length));
   _length -= proxy_set->length();
 
   assert(proxy_set->region_num() <= _region_num,
-         hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
                      "should be <= region num: "SIZE_FORMAT,
                      name(), proxy_set->region_num(), _region_num));
   _region_num -= proxy_set->region_num();
 
   assert(proxy_set->total_used_bytes() <= _total_used_bytes,
-         hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
                      "should be <= used bytes: "SIZE_FORMAT,
                      name(), proxy_set->total_used_bytes(),
                      _total_used_bytes));
@@ -257,13 +257,13 @@
 
 //////////////////// HeapRegionLinkedList ////////////////////
 
-void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
+void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
   msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
 }
 
 void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
-  hrl_assert_mt_safety_ok(this);
-  hrl_assert_mt_safety_ok(from_list);
+  hrs_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(from_list);
 
   verify_optional();
   from_list->verify_optional();
@@ -283,10 +283,10 @@
 #endif // ASSERT
 
   if (_tail != NULL) {
-    assert(length() >  0 && _head != NULL, hrl_ext_msg(this, "invariant"));
+    assert(length() >  0 && _head != NULL, hrs_ext_msg(this, "invariant"));
     _tail->set_next(from_list->_head);
   } else {
-    assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
+    assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant"));
     _head = from_list->_head;
   }
   _tail = from_list->_tail;
@@ -301,12 +301,12 @@
 }
 
 void HeapRegionLinkedList::remove_all() {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   verify_optional();
 
   HeapRegion* curr = _head;
   while (curr != NULL) {
-    hrl_assert_region_ok(this, curr, this);
+    hrs_assert_region_ok(this, curr, this);
 
     HeapRegion* next = curr->next();
     curr->set_next(NULL);
@@ -319,9 +319,9 @@
 }
 
 void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
-  hrl_assert_mt_safety_ok(this);
-  assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
-  assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
+  hrs_assert_mt_safety_ok(this);
+  assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
+  assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
 
   verify_optional();
   DEBUG_ONLY(size_t old_length = length();)
@@ -330,27 +330,27 @@
   HeapRegion* prev = NULL;
   size_t count = 0;
   while (curr != NULL) {
-    hrl_assert_region_ok(this, curr, this);
+    hrs_assert_region_ok(this, curr, this);
     HeapRegion* next = curr->next();
 
     if (curr->pending_removal()) {
       assert(count < target_count,
-             hrl_err_msg("[%s] should not come across more regions "
+             hrs_err_msg("[%s] should not come across more regions "
                          "pending for removal than target_count: "SIZE_FORMAT,
                          name(), target_count));
 
       if (prev == NULL) {
-        assert(_head == curr, hrl_ext_msg(this, "invariant"));
+        assert(_head == curr, hrs_ext_msg(this, "invariant"));
         _head = next;
       } else {
-        assert(_head != curr, hrl_ext_msg(this, "invariant"));
+        assert(_head != curr, hrs_ext_msg(this, "invariant"));
         prev->set_next(next);
       }
       if (next == NULL) {
-        assert(_tail == curr, hrl_ext_msg(this, "invariant"));
+        assert(_tail == curr, hrs_ext_msg(this, "invariant"));
         _tail = prev;
       } else {
-        assert(_tail != curr, hrl_ext_msg(this, "invariant"));
+        assert(_tail != curr, hrs_ext_msg(this, "invariant"));
       }
 
       curr->set_next(NULL);
@@ -371,10 +371,10 @@
   }
 
   assert(count == target_count,
-         hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
+         hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
                      "target_count: "SIZE_FORMAT, name(), count, target_count));
   assert(length() + target_count == old_length,
-         hrl_err_msg("[%s] new length should be consistent "
+         hrs_err_msg("[%s] new length should be consistent "
                      "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
                      "target_count: "SIZE_FORMAT,
                      name(), length(), old_length, target_count));
@@ -385,7 +385,7 @@
 void HeapRegionLinkedList::verify() {
   // See comment in HeapRegionSetBase::verify() about MT safety and
   // verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
 
   // This will also do the basic verification too.
   verify_start();
@@ -399,7 +399,7 @@
 
     count += 1;
     guarantee(count < _unrealistically_long_length,
-              hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
+              hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
                           "seems very long, is there maybe a cycle? "
                           "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
                           "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
@@ -410,7 +410,7 @@
     curr  = curr->next();
   }
 
-  guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
+  guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition"));
 
   verify_end();
 }
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -28,8 +28,8 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 
 // Large buffer for some cases where the output might be larger than normal.
-#define HRL_ERR_MSG_BUFSZ 512
-typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
+#define HRS_ERR_MSG_BUFSZ 512
+typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg;
 
 // Set verification will be forced either if someone defines
 // HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
@@ -45,10 +45,10 @@
 // (e.g., length, region num, used bytes sum) plus any shared
 // functionality (e.g., verification).
 
-class hrl_ext_msg;
+class hrs_ext_msg;
 
 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
-  friend class hrl_ext_msg;
+  friend class hrs_ext_msg;
 
 protected:
   static size_t calculate_region_num(HeapRegion* hr);
@@ -104,10 +104,10 @@
   virtual bool check_mt_safety() { return true; }
 
   // fill_in_ext_msg() writes the values of the set's attributes
-  // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
+  // in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra()
   // allows subclasses to append further information.
-  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
-  void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
+  virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
+  void fill_in_ext_msg(hrs_ext_msg* msg, const char* message);
 
   // It updates the fields of the set to reflect hr being added to
   // the set.
@@ -170,9 +170,9 @@
 // the fields of the associated set. This can be very helpful in
 // diagnosing failures.
 
-class hrl_ext_msg : public hrl_err_msg {
+class hrs_ext_msg : public hrs_err_msg {
 public:
-  hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
+  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") {
     set->fill_in_ext_msg(this, message);
   }
 };
@@ -180,25 +180,25 @@
 // These two macros are provided for convenience, to keep the uses of
 // these two asserts a bit more concise.
 
-#define hrl_assert_mt_safety_ok(_set_)                                        \
+#define hrs_assert_mt_safety_ok(_set_)                                        \
   do {                                                                        \
-    assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety"));    \
+    assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety"));    \
   } while (0)
 
-#define hrl_assert_region_ok(_set_, _hr_, _expected_)                         \
+#define hrs_assert_region_ok(_set_, _hr_, _expected_)                         \
   do {                                                                        \
     assert((_set_)->verify_region((_hr_), (_expected_)),                      \
-           hrl_ext_msg((_set_), "region verification"));                      \
+           hrs_ext_msg((_set_), "region verification"));                      \
   } while (0)
 
 //////////////////// HeapRegionSet ////////////////////
 
-#define hrl_assert_sets_match(_set1_, _set2_)                                 \
+#define hrs_assert_sets_match(_set1_, _set2_)                                 \
   do {                                                                        \
     assert(((_set1_)->regions_humongous() ==                                  \
                                             (_set2_)->regions_humongous()) && \
            ((_set1_)->regions_empty() == (_set2_)->regions_empty()),          \
-           hrl_err_msg("the contents of set %s and set %s should match",      \
+           hrs_err_msg("the contents of set %s and set %s should match",      \
                        (_set1_)->name(), (_set2_)->name()));                  \
   } while (0)
 
@@ -267,7 +267,7 @@
   HeapRegion* tail() { return _tail; }
 
 protected:
-  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
+  virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
 
   // See the comment for HeapRegionSetBase::clear()
   virtual void clear();
@@ -309,10 +309,10 @@
   virtual void print_on(outputStream* out, bool print_contents = false);
 };
 
-//////////////////// HeapRegionLinkedList ////////////////////
+//////////////////// HeapRegionLinkedListIterator ////////////////////
 
-// Iterator class that provides a convenient way to iterator over the
-// regions in a HeapRegionLinkedList instance.
+// Iterator class that provides a convenient way to iterate over the
+// regions of a HeapRegionLinkedList instance.
 
 class HeapRegionLinkedListIterator : public StackObj {
 private:
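
The hrl_* to hrs_* changes across these files are one mechanical rename (the prefix now matches "heap region set" rather than the older "heap region list" naming), but the underlying pattern is worth spelling out: the assert message is an object that formats the set's state into a fixed buffer at construction, so a failing guarantee prints the attributes of the exact set being verified. A rough sketch of that message-object idea; the buffer size mirrors HRS_ERR_MSG_BUFSZ, everything else is illustrative rather than the real FormatBuffer:

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    template <std::size_t BUFSZ>
    class format_buffer {
      char _buf[BUFSZ];
    public:
      format_buffer(const char* fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        std::vsnprintf(_buf, BUFSZ, fmt, ap);
        va_end(ap);
      }
      // Subclasses/helpers append further state, e.g. a list's head and tail.
      void append(const char* fmt, ...) {
        std::size_t len = std::strlen(_buf);
        va_list ap;
        va_start(ap, fmt);
        std::vsnprintf(_buf + len, BUFSZ - len, fmt, ap);
        va_end(ap);
      }
      operator const char*() const { return _buf; }
    };

    typedef format_buffer<512> hrs_err_msg_sketch;   // cf. HRS_ERR_MSG_BUFSZ
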
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -42,8 +42,8 @@
 }
 
 inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
-  hrl_assert_region_ok(this, hr, NULL);
-  assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
+  hrs_assert_region_ok(this, hr, NULL);
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
 
   update_for_addition(hr);
   hr->set_containing_set(this);
@@ -51,7 +51,7 @@
 
 inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
   // Assumes the caller has already verified the region.
-  assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
+  assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
   _length -= 1;
 
   size_t region_num_diff;
@@ -61,22 +61,22 @@
     region_num_diff = calculate_region_num(hr);
   }
   assert(region_num_diff <= _region_num,
-         hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
                      "should be <= region num: "SIZE_FORMAT,
                      name(), region_num_diff, _region_num));
   _region_num -= region_num_diff;
 
   size_t used_bytes = hr->used();
   assert(used_bytes <= _total_used_bytes,
-         hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
                      "should be <= used bytes: "SIZE_FORMAT,
                      name(), used_bytes, _total_used_bytes));
   _total_used_bytes -= used_bytes;
 }
 
 inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
-  hrl_assert_region_ok(this, hr, this);
-  assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
+  hrs_assert_region_ok(this, hr, this);
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
 
   hr->set_containing_set(NULL);
   update_for_removal(hr);
@@ -85,13 +85,13 @@
 //////////////////// HeapRegionSet ////////////////////
 
 inline void HeapRegionSet::add(HeapRegion* hr) {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   // add_internal() will verify the region.
   add_internal(hr);
 }
 
 inline void HeapRegionSet::remove(HeapRegion* hr) {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   // remove_internal() will verify the region.
   remove_internal(hr);
 }
@@ -101,8 +101,8 @@
   // No need to do the MT safety check here given that this method
   // does not update the contents of the set but instead accumulates
   // the changes in proxy_set which is assumed to be thread-local.
-  hrl_assert_sets_match(this, proxy_set);
-  hrl_assert_region_ok(this, hr, this);
+  hrs_assert_sets_match(this, proxy_set);
+  hrs_assert_region_ok(this, hr, this);
 
   hr->set_containing_set(NULL);
   proxy_set->update_for_addition(hr);
@@ -111,10 +111,10 @@
 //////////////////// HeapRegionLinkedList ////////////////////
 
 inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert((length() == 0 && _head == NULL && _tail == NULL) ||
          (length() >  0 && _head != NULL && _tail != NULL),
-         hrl_ext_msg(this, "invariant"));
+         hrs_ext_msg(this, "invariant"));
   // add_internal() will verify the region.
   add_internal(hr);
 
@@ -128,10 +128,10 @@
 }
 
 inline HeapRegion* HeapRegionLinkedList::remove_head() {
-  hrl_assert_mt_safety_ok(this);
-  assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
+  hrs_assert_mt_safety_ok(this);
+  assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
   assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrl_ext_msg(this, "invariant"));
+         hrs_ext_msg(this, "invariant"));
 
   // We need to unlink it first.
   HeapRegion* hr = _head;
@@ -147,7 +147,7 @@
 }
 
 inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
 
   if (!is_empty()) {
     return remove_head();
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -52,7 +52,7 @@
                                             FreeList_lock->owned_by_self())) ||
             (!SafepointSynchronize::is_at_safepoint() &&
                                                 Heap_lock->owned_by_self()),
-            hrl_ext_msg(this, "master free list MT safety protocol"));
+            hrs_ext_msg(this, "master free list MT safety protocol"));
 
   return FreeRegionList::check_mt_safety();
 }
@@ -65,7 +65,7 @@
   // while holding the SecondaryFreeList_lock.
 
   guarantee(SecondaryFreeList_lock->owned_by_self(),
-            hrl_ext_msg(this, "secondary free list MT safety protocol"));
+            hrs_ext_msg(this, "secondary free list MT safety protocol"));
 
   return FreeRegionList::check_mt_safety();
 }
@@ -81,7 +81,7 @@
   return HeapRegionSet::verify_region_extra(hr);
 }
 
-//////////////////// HumongousRegionSet ////////////////////
+//////////////////// MasterHumongousRegionSet ////////////////////
 
 bool MasterHumongousRegionSet::check_mt_safety() {
   // Master Humongous Set MT safety protocol:
@@ -97,6 +97,6 @@
                                              OldSets_lock->owned_by_self())) ||
             (!SafepointSynchronize::is_at_safepoint() &&
                                                  Heap_lock->owned_by_self()),
-            hrl_ext_msg(this, "master humongous set MT safety protocol"));
+            hrs_ext_msg(this, "master humongous set MT safety protocol"));
   return HumongousRegionSet::check_mt_safety();
 }
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1530,13 +1530,15 @@
 {
   if (_ref_processor == NULL) {
     // Allocate and initialize a reference processor
-    _ref_processor = ReferenceProcessor::create_ref_processor(
-        _reserved,                  // span
-        refs_discovery_is_atomic(), // atomic_discovery
-        refs_discovery_is_mt(),     // mt_discovery
-        NULL,                       // is_alive_non_header
-        ParallelGCThreads,
-        ParallelRefProcEnabled);
+    _ref_processor =
+      new ReferenceProcessor(_reserved,                  // span
+                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
+                             (int) ParallelGCThreads,    // mt processing degree
+                             refs_discovery_is_mt(),     // mt discovery
+                             (int) ParallelGCThreads,    // mt discovery degree
+                             refs_discovery_is_atomic(), // atomic_discovery
+                             NULL,                       // is_alive_non_header
+                             false);                     // write barrier for next field updates
   }
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,9 +58,7 @@
 
 void PSMarkSweep::initialize() {
   MemRegion mr = Universe::heap()->reserved_region();
-  _ref_processor = new ReferenceProcessor(mr,
-                                          true,    // atomic_discovery
-                                          false);  // mt_discovery
+  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
   _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -827,13 +827,15 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   MemRegion mr = heap->reserved_region();
-  _ref_processor = ReferenceProcessor::create_ref_processor(
-    mr,                         // span
-    true,                       // atomic_discovery
-    true,                       // mt_discovery
-    &_is_alive_closure,
-    ParallelGCThreads,
-    ParallelRefProcEnabled);
+  _ref_processor =
+    new ReferenceProcessor(mr,            // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
+                           (int) ParallelGCThreads, // mt processing degree
+                           true,          // mt discovery
+                           (int) ParallelGCThreads, // mt discovery degree
+                           true,          // atomic_discovery
+                           &_is_alive_closure, // non-header is alive closure
+                           false);        // write barrier for next field updates
   _counters = new CollectorCounters("PSParallelCompact", 1);
 
   // Initialize static fields in ParCompactionManager.
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -411,7 +411,7 @@
 template <class T> void PSPromotionManager::process_array_chunk_work(
                                                  oop obj,
                                                  int start, int end) {
-  assert(start < end, "invariant");
+  assert(start <= end, "invariant");
   T* const base      = (T*)objArrayOop(obj)->base();
   T* p               = base + start;
   T* const chunk_end = base + end;
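
Relaxing the precondition from start < end to start <= end admits the empty chunk: with start == end the pointer loop executes zero iterations, which is harmless, whereas the strict assert would fire spuriously. In miniature:

    #include <cassert>

    template <class T>
    static void process_chunk_work(T* base, int start, int end) {
      assert(start <= end && "invariant");   // empty chunk (start == end) is OK
      T* p               = base + start;
      T* const chunk_end = base + end;
      while (p < chunk_end) {                // runs zero times when start == end
        // ... process *p ...
        ++p;
      }
    }
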
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -796,13 +796,15 @@
 
   // Initialize ref handling object for scavenging.
   MemRegion mr = young_gen->reserved();
-  _ref_processor = ReferenceProcessor::create_ref_processor(
-    mr,                         // span
-    true,                       // atomic_discovery
-    true,                       // mt_discovery
-    NULL,                       // is_alive_non_header
-    ParallelGCThreads,
-    ParallelRefProcEnabled);
+  _ref_processor =
+    new ReferenceProcessor(mr,                         // span
+                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
+                           (int) ParallelGCThreads,    // mt processing degree
+                           true,                       // mt discovery
+                           (int) ParallelGCThreads,    // mt discovery degree
+                           true,                       // atomic_discovery
+                           NULL,                       // header provides liveness info
+                           false);                     // next field updates do not need write barrier
 
   // Cache the cardtable
   BarrierSet* bs = Universe::heap()->barrier_set();
--- a/src/share/vm/memory/collectorPolicy.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -293,10 +293,11 @@
   // Determine maximum size of gen0
 
   size_t max_new_size = 0;
-  if (FLAG_IS_CMDLINE(MaxNewSize)) {
+  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
     if (MaxNewSize < min_alignment()) {
       max_new_size = min_alignment();
-    } else if (MaxNewSize >= max_heap_byte_size()) {
+    }
+    if (MaxNewSize >= max_heap_byte_size()) {
       max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                      min_alignment());
       warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
@@ -333,7 +334,7 @@
   assert(max_new_size > 0, "All paths should set max_new_size");
 
   // Given the maximum gen0 size, determine the initial and
-  // minimum sizes.
+  // minimum gen0 sizes.
 
   if (max_heap_byte_size() == min_heap_byte_size()) {
     // The maximum and minimum heap sizes are the same so
@@ -396,7 +397,7 @@
   }
 
   if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
       SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
       min_gen0_size(), initial_gen0_size(), max_gen0_size());
   }
@@ -448,7 +449,7 @@
   // At this point the minimum, initial and maximum sizes
   // of the overall heap and of gen0 have been determined.
   // The maximum gen1 size can be determined from the maximum gen0
-  // and maximum heap size since not explicit flags exits
+  // and maximum heap size since no explicit flags exist
   // for setting the gen1 maximum.
   _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
   _max_gen1_size =
@@ -494,13 +495,13 @@
           "generation sizes: using maximum heap = " SIZE_FORMAT
           " -XX:OldSize flag is being ignored",
           max_heap_byte_size());
-  }
+    }
     // If there is an inconsistency between the OldSize and the minimum and/or
     // initial size of gen0, since OldSize was explicitly set, OldSize wins.
     if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                           min_heap_byte_size(), OldSize)) {
       if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
               SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
               min_gen0_size(), initial_gen0_size(), max_gen0_size());
       }
@@ -509,7 +510,7 @@
     if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                          initial_heap_byte_size(), OldSize)) {
       if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
+        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
           SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
           min_gen0_size(), initial_gen0_size(), max_gen0_size());
       }
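
Two changes land in the MaxNewSize hunk above: the branch now also fires when MaxNewSize was set ergonomically (FLAG_IS_ERGO), and the former else-if is split into two independent ifs so each bound is checked on its own. A compilable sketch of the resulting clamping order, with invented helpers standing in for the real flag machinery:

    #include <cstdio>

    // Align down to a multiple of 'align' (align assumed to be a power of two).
    static size_t align_size_down(size_t size, size_t align) {
      return size & ~(align - 1);
    }

    static size_t clamp_max_new_size(size_t max_new, size_t min_align,
                                     size_t max_heap) {
      size_t result = max_new;
      if (max_new < min_align) {
        result = min_align;                           // too small: round up
      }
      if (max_new >= max_heap) {                      // checked independently
        result = align_size_down(max_heap - min_align, min_align);
      }
      return result;
    }

    int main() {
      // 512 bytes is below a 64K alignment, so it is rounded up to 64K.
      std::printf("%zu\n",
                  clamp_max_new_size(512, 64 * 1024, 256 * 1024 * 1024));
      return 0;
    }
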
--- a/src/share/vm/memory/generation.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/memory/generation.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,14 +83,11 @@
 }
 
 // By default we get a single threaded default reference processor;
-// generations needing multi-threaded refs discovery override this method.
+// generations needing multi-threaded refs processing or discovery override this method.
 void Generation::ref_processor_init() {
   assert(_ref_processor == NULL, "a reference processor already exists");
   assert(!_reserved.is_empty(), "empty generation?");
-  _ref_processor =
-    new ReferenceProcessor(_reserved,                  // span
-                           refs_discovery_is_atomic(), // atomic_discovery
-                           refs_discovery_is_mt());    // mt_discovery
+  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
   if (_ref_processor == NULL) {
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
--- a/src/share/vm/memory/referenceProcessor.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/memory/referenceProcessor.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,40 +102,17 @@
             "Unrecongnized RefDiscoveryPolicy");
 }
 
-ReferenceProcessor*
-ReferenceProcessor::create_ref_processor(MemRegion          span,
-                                         bool               atomic_discovery,
-                                         bool               mt_discovery,
-                                         BoolObjectClosure* is_alive_non_header,
-                                         int                parallel_gc_threads,
-                                         bool               mt_processing,
-                                         bool               dl_needs_barrier) {
-  int mt_degree = 1;
-  if (parallel_gc_threads > 1) {
-    mt_degree = parallel_gc_threads;
-  }
-  ReferenceProcessor* rp =
-    new ReferenceProcessor(span, atomic_discovery,
-                           mt_discovery, mt_degree,
-                           mt_processing && (parallel_gc_threads > 0),
-                           dl_needs_barrier);
-  if (rp == NULL) {
-    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
-  }
-  rp->set_is_alive_non_header(is_alive_non_header);
-  rp->setup_policy(false /* default soft ref policy */);
-  return rp;
-}
-
 ReferenceProcessor::ReferenceProcessor(MemRegion span,
-                                       bool      atomic_discovery,
+                                       bool      mt_processing,
+                                       int       mt_processing_degree,
                                        bool      mt_discovery,
-                                       int       mt_degree,
-                                       bool      mt_processing,
+                                       int       mt_discovery_degree,
+                                       bool      atomic_discovery,
+                                       BoolObjectClosure* is_alive_non_header,
                                        bool      discovered_list_needs_barrier)  :
   _discovering_refs(false),
   _enqueuing_is_done(false),
-  _is_alive_non_header(NULL),
+  _is_alive_non_header(is_alive_non_header),
   _discovered_list_needs_barrier(discovered_list_needs_barrier),
   _bs(NULL),
   _processing_is_mt(mt_processing),
@@ -144,8 +121,8 @@
   _span = span;
   _discovery_is_atomic = atomic_discovery;
   _discovery_is_mt     = mt_discovery;
-  _num_q               = mt_degree;
-  _max_num_q           = mt_degree;
+  _num_q               = MAX2(1, mt_processing_degree);
+  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
   _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
   if (_discoveredSoftRefs == NULL) {
     vm_exit_during_initialization("Could not allocate RefProc Array");
@@ -163,6 +140,7 @@
   if (discovered_list_needs_barrier) {
     _bs = Universe::heap()->barrier_set();
   }
+  setup_policy(false /* default soft ref policy */);
 }
 
 #ifndef PRODUCT
@@ -405,15 +383,14 @@
   { }
 
   virtual void work(unsigned int work_id) {
-    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
+    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
     // Simplest first cut: static partitioning.
     int index = work_id;
     // The increment on "index" must correspond to the maximum number of queues
     // (n_queues) with which that ReferenceProcessor was created.  That
     // is because of the "clever" way the discovered references lists were
-    // allocated and are indexed into.  That number is ParallelGCThreads
-    // currently.  Assert that.
-    assert(_n_queues == (int) ParallelGCThreads, "Different number not expected");
+    // allocated and are indexed into.
+    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
     for (int j = 0;
          j < subclasses_of_ref;
          j++, index += _n_queues) {
@@ -672,7 +649,7 @@
     }
   }
   NOT_PRODUCT(
-    if (PrintGCDetails && TraceReferenceGC) {
+    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
       gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
         "Refs in discovered list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), (address)refs_list.head());
@@ -711,7 +688,7 @@
   // Now close the newly reachable set
   complete_gc->do_void();
   NOT_PRODUCT(
-    if (PrintGCDetails && TraceReferenceGC) {
+    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
       gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
         "Refs in discovered list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), (address)refs_list.head());
@@ -951,7 +928,7 @@
   }
   if (PrintReferenceGC && PrintGCDetails) {
     size_t total = 0;
-    for (int i = 0; i < _num_q; ++i) {
+    for (int i = 0; i < _max_num_q; ++i) {
       total += refs_lists[i].length();
     }
     gclog_or_tty->print(", %u refs", total);
@@ -967,7 +944,7 @@
       RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
       task_executor->execute(phase1);
     } else {
-      for (int i = 0; i < _num_q; i++) {
+      for (int i = 0; i < _max_num_q; i++) {
         process_phase1(refs_lists[i], policy,
                        is_alive, keep_alive, complete_gc);
       }
@@ -983,7 +960,7 @@
     RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
     task_executor->execute(phase2);
   } else {
-    for (int i = 0; i < _num_q; i++) {
+    for (int i = 0; i < _max_num_q; i++) {
       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
     }
   }
@@ -994,7 +971,7 @@
     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
     task_executor->execute(phase3);
   } else {
-    for (int i = 0; i < _num_q; i++) {
+    for (int i = 0; i < _max_num_q; i++) {
       process_phase3(refs_lists[i], clear_referent,
                      is_alive, keep_alive, complete_gc);
     }
@@ -1008,7 +985,7 @@
   //   for (int j = 0; j < _num_q; j++) {
   //     int index = i * _max_num_q + j;
   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
-    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
+    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nScrubbing %s discovered list of Null referents",
         list_name(i));
@@ -1350,7 +1327,7 @@
   {
     TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _num_q; i++) {
+    for (int i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1363,7 +1340,7 @@
   {
     TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _num_q; i++) {
+    for (int i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1376,7 +1353,7 @@
   {
     TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _num_q; i++) {
+    for (int i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1433,7 +1410,7 @@
   complete_gc->do_void();
 
   NOT_PRODUCT(
-    if (PrintGCDetails && PrintReferenceGC) {
+    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
       gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
         "Refs in discovered list " INTPTR_FORMAT,
         iter.removed(), iter.processed(), (address)refs_list.head());
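
The constructor rewrite above separates the MT processing degree from the MT discovery degree: _num_q is the active processing degree (never below 1), while _max_num_q, the max of the two, sizes the discovered-list array. That is why the loops in this file that drain, scrub, or preclean every list now iterate to _max_num_q instead of _num_q: discovery may have populated more queues than processing uses. A worked sketch of the sizing:

    #include <algorithm>
    #include <cstdio>

    int main() {
      int mt_processing_degree = 4;   // e.g. (int) ParallelGCThreads
      int mt_discovery_degree  = 8;   // discovery may run wider

      int num_q     = std::max(1, mt_processing_degree);     // active queues
      int max_num_q = std::max(num_q, mt_discovery_degree);  // array sizing

      // Any loop that must visit every discovered list has to run to
      // max_num_q, since discoverers may have filled queues beyond num_q.
      std::printf("num_q=%d max_num_q=%d\n", num_q, max_num_q);
      return 0;
    }
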
--- a/src/share/vm/memory/referenceProcessor.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/memory/referenceProcessor.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
   bool        _enqueuing_is_done;     // true if all weak references enqueued
   bool        _processing_is_mt;      // true during phases when
                                       // reference processing is MT.
-  int         _next_id;               // round-robin counter in
+  int         _next_id;               // round-robin mod _num_q counter in
                                       // support of work distribution
 
   // For collectors that do not keep GC marking information
@@ -103,7 +103,8 @@
 
  public:
   int num_q()                            { return _num_q; }
-  void set_mt_degree(int v)              { _num_q = v; }
+  int max_num_q()                        { return _max_num_q; }
+  void set_active_mt_degree(int v)       { _num_q = v; }
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
   static oop  sentinel_ref()             { return _sentinelRef; }
   static oop* adr_sentinel_ref()         { return &_sentinelRef; }
@@ -216,6 +217,7 @@
                                    VoidClosure*       complete_gc,
                                    YieldClosure*      yield);
 
+  // round-robin mod _num_q (note: _not_ mod _max_num_q)
   int next_id() {
     int id = _next_id;
     if (++_next_id == _num_q) {
@@ -256,23 +258,15 @@
     _max_num_q(0),
     _processing_is_mt(false),
     _next_id(0)
-  {}
-
-  ReferenceProcessor(MemRegion span, bool atomic_discovery,
-                     bool mt_discovery,
-                     int mt_degree = 1,
-                     bool mt_processing = false,
-                     bool discovered_list_needs_barrier = false);
+  { }
 
-  // Allocates and initializes a reference processor.
-  static ReferenceProcessor* create_ref_processor(
-    MemRegion          span,
-    bool               atomic_discovery,
-    bool               mt_discovery,
-    BoolObjectClosure* is_alive_non_header = NULL,
-    int                parallel_gc_threads = 1,
-    bool               mt_processing = false,
-    bool               discovered_list_needs_barrier = false);
+  // Default parameters give you a vanilla reference processor.
+  ReferenceProcessor(MemRegion span,
+                     bool mt_processing = false, int mt_processing_degree = 1,
+                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
+                     bool atomic_discovery = true,
+                     BoolObjectClosure* is_alive_non_header = NULL,
+                     bool discovered_list_needs_barrier = false);
 
   // RefDiscoveryPolicy values
   enum DiscoveryPolicy {
@@ -397,20 +391,20 @@
 // A utility class to temporarily change the MT'ness of
 // reference discovery for the given ReferenceProcessor
 // in the scope that contains it.
-class ReferenceProcessorMTMutator: StackObj {
+class ReferenceProcessorMTDiscoveryMutator: StackObj {
  private:
   ReferenceProcessor* _rp;
   bool                _saved_mt;
 
  public:
-  ReferenceProcessorMTMutator(ReferenceProcessor* rp,
-                              bool mt):
+  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
+                                       bool mt):
     _rp(rp) {
     _saved_mt = _rp->discovery_is_mt();
     _rp->set_mt_discovery(mt);
   }
 
-  ~ReferenceProcessorMTMutator() {
+  ~ReferenceProcessorMTDiscoveryMutator() {
     _rp->set_mt_discovery(_saved_mt);
   }
 };
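
The rename makes explicit that this scoped mutator toggles the MT'ness of discovery only; processing MT'ness is fixed when the processor is constructed. A usage fragment, assuming rp points at a live ReferenceProcessor:

    {
      // Force single-threaded discovery for this scope only.
      ReferenceProcessorMTDiscoveryMutator mt(rp, false);
      // ... work that requires serial reference discovery ...
    }   // destructor restores the saved MT'ness, even on early exit
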
--- a/src/share/vm/runtime/arguments.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -242,6 +242,7 @@
                            JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
   { "MaxLiveObjectEvacuationRatio",
                            JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
+  { "ForceSharedSpaces",   JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) },
   { NULL, JDK_Version(0), JDK_Version(0) }
 };
 
@@ -1003,28 +1004,6 @@
   }
 }
 
-void Arguments::check_compressed_oops_compat() {
-#ifdef _LP64
-  assert(UseCompressedOops, "Precondition");
-  // Is it on by default or set on ergonomically
-  bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);
-
-  // If dumping an archive or forcing its use, disable compressed oops if possible
-  if (DumpSharedSpaces || RequireSharedSpaces) {
-    if (is_on_by_default) {
-      FLAG_SET_DEFAULT(UseCompressedOops, false);
-      return;
-    } else {
-      vm_exit_during_initialization(
-        "Class Data Sharing is not supported with compressed oops yet", NULL);
-    }
-  } else if (UseSharedSpaces) {
-    // UseSharedSpaces is on by default. With compressed oops, we turn it off.
-    FLAG_SET_DEFAULT(UseSharedSpaces, false);
-  }
-#endif
-}
-
 void Arguments::set_tiered_flags() {
   // With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
   if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
@@ -1123,40 +1102,28 @@
     set_parnew_gc_flags();
   }
 
+  // MaxHeapSize is aligned down in collectorPolicy
+  size_t max_heap = align_size_down(MaxHeapSize,
+                                    CardTableRS::ct_max_alignment_constraint());
+
   // Now make adjustments for CMS
-  size_t young_gen_per_worker;
-  intx new_ratio;
-  size_t min_new_default;
-  intx tenuring_default;
-  if (CMSUseOldDefaults) {  // old defaults: "old" as of 6.0
-    if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) {
-      FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M);
-    }
-    young_gen_per_worker = 4*M;
-    new_ratio = (intx)15;
-    min_new_default = 4*M;
-    tenuring_default = (intx)0;
-  } else { // new defaults: "new" as of 6.0
-    young_gen_per_worker = CMSYoungGenPerWorker;
-    new_ratio = (intx)7;
-    min_new_default = 16*M;
-    tenuring_default = (intx)4;
-  }
-
-  // Preferred young gen size for "short" pauses
+  intx   tenuring_default = (intx)6;
+  size_t young_gen_per_worker = CMSYoungGenPerWorker;
+
+  // Preferred young gen size for "short" pauses:
+  // upper bound depends on # of threads and NewRatio.
   const uintx parallel_gc_threads =
     (ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
   const size_t preferred_max_new_size_unaligned =
-    ScaleForWordSize(young_gen_per_worker * parallel_gc_threads);
-  const size_t preferred_max_new_size =
+    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads));
+  size_t preferred_max_new_size =
     align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
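
To make the new sizing bound concrete, here is an illustrative recomputation with made-up numbers (ignoring ScaleForWordSize, which only adjusts for word size):

    // Worked example of the bound above; all values are illustrative.
    // MIN2 behaves like std::min on these operands.
    const size_t M = 1024 * 1024;
    const size_t max_heap             = 512 * M; // MaxHeapSize after alignment
    const uintx  new_ratio            = 2;       // NewRatio
    const uintx  parallel_gc_threads  = 4;       // ParallelGCThreads
    const size_t young_gen_per_worker = 64 * M;  // x86 pd default in this change

    const size_t by_heap    = max_heap / (new_ratio + 1);                 // ~170M
    const size_t by_workers = young_gen_per_worker * parallel_gc_threads; // 256M
    // MIN2(by_heap, by_workers) == by_heap here, so the heap-based cap wins.
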
 
   // Unless explicitly requested otherwise, size young gen
-  // for "short" pauses ~ 4M*ParallelGCThreads
+  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
 
   // If either MaxNewSize or NewRatio is set on the command line,
   // assume the user is trying to set the size of the young gen.
-
   if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
 
     // Set MaxNewSize to our calculated preferred_max_new_size unless
@@ -1169,49 +1136,13 @@
     }
     if (PrintGCDetails && Verbose) {
       // Too early to use gclog_or_tty
-      tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
+      tty->print_cr("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
     }
 
-    // Unless explicitly requested otherwise, prefer a large
-    // Old to Young gen size so as to shift the collection load
-    // to the old generation concurrent collector
-
-    // If this is only guarded by FLAG_IS_DEFAULT(NewRatio)
-    // then NewSize and OldSize may be calculated.  That would
-    // generally lead to some differences with ParNewGC for which
-    // there was no obvious reason.  Also limit to the case where
-    // MaxNewSize has not been set.
-
-    FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio));
-
     // Code along this path potentially sets NewSize and OldSize
 
-    // Calculate the desired minimum size of the young gen but if
-    // NewSize has been set on the command line, use it here since
-    // it should be the final value.
-    size_t min_new;
-    if (FLAG_IS_DEFAULT(NewSize)) {
-      min_new = align_size_up(ScaleForWordSize(min_new_default),
-                              os::vm_page_size());
-    } else {
-      min_new = NewSize;
-    }
-    size_t prev_initial_size = InitialHeapSize;
-    if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) {
-      FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize);
-      // Currently minimum size and the initial heap sizes are the same.
-      set_min_heap_size(InitialHeapSize);
-      if (PrintGCDetails && Verbose) {
-        warning("Initial heap size increased to " SIZE_FORMAT " M from "
-                SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
-                InitialHeapSize/M, prev_initial_size/M);
-      }
-    }
-
-    // MaxHeapSize is aligned down in collectorPolicy
-    size_t max_heap =
-      align_size_down(MaxHeapSize,
-                      CardTableRS::ct_max_alignment_constraint());
+    assert(max_heap >= InitialHeapSize, "Error");
+    assert(max_heap >= NewSize, "Error");
 
     if (PrintGCDetails && Verbose) {
       // Too early to use gclog_or_tty
@@ -1220,7 +1151,11 @@
            " max_heap: " SIZE_FORMAT,
            min_heap_size(), InitialHeapSize, max_heap);
     }
-    if (max_heap > min_new) {
+    size_t min_new = preferred_max_new_size;
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      min_new = NewSize;
+    }
+    if (max_heap > min_new && min_heap_size() > min_new) {
       // Unless explicitly requested otherwise, make young gen
       // at least min_new, and at most preferred_max_new_size.
       if (FLAG_IS_DEFAULT(NewSize)) {
@@ -1228,18 +1163,17 @@
         FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
         if (PrintGCDetails && Verbose) {
           // Too early to use gclog_or_tty
-          tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
+          tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
         }
       }
       // Unless explicitly requested otherwise, size old gen
-      // so that it's at least 3X of NewSize to begin with;
-      // later NewRatio will decide how it grows; see above.
+      // so it is NewRatio x NewSize.
       if (FLAG_IS_DEFAULT(OldSize)) {
         if (max_heap > NewSize) {
-          FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize));
+          FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
           if (PrintGCDetails && Verbose) {
             // Too early to use gclog_or_tty
-            tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
+            tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
           }
         }
       }
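
Continuing with the same illustrative numbers: once NewSize settles at roughly 170M, the default OldSize becomes MIN2(NewRatio*NewSize, max_heap - NewSize) = MIN2(2*170M, 512M - 170M) = 340M, so the young and old generations fill the heap in the intended 1:NewRatio proportion.
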
@@ -1383,7 +1317,7 @@
 void Arguments::set_ergonomics_flags() {
   // Parallel GC is not compatible with sharing. If one specifies
   // that they want sharing explicitly, do not set ergonomics flags.
-  if (DumpSharedSpaces || ForceSharedSpaces) {
+  if (DumpSharedSpaces || RequireSharedSpaces) {
     return;
   }
 
@@ -1690,13 +1624,13 @@
 }
 
 bool Arguments::verify_min_value(intx val, intx min, const char* name) {
-  // Returns true if given value is greater than specified min threshold
+  // Returns true if the given value is at least the specified min threshold
   // false, otherwise.
   if (val >= min ) {
       return true;
   }
   jio_fprintf(defaultStream::error_stream(),
-              "%s of " INTX_FORMAT " is invalid; must be greater than " INTX_FORMAT "\n",
+              "%s of " INTX_FORMAT " is invalid; must be at least " INTX_FORMAT "\n",
               name, val, min);
   return false;
 }
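
Note that the check is inclusive, matching the corrected wording: verify_min_value(1, 1, ...) passes. With the ParGCArrayScanChunk check added further below, for example, a command-line value of 0 would now be rejected with a message of the form:

    ParGCArrayScanChunk of 0 is invalid; must be at least 1
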
@@ -1846,33 +1780,6 @@
 
   status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
 
-  // Check whether user-specified sharing option conflicts with GC or page size.
-  // Both sharing and large pages are enabled by default on some platforms;
-  // large pages override sharing only if explicitly set on the command line.
-  const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode ||
-          UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC ||
-          UseLargePages && FLAG_IS_CMDLINE(UseLargePages);
-  if (cannot_share) {
-    // Either force sharing on by forcing the other options off, or
-    // force sharing off.
-    if (DumpSharedSpaces || ForceSharedSpaces) {
-      jio_fprintf(defaultStream::error_stream(),
-                  "Using Serial GC and default page size because of %s\n",
-                  ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump");
-      force_serial_gc();
-      FLAG_SET_DEFAULT(UseLargePages, false);
-    } else {
-      if (UseSharedSpaces && Verbose) {
-        jio_fprintf(defaultStream::error_stream(),
-                    "Turning off use of shared archive because of "
-                    "choice of garbage collector or large pages\n");
-      }
-      no_shared_spaces();
-    }
-  } else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) {
-    FLAG_SET_DEFAULT(UseLargePages, false);
-  }
-
   status = status && check_gc_consistency();
   status = status && check_stack_pages();
 
@@ -1950,6 +1857,8 @@
     status = false;
   }
 
+  status = status && verify_min_value(ParGCArrayScanChunk, 1, "ParGCArrayScanChunk");
+
 #ifndef SERIALGC
   if (UseG1GC) {
     status = status && verify_percentage(InitiatingHeapOccupancyPercent,
@@ -2413,9 +2322,6 @@
     } else if (match_option(option, "-Xshare:on", &tail)) {
       FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
       FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
-#ifdef TIERED
-      FLAG_SET_CMDLINE(bool, ForceSharedSpaces, true);
-#endif // TIERED
     // -Xshare:auto
     } else if (match_option(option, "-Xshare:auto", &tail)) {
       FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
@@ -2912,6 +2818,36 @@
   return JNI_OK;
 }
 
+void Arguments::set_shared_spaces_flags() {
+  // Check whether class data sharing settings conflict with GC, compressed oops
+  // or page size, and fix them up.  Explicit sharing options override other
+  // settings.
+  const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode ||
+    UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC ||
+    UseCompressedOops || UseLargePages && FLAG_IS_CMDLINE(UseLargePages);
+  const bool must_share = DumpSharedSpaces || RequireSharedSpaces;
+  const bool might_share = must_share || UseSharedSpaces;
+  if (cannot_share) {
+    if (must_share) {
+      warning("selecting serial gc and disabling large pages %s"
+              "because of %s", "" LP64_ONLY("and compressed oops "),
+              DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on");
+      force_serial_gc();
+      FLAG_SET_CMDLINE(bool, UseLargePages, false);
+      LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedOops, false));
+    } else {
+      if (UseSharedSpaces && Verbose) {
+        warning("turning off use of shared archive because of "
+                "choice of garbage collector or large pages");
+      }
+      no_shared_spaces();
+    }
+  } else if (UseLargePages && might_share) {
+    // Disable large pages to allow shared spaces.  This is sub-optimal, since
+    // there may not even be a shared archive to use.
+    FLAG_SET_DEFAULT(UseLargePages, false);
+  }
+}
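
The precedence the new function implements can be summarized with a few illustrative invocations (outcomes follow directly from the branches above):

    java -Xshare:on -XX:+UseConcMarkSweepGC ...
        required sharing wins: warning printed, serial GC forced,
        large pages (and, on LP64, compressed oops) turned off

    java -XX:+UseParallelGC ...
        optional sharing loses: the shared archive is turned off,
        with a warning only in verbose mode

    (no conflicting flags, but large pages enabled by platform default)
        large pages are turned off so that a shared archive, if one
        is present, can still be mapped
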
 
 // Parse entry point called from JNI_CreateJavaVM
 
@@ -3059,9 +2995,7 @@
   // Set flags based on ergonomics.
   set_ergonomics_flags();
 
-  if (UseCompressedOops) {
-    check_compressed_oops_compat();
-  }
+  set_shared_spaces_flags();
 
   // Check the GC selections again.
   if (!check_gc_consistency()) {
@@ -3079,22 +3013,17 @@
   }
 
 #ifndef KERNEL
-  if (UseConcMarkSweepGC) {
-    // Set flags for CMS and ParNew.  Check UseConcMarkSweep first
-    // to ensure that when both UseConcMarkSweepGC and UseParNewGC
-    // are true, we don't call set_parnew_gc_flags() as well.
+  // Set heap size based on available physical memory
+  set_heap_size();
+  // Set per-collector flags
+  if (UseParallelGC || UseParallelOldGC) {
+    set_parallel_gc_flags();
+  } else if (UseConcMarkSweepGC) { // should be done before ParNew check below
     set_cms_and_parnew_gc_flags();
-  } else {
-    // Set heap size based on available physical memory
-    set_heap_size();
-    // Set per-collector flags
-    if (UseParallelGC || UseParallelOldGC) {
-      set_parallel_gc_flags();
-    } else if (UseParNewGC) {
-      set_parnew_gc_flags();
-    } else if (UseG1GC) {
-      set_g1_gc_flags();
-    }
+  } else if (UseParNewGC) {  // skipped if CMS is set above
+    set_parnew_gc_flags();
+  } else if (UseG1GC) {
+    set_g1_gc_flags();
   }
 #endif // KERNEL
 
--- a/src/share/vm/runtime/arguments.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/runtime/arguments.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -301,8 +301,6 @@
 
   // Tiered
   static void set_tiered_flags();
-  // Check compressed oops compatibility with other flags
-  static void check_compressed_oops_compat();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
@@ -312,6 +310,7 @@
   static void set_g1_gc_flags();
   // GC ergonomics
   static void set_ergonomics_flags();
+  static void set_shared_spaces_flags();
   // Setup heap size
   static void set_heap_size();
   // Based on automatic selection criteria, should the
--- a/src/share/vm/runtime/globals.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1540,12 +1540,8 @@
   product(bool, AlwaysPreTouch, false,                                      \
           "It forces all freshly committed pages to be pre-touched.")       \
                                                                             \
-  product(bool, CMSUseOldDefaults, false,                                   \
-          "A flag temporarily introduced to allow reverting to some "       \
-          "older default settings; older as of 6.0")                        \
-                                                                            \
-  product(intx, CMSYoungGenPerWorker, 16*M,                                 \
-          "The amount of young gen chosen by default per GC worker "        \
+  product_pd(intx, CMSYoungGenPerWorker,                                    \
+          "The maximum size of young gen chosen by default per GC worker "  \
           "thread available")                                               \
                                                                             \
   product(bool, GCOverheadReporting, false,                                 \
@@ -3653,9 +3649,6 @@
   product(bool, RequireSharedSpaces, false,                                 \
           "Require shared spaces in the permanent generation")              \
                                                                             \
-  product(bool, ForceSharedSpaces, false,                                   \
-          "Require shared spaces in the permanent generation")              \
-                                                                            \
   product(bool, DumpSharedSpaces, false,                                    \
            "Special mode: JVM reads a class list, loads classes, builds "   \
             "shared spaces, and dumps the shared spaces to a file to be "   \
--- a/src/share/vm/utilities/debug.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/utilities/debug.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_DEBUG_HPP
 #define SHARE_VM_UTILITIES_DEBUG_HPP
 
+#include "prims/jvm.h"
 #include "utilities/globalDefinitions.hpp"
 
 #include <stdarg.h>
@@ -48,7 +49,7 @@
 FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) {
   va_list argp;
   va_start(argp, format);
-  vsnprintf(_buf, bufsz, format, argp);
+  jio_vsnprintf(_buf, bufsz, format, argp);
   va_end(argp);
 }
 
@@ -61,7 +62,7 @@
 
   va_list argp;
   va_start(argp, format);
-  vsnprintf(buf_end, bufsz - len, format, argp);
+  jio_vsnprintf(buf_end, bufsz - len, format, argp);
   va_end(argp);
 }
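
Routing the formatting through jio_vsnprintf (hence the new prims/jvm.h include) keeps format-specifier handling consistent with the rest of the VM across platforms. The usual client of this template is the err_msg typedef used in assertion messages; an illustrative call, with made-up condition and message text:

    assert(idx < len, err_msg("index %d out of bounds (length %d)", idx, len));
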
 
--- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1185,7 +1185,7 @@
 // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
 // (in ILP32).
 
-#define BOOL_TO_STR(__b) (__b) ? "true" : "false"
+#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false")
 
 // Format 32-bit quantities.
 #define INT32_FORMAT  "%d"
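
The extra parentheses in BOOL_TO_STR matter whenever the expansion lands inside a larger expression. A contrived but compilable example of the silent mis-parse the old definition allowed:

    const char* s = BOOL_TO_STR(b) + 1;  // intent: skip the first character
    // old expansion:  (b) ? "true" : "false" + 1
    //   '+' binds tighter than '?:', so b == true yields "true", not "rue"
    // new expansion:  ((b) ? "true" : "false") + 1
    //   yields "rue" or "alse", as intended
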
--- a/src/share/vm/utilities/workgroup.cpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/utilities/workgroup.cpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -156,7 +156,7 @@
     tty->print_cr("/nFinished work gang %s: %d/%d sequence %d",
                   name(), finished_workers(), total_workers(),
                   _sequence_number);
-    }
+  }
 }
 
 void AbstractWorkGang::stop() {
--- a/src/share/vm/utilities/workgroup.hpp	Fri Mar 18 01:44:15 2011 -0700
+++ b/src/share/vm/utilities/workgroup.hpp	Fri Mar 18 09:03:43 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,20 @@
 # include "thread_windows.inline.hpp"
 #endif
 
+// Task class hierarchy:
+//   AbstractGangTask
+//     AbstractGangTaskWOopQueues
+//
+// Gang/Group class hierarchy:
+//   AbstractWorkGang
+//     WorkGang
+//       FlexibleWorkGang
+//         YieldingFlexibleWorkGang (defined in another file)
+//
+// Worker class hierarchy:
+//   GangWorker (subclass of WorkerThread)
+//     YieldingFlexibleGangWorker   (defined in another file)
+
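
Client code subclasses the task type and hands an instance to a gang, whose workers then invoke it in parallel. A minimal sketch, assuming the usual AbstractGangTask interface from this file (a name-taking constructor and a virtual work(int) method) and a gang constructed elsewhere:

    class CountTask : public AbstractGangTask {
    public:
      CountTask() : AbstractGangTask("CountTask") {}
      virtual void work(int i) {
        // Each gang worker runs this with its own worker id.
        tty->print_cr("worker %d running %s", i, name());
      }
    };

    // Hypothetical call site; gang was created during VM initialization.
    CountTask task;
    gang->run_task(&task);  // returns when every worker has finished
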
 // Forward declarations of classes defined here
 
 class WorkGang;