changeset 2903:78bef05801ca

Merge
author twisti
date Thu, 10 Nov 2011 04:46:08 -0800
parents 670a74b863fc (current diff) 869804b759e7 (diff)
children f9a80a035a4a e8fdaf4a66cb
files src/share/vm/precompiled.hpp src/share/vm/runtime/globals.hpp
diffstat 29 files changed, 727 insertions(+), 746 deletions(-)
--- a/.hgtags	Wed Nov 09 07:25:51 2011 -0800
+++ b/.hgtags	Thu Nov 10 04:46:08 2011 -0800
@@ -191,3 +191,9 @@
 49ed7eacfd16616166ff066493143889741097af jdk8-b08
 7c20d272643f47195478708eff593a9cce40fec4 jdk8-b09
 e4f412d2b75d2c797acff965aa2c420e3d358f09 hs23-b02
+d815de2e85e511b7deab2a83cf80c0224d011da9 jdk8-b10
+4d3850d9d326ac3a9bee2d867727e954322d014e hs23-b03
+4538caeef7b6cbd4302bebced805d65e68ccf301 jdk8-b11
+6534482ff68ad79066dfe15dfb6d8905f09681bd hs23-b04
+1d3900713a67a0a39faf4e12c9c158d55aebef87 jdk8-b12
+3e609627e780736f372eb14d29bb9b5e53b21fbf hs23-b05
--- a/make/bsd/makefiles/buildtree.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/bsd/makefiles/buildtree.make	Thu Nov 10 04:46:08 2011 -0800
@@ -234,6 +234,8 @@
 	echo "$(call gamma-path,commonsrc,share/vm/prims) \\"; \
 	echo "$(call gamma-path,altsrc,share/vm) \\"; \
 	echo "$(call gamma-path,commonsrc,share/vm) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
 	echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
 	echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
 	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
--- a/make/bsd/makefiles/gcc.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/bsd/makefiles/gcc.make	Thu Nov 10 04:46:08 2011 -0800
@@ -88,7 +88,7 @@
 ifneq ($(USE_PRECOMPILED_HEADER),0)
 USE_PRECOMPILED_HEADER=1
 PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled.hpp
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
 PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
 endif
 endif
--- a/make/hotspot_version	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/hotspot_version	Thu Nov 10 04:46:08 2011 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=23
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=06
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/makefiles/buildtree.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/linux/makefiles/buildtree.make	Thu Nov 10 04:46:08 2011 -0800
@@ -223,6 +223,8 @@
 	echo "$(call gamma-path,commonsrc,share/vm/prims) \\"; \
 	echo "$(call gamma-path,altsrc,share/vm) \\"; \
 	echo "$(call gamma-path,commonsrc,share/vm) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
 	echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
 	echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
 	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
--- a/make/linux/makefiles/gcc.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/linux/makefiles/gcc.make	Thu Nov 10 04:46:08 2011 -0800
@@ -52,7 +52,7 @@
 ifneq ($(USE_PRECOMPILED_HEADER),0)
 USE_PRECOMPILED_HEADER=1
 PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled.hpp
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
 PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
 endif
 endif
--- a/make/solaris/makefiles/buildtree.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/solaris/makefiles/buildtree.make	Thu Nov 10 04:46:08 2011 -0800
@@ -216,6 +216,8 @@
 	echo "$(call gamma-path,commonsrc,share/vm/prims) \\"; \
 	echo "$(call gamma-path,altsrc,share/vm) \\"; \
 	echo "$(call gamma-path,commonsrc,share/vm) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
 	echo "$(call gamma-path,altsrc,cpu/$(ARCH)/vm) \\"; \
 	echo "$(call gamma-path,commonsrc,cpu/$(ARCH)/vm) \\"; \
 	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(ARCH)/vm) \\"; \
--- a/make/solaris/makefiles/gcc.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/solaris/makefiles/gcc.make	Thu Nov 10 04:46:08 2011 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
 ifneq ($(USE_PRECOMPILED_HEADER),0)
 USE_PRECOMPILED_HEADER=1
 PRECOMPILED_HEADER_DIR=.
-PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled.hpp
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
 PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
 endif
 endif
--- a/make/windows/makefiles/vm.make	Wed Nov 09 07:25:51 2011 -0800
+++ b/make/windows/makefiles/vm.make	Thu Nov 10 04:46:08 2011 -0800
@@ -134,6 +134,7 @@
 
 CPP_INCLUDE_DIRS=$(CPP_INCLUDE_DIRS) \
   /I "$(COMMONSRC)\share\vm" \
+  /I "$(COMMONSRC)\share\vm\precompiled" \
   /I "$(COMMONSRC)\share\vm\prims" \
   /I "$(COMMONSRC)\os\windows\vm" \
   /I "$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm" \
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -62,7 +62,7 @@
   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
 
   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
-  IndexSetStart  = MinObjAlignment;
+  IndexSetStart  = (int) MinChunkSize;
   IndexSetStride = MinObjAlignment;
 }
 
@@ -138,7 +138,7 @@
   } else {
     _fitStrategy = FreeBlockStrategyNone;
   }
-  checkFreeListConsistency();
+  check_free_list_consistency();
 
   // Initialize locks for parallel case.
 
@@ -1358,17 +1358,29 @@
   ShouldNotReachHere();
 }
 
-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
-  const {
+bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
 }
 
+bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
+  assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
+         (_smallLinearAllocBlock._word_size == fc->size()),
+         "Linear allocation block shows incorrect size");
+  return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
+          (_smallLinearAllocBlock._word_size == fc->size()));
+}
+
+// Check if the purported free chunk is present either as a linear
+// allocation block, the size-indexed table of (smaller) free blocks,
+// or the larger free blocks kept in the binary tree dictionary.
 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
-  if (fc->size() >= IndexSetSize) {
+  if (verify_chunk_is_linear_alloc_block(fc)) {
+    return true;
+  } else if (fc->size() < IndexSetSize) {
+    return verifyChunkInIndexedFreeLists(fc);
+  } else {
     return dictionary()->verifyChunkInFreeLists(fc);
-  } else {
-    return verifyChunkInIndexedFreeLists(fc);
   }
 }
 
@@ -2495,7 +2507,8 @@
   FreeChunk* tail =  _indexedFreeList[size].tail();
   size_t    num = _indexedFreeList[size].count();
   size_t      n = 0;
-  guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
+  guarantee(((size >= MinChunkSize) && (size % IndexSetStride == 0)) || fc == NULL,
+            "Slot should have been empty");
   for (; fc != NULL; fc = fc->next(), n++) {
     guarantee(fc->size() == size, "Size inconsistency");
     guarantee(fc->isFree(), "!free?");
@@ -2506,14 +2519,14 @@
 }
 
 #ifndef PRODUCT
-void CompactibleFreeListSpace::checkFreeListConsistency() const {
+void CompactibleFreeListSpace::check_free_list_consistency() const {
   assert(_dictionary->minSize() <= IndexSetSize,
     "Some sizes can't be allocated without recourse to"
     " linear allocation buffers");
   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
     "else MIN_TREE_CHUNK_SIZE is wrong");
-  assert((IndexSetStride == 2 && IndexSetStart == 2) ||
-         (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
+  assert((IndexSetStride == 2 && IndexSetStart == 4) ||                   // 32-bit
+         (IndexSetStride == 1 && IndexSetStart == 3), "just checking");   // 64-bit
   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
       "Some for-loops may be incorrectly initialized");
   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
@@ -2688,33 +2701,27 @@
   }
 }
 
+// If this is changed in the future to allow parallel
+// access, one would need to take the FL locks and,
+// depending on how it is used, stagger access from
+// parallel threads to reduce contention.
 void CFLS_LAB::retire(int tid) {
   // We run this single threaded with the world stopped;
   // so no need for locks and such.
-#define CFLS_LAB_PARALLEL_ACCESS 0
   NOT_PRODUCT(Thread* t = Thread::current();)
   assert(Thread::current()->is_VM_thread(), "Error");
-  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
-         "Will access to uninitialized slot below");
-#if CFLS_LAB_PARALLEL_ACCESS
-  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
-       i > 0;
-       i -= CompactibleFreeListSpace::IndexSetStride) {
-#else // CFLS_LAB_PARALLEL_ACCESS
   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
        i < CompactibleFreeListSpace::IndexSetSize;
        i += CompactibleFreeListSpace::IndexSetStride) {
-#endif // !CFLS_LAB_PARALLEL_ACCESS
     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
            "Can't retire more than what we obtained");
     if (_num_blocks[i] > 0) {
       size_t num_retire =  _indexedFreeList[i].count();
       assert(_num_blocks[i] > num_retire, "Should have used at least one");
       {
-#if CFLS_LAB_PARALLEL_ACCESS
-        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
-                        Mutex::_no_safepoint_check_flag);
-#endif // CFLS_LAB_PARALLEL_ACCESS
+        // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+        //                Mutex::_no_safepoint_check_flag);
+
         // Update globals stats for num_blocks used
         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
         _global_num_workers[i]++;
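
The compactibleFreeListSpace.cpp change above starts the indexed free lists at MinChunkSize rather than MinObjAlignment, and the updated assert in check_free_list_consistency() expects IndexSetStart == 4 with stride 2 (32-bit) and IndexSetStart == 3 with stride 1 (64-bit). A hedged worked example of that arithmetic, assuming FreeChunk occupies three machine words (a size field plus two list links); numQuanta here is a plain round-up helper, not the HotSpot definition.

#include <cassert>
#include <cstddef>

// Round a byte count up to a whole number of alignment quanta.
static size_t numQuanta(size_t bytes, size_t quantum_bytes) {
  return (bytes + quantum_bytes - 1) / quantum_bytes;
}

// MinChunkSize in words, mirroring
//   numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
static size_t min_chunk_size_words(size_t word_bytes, size_t min_obj_alignment_words) {
  const size_t free_chunk_bytes = 3 * word_bytes;                 // assumed FreeChunk layout
  const size_t alignment_bytes  = min_obj_alignment_words * word_bytes;
  return numQuanta(free_chunk_bytes, alignment_bytes) * min_obj_alignment_words;
}

int main() {
  assert(min_chunk_size_words(4, 2) == 4);  // 32-bit: IndexSetStart == 4, IndexSetStride == 2
  assert(min_chunk_size_words(8, 1) == 3);  // 64-bit: IndexSetStart == 3, IndexSetStride == 1
  return 0;
}
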
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -502,10 +502,14 @@
   void verifyFreeLists()                  const PRODUCT_RETURN;
   void verifyIndexedFreeLists()           const;
   void verifyIndexedFreeList(size_t size) const;
-  // verify that the given chunk is in the free lists.
+  // Verify that the given chunk is in the free lists:
+  // i.e. either the binary tree dictionary, the indexed free lists
+  // or the linear allocation block.
   bool verifyChunkInFreeLists(FreeChunk* fc) const;
+  // Verify that the given chunk is the linear allocation block
+  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
   // Do some basic checks on the free lists.
-  void checkFreeListConsistency()         const PRODUCT_RETURN;
+  void check_free_list_consistency()      const PRODUCT_RETURN;
 
   // Printing support
   void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -1816,9 +1816,7 @@
 
   // this will also free any regions totally full of garbage objects,
   // and sort the regions.
-  g1h->g1_policy()->record_concurrent_mark_cleanup_end(
-                        g1_par_note_end_task.freed_bytes(),
-                        g1_par_note_end_task.max_live_bytes());
+  g1h->g1_policy()->record_concurrent_mark_cleanup_end();
 
   // Statistics.
   double end = os::elapsedTime();
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -147,12 +147,8 @@
           }
         }
       } while (cm()->restart_for_overflow());
+
       double counting_start_time = os::elapsedVTime();
-
-      // YSR: These look dubious (i.e. redundant) !!! FIX ME
-      slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
-      slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
-
       if (!cm()->has_aborted()) {
         double count_start_sec = os::elapsedTime();
         if (PrintGC) {
@@ -175,6 +171,7 @@
           }
         }
       }
+
       double end_time = os::elapsedVTime();
       _vtime_count_accum += (end_time - counting_start_time);
       // Update the total virtual time before doing this, since it will try
@@ -215,20 +212,20 @@
           gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
         }
 
-        // Now do the remainder of the cleanup operation.
+        // Now do the concurrent cleanup operation.
         _cm->completeCleanup();
+
         // Notify anyone who's waiting that there are no more free
-        // regions coming. We have to do this before we join the STS,
-        // otherwise we might deadlock: a GC worker could be blocked
-        // waiting for the notification whereas this thread will be
-        // blocked for the pause to finish while it's trying to join
-        // the STS, which is conditional on the GC workers finishing.
+        // regions coming. We have to do this before we join the STS
+        // (in fact, we should not attempt to join the STS in the
+        // interval between finishing the cleanup pause and clearing
+        // the free_regions_coming flag) otherwise we might deadlock:
+        // a GC worker could be blocked waiting for the notification
+        // whereas this thread will be blocked for the pause to finish
+        // while it's trying to join the STS, which is conditional on
+        // the GC workers finishing.
         g1h->reset_free_regions_coming();
 
-        _sts.join();
-        g1_policy->record_concurrent_mark_cleanup_completed();
-        _sts.leave();
-
         double cleanup_end_sec = os::elapsedTime();
         if (PrintGC) {
           gclog_or_tty->date_stamp(PrintGCDateStamps);
@@ -240,6 +237,36 @@
       guarantee(cm()->cleanup_list_is_empty(),
                 "at this point there should be no regions on the cleanup list");
 
+      // There is a tricky race between recording that the concurrent
+      // cleanup has completed and a potential Full GC starting around
+      // the same time. We want to make sure that the Full GC calls
+      // abort() on concurrent mark after
+      // record_concurrent_mark_cleanup_completed(), since abort() is
+      // the method that will reset the concurrent mark state. If we
+      // end up calling record_concurrent_mark_cleanup_completed()
+      // after abort() then we might incorrectly undo some of the work
+      // abort() did. Checking the has_aborted() flag after joining
+      // the STS allows the correct ordering of the two methods. There
+      // are two scenarios:
+      //
+      // a) If we reach here before the Full GC, the fact that we have
+      // joined the STS means that the Full GC cannot start until we
+      // leave the STS, so record_concurrent_mark_cleanup_completed()
+      // will complete before abort() is called.
+      //
+      // b) If we reach here during the Full GC, we'll be held up from
+      // joining the STS until the Full GC is done, which means that
+      // abort() will have completed and has_aborted() will return
+      // true to prevent us from calling
+      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
+      // not needed any more as the concurrent mark state has been
+      // already reset).
+      _sts.join();
+      if (!cm()->has_aborted()) {
+        g1_policy->record_concurrent_mark_cleanup_completed();
+      }
+      _sts.leave();
+
       if (cm()->has_aborted()) {
         if (PrintGC) {
           gclog_or_tty->date_stamp(PrintGCDateStamps);
@@ -248,7 +275,7 @@
         }
       }
 
-      // we now want to allow clearing of the marking bitmap to be
+      // We now want to allow clearing of the marking bitmap to be
       // suspended by a collection pause.
       _sts.join();
       _cm->clearNextBitmap();
@@ -305,13 +332,15 @@
   clear_started();
 }
 
-// Note: this method, although exported by the ConcurrentMarkSweepThread,
-// which is a non-JavaThread, can only be called by a JavaThread.
-// Currently this is done at vm creation time (post-vm-init) by the
-// main/Primordial (Java)Thread.
-// XXX Consider changing this in the future to allow the CMS thread
+// Note: As is the case with CMS - this method, although exported
+// by the ConcurrentMarkThread, which is a non-JavaThread, can only
+// be called by a JavaThread. Currently this is done at vm creation
+// time (post-vm-init) by the main/Primordial (Java)Thread.
+// XXX Consider changing this in the future to allow the CM thread
 // itself to create this thread?
 void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
+  assert(UseG1GC, "SLT thread needed only for concurrent GC");
+  assert(THREAD->is_Java_thread(), "must be a Java thread");
   assert(_slt == NULL, "SLT already created");
   _slt = SurrogateLockerThread::make(THREAD);
 }
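
The long comment added above argues that checking has_aborted() only after joining the suspendible thread set orders record_concurrent_mark_cleanup_completed() correctly against a concurrent Full GC's abort(). A simplified model of that pattern, using placeholder types rather than the real SuspendibleThreadSet beyond the join()/leave() calls visible in the diff:

// Either the Full GC has not started (join() succeeds before it can begin,
// so the record happens first), or it has already finished (has_aborted()
// is true and the record is skipped, the mark state having been reset).
struct SuspendibleSetLike {
  void join()  { /* blocks while a safepoint operation such as a Full GC runs */ }
  void leave() { /* lets pending safepoint operations proceed */ }
};

template <typename ConcurrentMark, typename Policy>
void record_cleanup_completed(SuspendibleSetLike& sts, ConcurrentMark& cm, Policy& policy) {
  sts.join();
  if (!cm.has_aborted()) {
    policy.record_concurrent_mark_cleanup_completed();
  }
  sts.leave();
}
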
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -2011,8 +2011,6 @@
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();
 
-  g1_policy()->note_start_of_mark_thread();
-
   _refine_cte_cl =
     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
                                     g1_rem_set(),
@@ -3960,9 +3958,6 @@
         // _next_top_at_mark_start == top, _next_marked_bytes == 0
         // _next_marked_bytes == next_marked_bytes.
       }
-
-      // Now make sure the region has the right index in the sorted array.
-      g1_policy()->note_change_in_marked_bytes(cur);
     }
     cur = cur->next_in_collection_set();
   }
@@ -5073,7 +5068,7 @@
 
     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
     for (int idx = i; idx < limit; idx += stride) {
-      DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
+      DiscoveredList& ref_list = rp->discovered_refs()[idx];
 
       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
       while (iter.has_next()) {
@@ -5507,34 +5502,36 @@
   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
   double start = os::elapsedTime();
 
-  // Iterate over the dirty cards region list.
-  G1ParCleanupCTTask cleanup_task(ct_bs, this);
-
-  if (ParallelGCThreads > 0) {
-    set_par_threads(workers()->total_workers());
-    workers()->run_task(&cleanup_task);
-    set_par_threads(0);
-  } else {
-    while (_dirty_cards_region_list) {
-      HeapRegion* r = _dirty_cards_region_list;
-      cleanup_task.clear_cards(r);
-      _dirty_cards_region_list = r->get_next_dirty_cards_region();
-      if (_dirty_cards_region_list == r) {
-        // The last region.
-        _dirty_cards_region_list = NULL;
+  {
+    // Iterate over the dirty cards region list.
+    G1ParCleanupCTTask cleanup_task(ct_bs, this);
+
+    if (ParallelGCThreads > 0) {
+      set_par_threads(workers()->total_workers());
+      workers()->run_task(&cleanup_task);
+      set_par_threads(0);
+    } else {
+      while (_dirty_cards_region_list) {
+        HeapRegion* r = _dirty_cards_region_list;
+        cleanup_task.clear_cards(r);
+        _dirty_cards_region_list = r->get_next_dirty_cards_region();
+        if (_dirty_cards_region_list == r) {
+          // The last region.
+          _dirty_cards_region_list = NULL;
+        }
+        r->set_next_dirty_cards_region(NULL);
       }
-      r->set_next_dirty_cards_region(NULL);
     }
+#ifndef PRODUCT
+    if (G1VerifyCTCleanup || VerifyAfterGC) {
+      G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
+      heap_region_iterate(&cleanup_verifier);
+    }
+#endif
   }
 
   double elapsed = os::elapsedTime() - start;
   g1_policy()->record_clear_ct_time(elapsed * 1000.0);
-#ifndef PRODUCT
-  if (G1VerifyCTCleanup || VerifyAfterGC) {
-    G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
-    heap_region_iterate(&cleanup_verifier);
-  }
-#endif
 }
 
 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
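
The cleanup_ct restructuring above wraps the card-clearing work and the non-product verification pass in one scope that finishes before the elapsed time is sampled, so in non-product builds with verification enabled that pass is now covered by the recorded "Clear CT" time. A hedged illustration of the scoping, with std::chrono standing in for os::elapsedTime():

#include <chrono>

double clear_ct_time_ms() {
  auto start = std::chrono::steady_clock::now();
  {
    // clear the dirty-card regions, serially or via the worker gang ...
    // ... then, in non-product builds, verify the card table was cleaned.
  }
  std::chrono::duration<double, std::milli> elapsed =
      std::chrono::steady_clock::now() - start;
  return elapsed.count();   // reported via record_clear_ct_time() in the real code
}
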
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -225,16 +225,12 @@
   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
   _recent_avg_pause_time_ratio(0.0),
-  _num_markings(0),
-  _n_marks(0),
-  _n_pauses_at_mark_end(0),
 
   _all_full_gc_times_ms(new NumberSeq()),
 
   // G1PausesBtwnConcMark defaults to -1
   // so the hack is to do the cast  QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
-  _n_marks_since_last_pause(0),
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
@@ -324,6 +320,7 @@
   _par_last_termination_attempts = new double[_parallel_gc_threads];
   _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
+  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];
 
   // start conservatively
   _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
@@ -440,6 +437,7 @@
   _reserve_regions = 0;
 
   initialize_all();
+  _collectionSetChooser = new CollectionSetChooser();
 }
 
 // Increment "i", mod "len"
@@ -500,7 +498,6 @@
   initialize_gc_policy_counters();
 
   G1YoungGenSizer sizer;
-  size_t initial_region_num = sizer.initial_young_region_num();
   _min_desired_young_length = sizer.min_young_region_num();
   _max_desired_young_length = sizer.max_young_region_num();
 
@@ -514,17 +511,14 @@
     }
   }
 
-  // GenCollectorPolicy guarantees that min <= initial <= max.
-  // Asserting here just to state that we rely on this property.
   assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
-  assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
-  assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");
 
   set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
   if (adaptive_young_list_length()) {
     _young_list_fixed_length = 0;
   } else {
-    _young_list_fixed_length = initial_region_num;
+    assert(_min_desired_young_length == _max_desired_young_length, "Min and max young size differ");
+    _young_list_fixed_length = _min_desired_young_length;
   }
   _free_regions_at_end_of_collection = _g1->free_regions();
   update_young_list_target_length();
@@ -921,6 +915,7 @@
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
+  _collectionSetChooser->updateAfterFullCollection();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -978,6 +973,7 @@
     _par_last_termination_attempts[i] = -1234.0;
     _par_last_gc_worker_end_times_ms[i] = -1234.0;
     _par_last_gc_worker_times_ms[i] = -1234.0;
+    _par_last_gc_worker_other_times_ms[i] = -1234.0;
   }
 #endif
 
@@ -986,8 +982,10 @@
     _cur_aux_times_set[i] = false;
   }
 
-  _satb_drain_time_set = false;
-  _last_satb_drain_processed_buffers = -1;
+  // These are initialized to zero here and they are set during
+  // the evacuation pause if marking is in progress.
+  _cur_satb_drain_time_ms = 0.0;
+  _last_satb_drain_processed_buffers = 0;
 
   _last_young_gc_full = false;
 
@@ -1029,39 +1027,7 @@
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                      size_t max_live_bytes) {
-  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
-  record_concurrent_mark_cleanup_end_work2();
-}
-
-void
-G1CollectorPolicy::
-record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
-                                         size_t max_live_bytes) {
-  if (_n_marks < 2) {
-    _n_marks++;
-  }
-}
-
-// The important thing about this is that it includes "os::elapsedTime".
-void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
-  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
-  _prev_collection_pause_end_ms += elapsed_time_ms;
-
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
-
-  _num_markings++;
-  _n_pauses_at_mark_end = _n_pauses;
-  _n_marks_since_last_pause++;
-}
-
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
+void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   _should_revert_to_full_young_gcs = false;
   _last_full_young_gc = true;
   _in_marking_window = false;
@@ -1131,61 +1097,65 @@
     (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
 }
 
-void G1CollectorPolicy::print_stats (int level,
-                                     const char* str,
-                                     double value) {
+void G1CollectorPolicy::print_stats(int level,
+                                    const char* str,
+                                    double value) {
   LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
 }
 
-void G1CollectorPolicy::print_stats (int level,
-                                     const char* str,
-                                     int value) {
+void G1CollectorPolicy::print_stats(int level,
+                                    const char* str,
+                                    int value) {
   LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
 }
 
-double G1CollectorPolicy::avg_value (double* data) {
+double G1CollectorPolicy::avg_value(double* data) {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     double ret = 0.0;
-    for (uint i = 0; i < ParallelGCThreads; ++i)
+    for (uint i = 0; i < ParallelGCThreads; ++i) {
       ret += data[i];
+    }
     return ret / (double) ParallelGCThreads;
   } else {
     return data[0];
   }
 }
 
-double G1CollectorPolicy::max_value (double* data) {
+double G1CollectorPolicy::max_value(double* data) {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     double ret = data[0];
-    for (uint i = 1; i < ParallelGCThreads; ++i)
-      if (data[i] > ret)
+    for (uint i = 1; i < ParallelGCThreads; ++i) {
+      if (data[i] > ret) {
         ret = data[i];
+      }
+    }
     return ret;
   } else {
     return data[0];
   }
 }
 
-double G1CollectorPolicy::sum_of_values (double* data) {
+double G1CollectorPolicy::sum_of_values(double* data) {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     double sum = 0.0;
-    for (uint i = 0; i < ParallelGCThreads; i++)
+    for (uint i = 0; i < ParallelGCThreads; i++) {
       sum += data[i];
+    }
     return sum;
   } else {
     return data[0];
   }
 }
 
-double G1CollectorPolicy::max_sum (double* data1,
-                                   double* data2) {
+double G1CollectorPolicy::max_sum(double* data1, double* data2) {
   double ret = data1[0] + data2[0];
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     for (uint i = 1; i < ParallelGCThreads; ++i) {
       double data = data1[i] + data2[i];
-      if (data > ret)
+      if (data > ret) {
         ret = data;
+      }
     }
   }
   return ret;
@@ -1285,6 +1255,10 @@
 
   _n_pauses++;
 
+  // These values are used to update the summary information that is
+  // displayed when TraceGen0Time is enabled, and are output as part
+  // of the PrintGCDetails output, in the non-parallel case.
+
   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
@@ -1294,42 +1268,68 @@
   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
   double termination_time = avg_value(_par_last_termination_times_ms);
 
-  double parallel_known_time = update_rs_time +
-                               ext_root_scan_time +
-                               mark_stack_scan_time +
-                               scan_rs_time +
-                               obj_copy_time +
-                               termination_time;
-
-  double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
-
-  PauseSummary* summary = _summary;
+  double known_time = ext_root_scan_time +
+                      mark_stack_scan_time +
+                      update_rs_time +
+                      scan_rs_time +
+                      obj_copy_time;
+
+  double other_time_ms = elapsed_ms;
+
+  // Subtract the SATB drain time. It's initialized to zero at the
+  // start of the pause and is updated during the pause if marking
+  // is in progress.
+  other_time_ms -= _cur_satb_drain_time_ms;
+
+  if (parallel) {
+    other_time_ms -= _cur_collection_par_time_ms;
+  } else {
+    other_time_ms -= known_time;
+  }
+
+  // Subtract the time taken to clean the card table from the
+  // current value of "other time"
+  other_time_ms -= _cur_clear_ct_time_ms;
+
+  // TraceGen0Time and TraceGen1Time summary info updating.
+  _all_pause_times_ms->add(elapsed_ms);
 
   if (update_stats) {
     _recent_rs_scan_times_ms->add(scan_rs_time);
     _recent_pause_times_ms->add(elapsed_ms);
     _recent_rs_sizes->add(rs_size);
 
-    MainBodySummary* body_summary = summary->main_body_summary();
-    guarantee(body_summary != NULL, "should not be null!");
-
-    if (_satb_drain_time_set)
-      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
-    else
-      body_summary->record_satb_drain_time_ms(0.0);
+    _summary->record_total_time_ms(elapsed_ms);
+    _summary->record_other_time_ms(other_time_ms);
+
+    MainBodySummary* body_summary = _summary->main_body_summary();
+    assert(body_summary != NULL, "should not be null!");
+
+    // This will be non-zero iff marking is currently in progress (i.e.
+    // _g1->mark_in_progress() == true) and the current pause was not
+    // an initial mark pause. Since the body_summary items are NumberSeqs,
+    // however, they have to be consistent and updated in lock-step with
+    // each other. Therefore we unconditionally record the SATB drain
+    // time - even if it's zero.
+    body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
 
     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
     body_summary->record_update_rs_time_ms(update_rs_time);
     body_summary->record_scan_rs_time_ms(scan_rs_time);
     body_summary->record_obj_copy_time_ms(obj_copy_time);
+
     if (parallel) {
       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
-      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
       body_summary->record_termination_time_ms(termination_time);
+
+      double parallel_known_time = known_time + termination_time;
+      double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
       body_summary->record_parallel_other_time_ms(parallel_other_time);
     }
+
     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
+    body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
 
     // We exempt parallel collection from this check because Alloc Buffer
     // fragmentation can produce negative collections.  Same with evac
@@ -1341,6 +1341,7 @@
            || _g1->evacuation_failed()
            || surviving_bytes <= _collection_set_bytes_used_before,
            "Or else negative collection!");
+
     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
     _recent_CS_bytes_surviving->add(surviving_bytes);
 
@@ -1391,6 +1392,13 @@
     }
   }
 
+  for (int i = 0; i < _aux_num; ++i) {
+    if (_cur_aux_times_set[i]) {
+      _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
+    }
+  }
+
+
   if (G1PolicyVerbose > 1) {
     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
   }
@@ -1417,61 +1425,60 @@
                            recent_avg_pause_time_ratio() * 100.0);
   }
 
-  double other_time_ms = elapsed_ms;
-
-  if (_satb_drain_time_set) {
-    other_time_ms -= _cur_satb_drain_time_ms;
-  }
-
-  if (parallel) {
-    other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
-  } else {
-    other_time_ms -=
-      update_rs_time +
-      ext_root_scan_time + mark_stack_scan_time +
-      scan_rs_time + obj_copy_time;
-  }
-
+  // PrintGCDetails output
   if (PrintGCDetails) {
+    bool print_marking_info =
+      _g1->mark_in_progress() && !last_pause_included_initial_mark;
+
     gclog_or_tty->print_cr("%s, %1.8lf secs]",
                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
                            elapsed_ms / 1000.0);
 
-    if (_satb_drain_time_set) {
+    if (print_marking_info) {
       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
-    }
-    if (_last_satb_drain_processed_buffers >= 0) {
       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
     }
+
     if (parallel) {
       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
-      print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
+      print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
+      print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
+      if (print_marking_info) {
+        print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
+      }
       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
-      print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
-      print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
       print_par_stats(2, "Termination", _par_last_termination_times_ms);
       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
-      print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
+      print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
 
       for (int i = 0; i < _parallel_gc_threads; i++) {
         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
+
+        double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
+                                   _par_last_mark_stack_scan_times_ms[i] +
+                                   _par_last_update_rs_times_ms[i] +
+                                   _par_last_scan_rs_times_ms[i] +
+                                   _par_last_obj_copy_times_ms[i] +
+                                   _par_last_termination_times_ms[i];
+
+        _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
       }
-      print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
-
-      print_stats(2, "Parallel Other", parallel_other_time);
-      print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
+      print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
+      print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
     } else {
+      print_stats(1, "Ext Root Scanning", ext_root_scan_time);
+      if (print_marking_info) {
+        print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
+      }
       print_stats(1, "Update RS", update_rs_time);
-      print_stats(2, "Processed Buffers",
-                  (int)update_rs_processed_buffers);
-      print_stats(1, "Ext Root Scanning", ext_root_scan_time);
-      print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
+      print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
       print_stats(1, "Scan RS", scan_rs_time);
       print_stats(1, "Object Copying", obj_copy_time);
     }
+    print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
 #ifndef PRODUCT
     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
@@ -1495,18 +1502,6 @@
     }
   }
 
-  _all_pause_times_ms->add(elapsed_ms);
-  if (update_stats) {
-    summary->record_total_time_ms(elapsed_ms);
-    summary->record_other_time_ms(other_time_ms);
-  }
-  for (int i = 0; i < _aux_num; ++i)
-    if (_cur_aux_times_set[i])
-      _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
-
-  // Reset marks-between-pauses counter.
-  _n_marks_since_last_pause = 0;
-
   // Update the efficiency-since-mark vars.
   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
@@ -1729,6 +1724,8 @@
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
+
+  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
 
 #define EXT_SIZE_FORMAT "%d%s"
@@ -2156,10 +2153,6 @@
   }
 }
 
-void G1CollectorPolicy::note_start_of_mark_thread() {
-  _mark_thread_startup_sec = os::elapsedTime();
-}
-
 class CountCSClosure: public HeapRegionClosure {
   G1CollectorPolicy* _g1_policy;
 public:
@@ -2176,17 +2169,17 @@
   _g1->collection_set_iterate(&cs_closure);
 }
 
-void G1CollectorPolicy::print_summary (int level,
-                                       const char* str,
-                                       NumberSeq* seq) const {
+void G1CollectorPolicy::print_summary(int level,
+                                      const char* str,
+                                      NumberSeq* seq) const {
   double sum = seq->sum();
   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
                 str, sum / 1000.0, seq->avg());
 }
 
-void G1CollectorPolicy::print_summary_sd (int level,
-                                          const char* str,
-                                          NumberSeq* seq) const {
+void G1CollectorPolicy::print_summary_sd(int level,
+                                         const char* str,
+                                         NumberSeq* seq) const {
   print_summary(level, str, seq);
   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                 seq->num(), seq->sd(), seq->maximum());
@@ -2249,20 +2242,18 @@
       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
       if (parallel) {
         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
+        print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
+        print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
-        print_summary(2, "Ext Root Scanning",
-                      body_summary->get_ext_root_scan_seq());
-        print_summary(2, "Mark Stack Scanning",
-                      body_summary->get_mark_stack_scan_seq());
         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
         print_summary(2, "Termination", body_summary->get_termination_seq());
-        print_summary(2, "Other", body_summary->get_parallel_other_seq());
+        print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
         {
           NumberSeq* other_parts[] = {
-            body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_mark_stack_scan_seq(),
+            body_summary->get_update_rs_seq(),
             body_summary->get_scan_rs_seq(),
             body_summary->get_obj_copy_seq(),
             body_summary->get_termination_seq()
@@ -2272,18 +2263,16 @@
           check_other_times(2, body_summary->get_parallel_other_seq(),
                             &calc_other_times_ms);
         }
-        print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
-        print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
       } else {
+        print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
+        print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
-        print_summary(1, "Ext Root Scanning",
-                      body_summary->get_ext_root_scan_seq());
-        print_summary(1, "Mark Stack Scanning",
-                      body_summary->get_mark_stack_scan_seq());
         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
       }
     }
+    print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
+    print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
     print_summary(1, "Other", summary->get_other_seq());
     {
       if (body_summary != NULL) {
@@ -2446,7 +2435,7 @@
   }
 };
 
-bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
+bool G1CollectorPolicy::assertMarkedBytesDataOK() {
   HRSortIndexIsOKClosure cl(_collectionSetChooser);
   _g1->heap_region_iterate(&cl);
   return true;
@@ -2532,12 +2521,6 @@
   }
 }
 
-void
-G1CollectorPolicy_BestRegionsFirst::
-record_collection_pause_start(double start_time_sec, size_t start_used) {
-  G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
-}
-
 class KnownGarbageClosure: public HeapRegionClosure {
   CollectionSetChooser* _hrSorted;
 
@@ -2645,20 +2628,20 @@
 };
 
 void
-G1CollectorPolicy_BestRegionsFirst::
-record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                   size_t max_live_bytes) {
-  double start;
-  if (G1PrintParCleanupStats) start = os::elapsedTime();
-  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
+G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
+  double start_sec;
+  if (G1PrintParCleanupStats) {
+    start_sec = os::elapsedTime();
+  }
 
   _collectionSetChooser->clearMarkedHeapRegions();
-  double clear_marked_end;
+  double clear_marked_end_sec;
   if (G1PrintParCleanupStats) {
-    clear_marked_end = os::elapsedTime();
-    gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
-                  (clear_marked_end - start)*1000.0);
+    clear_marked_end_sec = os::elapsedTime();
+    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
+                           (clear_marked_end_sec - start_sec) * 1000.0);
   }
+
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const size_t OverpartitionFactor = 4;
     const size_t MinWorkUnit = 8;
@@ -2677,27 +2660,25 @@
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
   }
-  double known_garbage_end;
+  double known_garbage_end_sec;
   if (G1PrintParCleanupStats) {
-    known_garbage_end = os::elapsedTime();
+    known_garbage_end_sec = os::elapsedTime();
     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
-                  (known_garbage_end - clear_marked_end)*1000.0);
+                      (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
   }
+
   _collectionSetChooser->sortMarkedHeapRegions();
-  double sort_end;
+  double end_sec = os::elapsedTime();
   if (G1PrintParCleanupStats) {
-    sort_end = os::elapsedTime();
     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
-                  (sort_end - known_garbage_end)*1000.0);
+                           (end_sec - known_garbage_end_sec) * 1000.0);
   }
 
-  record_concurrent_mark_cleanup_end_work2();
-  double work2_end;
-  if (G1PrintParCleanupStats) {
-    work2_end = os::elapsedTime();
-    gclog_or_tty->print_cr("  work2: %8.3f ms.",
-                  (work2_end - sort_end)*1000.0);
-  }
+  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
+  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
+  _cur_mark_stop_world_time_ms += elapsed_time_ms;
+  _prev_collection_pause_end_ms += elapsed_time_ms;
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
 }
 
 // Add the heap region at the head of the non-incremental collection set
@@ -2912,9 +2893,7 @@
 }
 #endif // !PRODUCT
 
-void
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
-                                                  double target_pause_time_ms) {
+void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -3115,14 +3094,3 @@
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 }
-
-void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
-  G1CollectorPolicy::record_full_collection_end();
-  _collectionSetChooser->updateAfterFullCollection();
-}
-
-void G1CollectorPolicy_BestRegionsFirst::
-record_collection_pause_end() {
-  G1CollectorPolicy::record_collection_pause_end();
-  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
-}
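
The record_collection_pause_end() rework above computes the pause's "Other" time by starting from the full elapsed time and subtracting the separately reported phases. A hedged condensation of that bookkeeping; the function below is a standalone sketch, not the HotSpot signature:

double pause_other_time_ms(double elapsed_ms,
                           double satb_drain_ms,         // zero unless marking is in progress
                           double parallel_or_known_ms,  // parallel time, or the sum of serial sub-phases
                           double clear_ct_ms) {
  double other = elapsed_ms;
  other -= satb_drain_ms;
  other -= parallel_or_known_ms;
  other -= clear_ct_ms;   // card-table clearing is printed on its own line
  return other;
}
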
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -74,7 +74,7 @@
     define_num_seq(termination) // parallel only
     define_num_seq(parallel_other) // parallel only
   define_num_seq(mark_closure)
-  define_num_seq(clear_ct)  // parallel only
+  define_num_seq(clear_ct)
 };
 
 class Summary: public PauseSummary,
@@ -84,7 +84,7 @@
 };
 
 class G1CollectorPolicy: public CollectorPolicy {
-protected:
+private:
   // The number of pauses during the execution.
   long _n_pauses;
 
@@ -106,10 +106,7 @@
     initialize_perm_generation(PermGen::MarkSweepCompact);
   }
 
-  virtual size_t default_init_heap_size() {
-    // Pick some reasonable default.
-    return 8*M;
-  }
+  CollectionSetChooser* _collectionSetChooser;
 
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
@@ -118,7 +115,6 @@
   double _cur_collection_par_time_ms;
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
-  bool   _satb_drain_time_set;
   double _cur_ref_proc_time_ms;
   double _cur_ref_enq_time_ms;
 
@@ -179,6 +175,11 @@
   double* _par_last_gc_worker_end_times_ms;
   double* _par_last_gc_worker_times_ms;
 
+  // Each worker's 'other' time, i.e. the elapsed time of the parallel
+  // phase of the pause minus the sum of the individual sub-phase
+  // times for a given worker thread.
+  double* _par_last_gc_worker_other_times_ms;
+
   // indicates whether we are in full young or partially young GC mode
   bool _full_young_gcs;
 
@@ -316,7 +317,6 @@
                                     double update_rs_processed_buffers,
                                     double goal_ms);
 
-protected:
   double _pause_time_target_ms;
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
@@ -554,7 +554,7 @@
     return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
   }
 
-protected:
+private:
   void print_stats(int level, const char* str, double value);
   void print_stats(int level, const char* str, int value);
 
@@ -588,10 +588,6 @@
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
 
-  // We track markings.
-  int _num_markings;
-  double _mark_thread_startup_sec;       // Time at startup of marking thread
-
   // Add a new GC of the given duration and end time to the record.
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
 
@@ -664,12 +660,6 @@
   // young list/collection set).
   size_t _inc_cset_predicted_bytes_to_copy;
 
-  // Info about marking.
-  int _n_marks; // Sticky at 2, so we know when we've done at least 2.
-
-  // The number of collection pauses at the end of the last mark.
-  size_t _n_pauses_at_mark_end;
-
   // Stash a pointer to the g1 heap.
   G1CollectedHeap* _g1;
 
@@ -737,8 +727,6 @@
   // Number of pauses between concurrent marking.
   size_t _pauses_btwn_concurrent_mark;
 
-  size_t _n_marks_since_last_pause;
-
   // At the end of a pause we check the heap occupancy and we decide
   // whether we will start a marking cycle during the next pause. If
   // we decide that we want to do that, we will set this parameter to
@@ -810,6 +798,11 @@
   bool predict_will_fit(size_t young_length, double base_time_ms,
                         size_t base_free_regions, double target_pause_time_ms);
 
+  // Count the number of bytes used in the CS.
+  void count_CS_bytes_used();
+
+  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
+
 public:
 
   G1CollectorPolicy();
@@ -836,22 +829,9 @@
   // This should be called after the heap is resized.
   void record_new_heap_size(size_t new_number_of_regions);
 
-protected:
-
-  // Count the number of bytes used in the CS.
-  void count_CS_bytes_used();
-
-  // Together these do the base cleanup-recording work.  Subclasses might
-  // want to put something between them.
-  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
-                                                size_t max_live_bytes);
-  void record_concurrent_mark_cleanup_end_work2();
-
-  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
-
 public:
 
-  virtual void init();
+  void init();
 
   // Create jstat counters for the policy.
   virtual void initialize_gc_policy_counters();
@@ -876,10 +856,9 @@
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
 
-  virtual void record_stop_world_start();
+  void record_stop_world_start();
 
-  virtual void record_collection_pause_start(double start_time_sec,
-                                             size_t start_used);
+  void record_collection_pause_start(double start_time_sec, size_t start_used);
 
   // Must currently be called while the world is stopped.
   void record_concurrent_mark_init_end(double
@@ -887,23 +866,22 @@
 
   void record_mark_closure_time(double mark_closure_time_ms);
 
-  virtual void record_concurrent_mark_remark_start();
-  virtual void record_concurrent_mark_remark_end();
+  void record_concurrent_mark_remark_start();
+  void record_concurrent_mark_remark_end();
 
-  virtual void record_concurrent_mark_cleanup_start();
-  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                  size_t max_live_bytes);
-  virtual void record_concurrent_mark_cleanup_completed();
+  void record_concurrent_mark_cleanup_start();
+  void record_concurrent_mark_cleanup_end();
+  void record_concurrent_mark_cleanup_completed();
 
-  virtual void record_concurrent_pause();
-  virtual void record_concurrent_pause_end();
+  void record_concurrent_pause();
+  void record_concurrent_pause_end();
 
-  virtual void record_collection_pause_end();
+  void record_collection_pause_end();
   void print_heap_transition();
 
   // Record the fact that a full collection occurred.
-  virtual void record_full_collection_start();
-  virtual void record_full_collection_end();
+  void record_full_collection_start();
+  void record_full_collection_end();
 
   void record_gc_worker_start_time(int worker_i, double ms) {
     _par_last_gc_worker_start_times_ms[worker_i] = ms;
@@ -918,11 +896,12 @@
   }
 
   void record_satb_drain_time(double ms) {
+    assert(_g1->mark_in_progress(), "shouldn't be here otherwise");
     _cur_satb_drain_time_ms = ms;
-    _satb_drain_time_set    = true;
   }
 
-  void record_satb_drain_processed_buffers (int processed_buffers) {
+  void record_satb_drain_processed_buffers(int processed_buffers) {
+    assert(_g1->mark_in_progress(), "shouldn't be here otherwise");
     _last_satb_drain_processed_buffers = processed_buffers;
   }
 
@@ -1022,7 +1001,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  virtual void choose_collection_set(double target_pause_time_ms) = 0;
+  void choose_collection_set(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1107,19 +1086,12 @@
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount();
-
-  // note start of mark thread
-  void note_start_of_mark_thread();
-
-  // The marked bytes of the "r" has changed; reclassify it's desirability
-  // for marking.  Also asserts that "r" is eligible for a CS.
-  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
+  size_t expansion_amount();
 
 #ifndef PRODUCT
   // Check any appropriate marked bytes info, asserting false if
   // something's wrong, else returning "true".
-  virtual bool assertMarkedBytesDataOK() = 0;
+  bool assertMarkedBytesDataOK();
 #endif
 
   // Print tracing information.
@@ -1182,10 +1154,10 @@
     return ret;
   }
 
+private:
   //
   // Survivor regions policy.
   //
-protected:
 
   // Current tenuring threshold, set to 0 if the collector reaches the
   // maximum amount of suvivors regions.
@@ -1265,51 +1237,6 @@
 
 };
 
-// This encapsulates a particular strategy for a g1 Collector.
-//
-//      Start a concurrent mark when our heap size is n bytes
-//            greater then our heap size was at the last concurrent
-//            mark.  Where n is a function of the CMSTriggerRatio
-//            and the MinHeapFreeRatio.
-//
-//      Start a g1 collection pause when we have allocated the
-//            average number of bytes currently being freed in
-//            a collection, but only if it is at least one region
-//            full
-//
-//      Resize Heap based on desired
-//      allocation space, where desired allocation space is
-//      a function of survival rate and desired future to size.
-//
-//      Choose collection set by first picking all older regions
-//      which have a survival rate which beats our projected young
-//      survival rate.  Then fill out the number of needed regions
-//      with young regions.
-
-class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
-  CollectionSetChooser* _collectionSetChooser;
-
-  virtual void choose_collection_set(double target_pause_time_ms);
-  virtual void record_collection_pause_start(double start_time_sec,
-                                             size_t start_used);
-  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                  size_t max_live_bytes);
-  virtual void record_full_collection_end();
-
-public:
-  G1CollectorPolicy_BestRegionsFirst() {
-    _collectionSetChooser = new CollectionSetChooser();
-  }
-  void record_collection_pause_end();
-  // This is not needed any more, after the CSet choosing code was
-  // changed to use the pause prediction work. But let's leave the
-  // hook in just in case.
-  void note_change_in_marked_bytes(HeapRegion* r) { }
-#ifndef PRODUCT
-  bool assertMarkedBytesDataOK();
-#endif
-};
-
 // This should move to some place more general...
 
 // If we have "n" measurements, and we've kept track of their "sum" and the
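
The header change above folds the only concrete strategy, G1CollectorPolicy_BestRegionsFirst, into G1CollectorPolicy itself: the CollectionSetChooser and choose_collection_set() move into the base class and the previously virtual record_* hooks lose their virtual qualifier. A before/after sketch of that kind of single-subclass collapse, with placeholder names:

class PolicyBefore {                                   // single-subclass hierarchy
public:
  virtual ~PolicyBefore() {}
  virtual void choose_collection_set(double target_pause_time_ms) = 0;
};

class PolicyAfter {                                    // hierarchy collapsed
public:
  void choose_collection_set(double target_pause_time_ms) {
    (void)target_pause_time_ms;                        // direct, non-virtual dispatch
  }
};
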
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -122,10 +122,10 @@
   void set_try_claimed() { _try_claimed = true; }
 
   void scanCard(size_t index, HeapRegion *r) {
-    DirtyCardToOopClosure* cl =
-      r->new_dcto_closure(_oc,
-                         CardTableModRefBS::Precise,
-                         HeapRegionDCTOC::IntoCSFilterKind);
+    // Stack allocate the DirtyCardToOopClosure instance
+    HeapRegionDCTOC cl(_g1h, r, _oc,
+                       CardTableModRefBS::Precise,
+                       HeapRegionDCTOC::IntoCSFilterKind);
 
     // Set the "from" region in the closure.
     _oc->set_region(r);
@@ -140,7 +140,7 @@
       // scans (the rsets of the regions in the cset can intersect).
       _ct_bs->set_card_claimed(index);
       _cards_done++;
-      cl->do_MemRegion(mr);
+      cl.do_MemRegion(mr);
     }
   }
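The scanCard() change above replaces a per-card heap allocation of the closure with a stack-allocated instance that dies when the scan returns. A minimal sketch of that pattern, using hypothetical names rather than the HotSpot types:

#include <cstddef>
#include <cstdio>

struct CardVisitor {                      // stand-in for HeapRegionDCTOC
  std::size_t visited_words = 0;
  void do_mem_region(const char* base, std::size_t words) {
    (void)base;
    visited_words += words;               // pretend to walk the oops in range
  }
};

void scan_card(const char* card_base, std::size_t card_words) {
  CardVisitor cl;                         // stack allocated, no new/delete
  cl.do_mem_region(card_base, card_words);
  std::printf("visited %zu words\n", cl.visited_words);
}

int main() {
  char card[512] = {};
  scan_card(card, 64);                    // 512-byte card, 8-byte words
  return 0;
}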
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -340,14 +340,6 @@
   init_top_at_mark_start();
 }
 
-DirtyCardToOopClosure*
-HeapRegion::new_dcto_closure(OopClosure* cl,
-                             CardTableModRefBS::PrecisionStyle precision,
-                             HeapRegionDCTOC::FilterKind fk) {
-  return new HeapRegionDCTOC(G1CollectedHeap::heap(),
-                             this, cl, precision, fk);
-}
-
 void HeapRegion::hr_clear(bool par, bool clear_space) {
   assert(_humongous_type == NotHumongous,
          "we should have already filtered out humongous regions");
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -431,6 +431,14 @@
     return _humongous_start_region;
   }
 
+  // Same as Space::is_in_reserved(), but uses the original size of the region.
+  // The original size differs only for "starts humongous" regions: their _end
+  // is set up to be the end of the last "continues humongous" region of the
+  // corresponding humongous object.
+  bool is_in_reserved_raw(const void* p) const {
+    return _bottom <= p && p < _orig_end;
+  }
+
   // Makes the current region be a "starts humongous" region, i.e.,
   // the first region in a series of one or more contiguous regions
   // that will contain a single "humongous" object. The two parameters
@@ -569,11 +577,6 @@
   // allocated in the current region before the last call to "save_mark".
   void oop_before_save_marks_iterate(OopClosure* cl);
 
-  DirtyCardToOopClosure*
-  new_dcto_closure(OopClosure* cl,
-                   CardTableModRefBS::PrecisionStyle precision,
-                   HeapRegionDCTOC::FilterKind fk);
-
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
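A minimal sketch of the is_in_reserved_raw() idea added above (simplified, hypothetical layout, not the real HeapRegion): _orig_end keeps the fixed one-region boundary even after _end has been extended over a whole humongous object, so the raw check is the tighter one.

#include <cassert>

struct Region {
  const char* _bottom;
  const char* _end;       // extended for "starts humongous" regions
  const char* _orig_end;  // always the original one-region boundary

  bool is_in_reserved(const void* p) const {      // possibly extended end
    return _bottom <= p && p < _end;
  }
  bool is_in_reserved_raw(const void* p) const {  // original end only
    return _bottom <= p && p < _orig_end;
  }
};

int main() {
  char heap[64] = {};
  Region r = { heap, heap + 64 /* extended */, heap + 32 /* original */ };
  assert(r.is_in_reserved(heap + 40));            // within the extended end
  assert(!r.is_in_reserved_raw(heap + 40));       // but past the original end
  return 0;
}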
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -143,7 +143,11 @@
     // If the test below fails, then this table was reused concurrently
     // with this operation.  This is OK, since the old table was coarsened,
     // and adding a bit to the new table is never incorrect.
-    if (loc_hr->is_in_reserved(from)) {
+    // If the table used to belong to a continues humongous region and is
+    // now reused for the corresponding start humongous region, we need to
+    // make sure that we detect this. Thus, we call is_in_reserved_raw()
+    // instead of just is_in_reserved() here.
+    if (loc_hr->is_in_reserved_raw(from)) {
       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
       CardIdx_t from_card = (CardIdx_t)
           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
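The card-index arithmetic in the hunk above converts a word offset from the region bottom into a card index. A small standalone sketch with assumed constants (8-byte heap words, 512-byte cards):

#include <cstddef>
#include <cstdio>

static const int LogHeapWordSize = 3;   // 8-byte heap words (64-bit)
static const int card_shift      = 9;   // 512-byte cards

int main() {
  const char* bottom = reinterpret_cast<const char*>(0x100000);
  const char* from   = bottom + 4096;                        // interior pointer
  std::size_t hw_offset = (from - bottom) >> LogHeapWordSize; // offset in words
  int from_card = (int)(hw_offset >> (card_shift - LogHeapWordSize));
  std::printf("card index %d\n", from_card);                 // 4096/512 = 8
  return 0;
}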
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
@@ -165,6 +166,20 @@
   }
 }
 
+void VM_CGC_Operation::acquire_pending_list_lock() {
+  // The caller may block while communicating
+  // with the SLT thread in order to acquire/release the PLL.
+  ConcurrentMarkThread::slt()->
+    manipulatePLL(SurrogateLockerThread::acquirePLL);
+}
+
+void VM_CGC_Operation::release_and_notify_pending_list_lock() {
+  // The caller may block while communicating
+  // with the SLT thread in order to acquire/release the PLL.
+  ConcurrentMarkThread::slt()->
+    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
+}
+
 void VM_CGC_Operation::doit() {
   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -180,12 +195,19 @@
 }
 
 bool VM_CGC_Operation::doit_prologue() {
+  // Note that the relative order of the locks must match that in
+  // VM_GC_Operation::doit_prologue() or deadlocks can occur.
+  acquire_pending_list_lock();
+
   Heap_lock->lock();
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
   return true;
 }
 
 void VM_CGC_Operation::doit_epilogue() {
+  // Note that the relative order of the unlocks must match that in
+  // VM_GC_Operation::doit_epilogue().
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
+  release_and_notify_pending_list_lock();
 }
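The prologue/epilogue changes above impose a fixed lock order: take the pending-list lock before Heap_lock, and release them in the reverse order. A minimal sketch of that ordering, using std::mutex stand-ins rather than the VM's lock types:

#include <mutex>

std::mutex pending_list_lock;   // stands in for the PLL
std::mutex heap_lock;           // stands in for Heap_lock

void prologue() {
  pending_list_lock.lock();     // 1. PLL first
  heap_lock.lock();             // 2. then the heap lock
}

void epilogue() {
  heap_lock.unlock();           // 1. heap lock first
  pending_list_lock.unlock();   // 2. then the PLL
}

int main() {
  prologue();
  // ... safepoint work would happen here ...
  epilogue();
  return 0;
}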
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -93,11 +93,17 @@
   }
 };
 
-// Concurrent GC stop-the-world operations such as initial and final mark;
+// Concurrent GC stop-the-world operations such as remark and cleanup;
 // consider sharing these with CMS's counterparts.
 class VM_CGC_Operation: public VM_Operation {
   VoidClosure* _cl;
   const char* _printGCMessage;
+
+protected:
+  // java.lang.ref.Reference support
+  void acquire_pending_list_lock();
+  void release_and_notify_pending_list_lock();
+
 public:
   VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
     : _cl(cl), _printGCMessage(printGCMsg) { }
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -224,6 +224,8 @@
   MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
   assert(_buffer == empty, "Should be empty");
   assert(msg != empty, "empty message");
+  assert(!Heap_lock->owned_by_self(), "Heap_lock owned by requesting thread");
+
   _buffer = msg;
   while (_buffer != empty) {
     _monitor.notify();
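The new assert above checks that the thread asking the surrogate locker thread for help does not already hold Heap_lock before it blocks on the monitor. A minimal sketch of that kind of self-ownership check (hand-rolled owner tracking, since std::mutex has no owned_by_self()):

#include <cassert>
#include <mutex>
#include <thread>

struct OwnedMutex {                       // stand-in for the VM's Mutex
  std::mutex m;
  std::thread::id owner;
  void lock()   { m.lock(); owner = std::this_thread::get_id(); }
  void unlock() { owner = std::thread::id(); m.unlock(); }
  bool owned_by_self() const { return owner == std::this_thread::get_id(); }
};

OwnedMutex heap_lock_stub;

void send_message_to_slt() {
  // Blocking here while still holding the heap lock risks deadlock,
  // which is what the new assert above guards against.
  assert(!heap_lock_stub.owned_by_self() && "Heap_lock owned by requesting thread");
  // ... notify the surrogate locker thread and wait for it to finish ...
}

int main() {
  send_message_to_slt();                  // OK: heap lock not held here
  return 0;
}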
--- a/src/share/vm/memory/referenceProcessor.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/memory/referenceProcessor.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -105,19 +105,22 @@
   _discovery_is_mt     = mt_discovery;
   _num_q               = MAX2(1, mt_processing_degree);
   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
-  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList,
+  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
                                           _max_num_q * number_of_subclasses_of_ref());
-  if (_discoveredSoftRefs == NULL) {
+  if (_discovered_refs == NULL) {
     vm_exit_during_initialization("Could not allocate RefProc Array");
   }
+  _discoveredSoftRefs    = &_discovered_refs[0];
   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
-  // Initialized all entries to NULL
+
+  // Initialize all entries to NULL
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    _discoveredSoftRefs[i].set_head(NULL);
-    _discoveredSoftRefs[i].set_length(0);
+    _discovered_refs[i].set_head(NULL);
+    _discovered_refs[i].set_length(0);
   }
+
   // If we do barriers, cache a copy of the barrier set.
   if (discovered_list_needs_barrier) {
     _bs = Universe::heap()->barrier_set();
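The ReferenceProcessor change above allocates one master _discovered_refs array and points the per-kind lists (soft, weak, final, phantom) into it at fixed offsets, so iteration and clearing can walk a single array. A minimal sketch of that carving, with hypothetical types rather than the HotSpot ones:

#include <cstddef>
#include <vector>

struct DiscoveredList { void* head = nullptr; std::size_t length = 0; };

int main() {
  const int max_num_q = 4;              // queues per reference kind
  const int kinds     = 4;              // soft, weak, final, phantom
  std::vector<DiscoveredList> discovered_refs(max_num_q * kinds);

  DiscoveredList* soft    = &discovered_refs[0];
  DiscoveredList* weak    = &soft[max_num_q];       // pointers into the
  DiscoveredList* final_  = &weak[max_num_q];       // master array
  DiscoveredList* phantom = &final_[max_num_q];

  // Iterating the master array visits every sub-list exactly once.
  for (std::size_t i = 0; i < discovered_refs.size(); i++) {
    discovered_refs[i].head   = nullptr;
    discovered_refs[i].length = 0;
  }
  (void)weak; (void)final_; (void)phantom;
  return 0;
}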
@@ -129,7 +132,7 @@
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    guarantee(_discoveredSoftRefs[i].is_empty(),
+    guarantee(_discovered_refs[i].is_empty(),
               "Found non-empty discovered list");
   }
 }
@@ -138,9 +141,9 @@
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
-      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
+      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
     } else {
-      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
+      f->do_oop((oop*)_discovered_refs[i].adr_head());
     }
   }
 }
@@ -423,15 +426,15 @@
   AbstractRefProcTaskExecutor* task_executor) {
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
-    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
+    RefProcEnqueueTask tsk(*this, _discovered_refs,
                            pending_list_addr, _max_num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
     for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
-      _discoveredSoftRefs[i].set_head(NULL);
-      _discoveredSoftRefs[i].set_length(0);
+      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
+      _discovered_refs[i].set_head(NULL);
+      _discovered_refs[i].set_length(0);
     }
   }
 }
@@ -691,7 +694,7 @@
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
     }
-    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
+    abandon_partial_discovered_list(_discovered_refs[i]);
   }
 }
 
@@ -952,7 +955,7 @@
         "\nScrubbing %s discovered list of Null referents",
         list_name(i));
     }
-    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
+    clean_up_discovered_reflist(_discovered_refs[i]);
   }
 }
 
@@ -1402,7 +1405,7 @@
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    clear_discovered_references(_discoveredSoftRefs[i]);
+    clear_discovered_references(_discovered_refs[i]);
   }
 }
 
--- a/src/share/vm/memory/referenceProcessor.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/memory/referenceProcessor.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -255,7 +255,11 @@
   int             _num_q;
   // The maximum MT'ness degree of the queues below
   int             _max_num_q;
-  // Arrays of lists of oops, one per thread
+
+  // Master array of discovered oops
+  DiscoveredList* _discovered_refs;
+
+  // Arrays of lists of oops, one per thread (pointers into master array above)
   DiscoveredList* _discoveredSoftRefs;
   DiscoveredList* _discoveredWeakRefs;
   DiscoveredList* _discoveredFinalRefs;
@@ -267,7 +271,8 @@
   int num_q()                              { return _num_q; }
   int max_num_q()                          { return _max_num_q; }
   void set_active_mt_degree(int v)         { _num_q = v; }
-  DiscoveredList* discovered_soft_refs()   { return _discoveredSoftRefs; }
+
+  DiscoveredList* discovered_refs()        { return _discovered_refs; }
 
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
@@ -411,6 +416,7 @@
   // constructor
   ReferenceProcessor():
     _span((HeapWord*)NULL, (HeapWord*)NULL),
+    _discovered_refs(NULL),
     _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
     _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
     _discovering_refs(false),
--- a/src/share/vm/memory/universe.cpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/memory/universe.cpp	Thu Nov 10 04:46:08 2011 -0800
@@ -893,7 +893,7 @@
 
   } else if (UseG1GC) {
 #ifndef SERIALGC
-    G1CollectorPolicy* g1p = new G1CollectorPolicy_BestRegionsFirst();
+    G1CollectorPolicy* g1p = new G1CollectorPolicy();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
 #else  // SERIALGC
--- a/src/share/vm/precompiled.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,330 +0,0 @@
-/*
- * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// Precompiled headers are turned off for Sun Studio,
-// or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles.
-#ifndef DONT_USE_PRECOMPILED_HEADER
-
-# include "asm/assembler.hpp"
-# include "asm/assembler.inline.hpp"
-# include "asm/codeBuffer.hpp"
-# include "asm/register.hpp"
-# include "ci/ciArray.hpp"
-# include "ci/ciArrayKlass.hpp"
-# include "ci/ciClassList.hpp"
-# include "ci/ciConstant.hpp"
-# include "ci/ciConstantPoolCache.hpp"
-# include "ci/ciEnv.hpp"
-# include "ci/ciExceptionHandler.hpp"
-# include "ci/ciField.hpp"
-# include "ci/ciFlags.hpp"
-# include "ci/ciInstance.hpp"
-# include "ci/ciInstanceKlass.hpp"
-# include "ci/ciInstanceKlassKlass.hpp"
-# include "ci/ciKlass.hpp"
-# include "ci/ciKlassKlass.hpp"
-# include "ci/ciMethod.hpp"
-# include "ci/ciNullObject.hpp"
-# include "ci/ciObjArrayKlass.hpp"
-# include "ci/ciObject.hpp"
-# include "ci/ciObjectFactory.hpp"
-# include "ci/ciSignature.hpp"
-# include "ci/ciStreams.hpp"
-# include "ci/ciSymbol.hpp"
-# include "ci/ciType.hpp"
-# include "ci/ciTypeArrayKlass.hpp"
-# include "ci/ciUtilities.hpp"
-# include "ci/compilerInterface.hpp"
-# include "classfile/classFileParser.hpp"
-# include "classfile/classFileStream.hpp"
-# include "classfile/classLoader.hpp"
-# include "classfile/javaClasses.hpp"
-# include "classfile/symbolTable.hpp"
-# include "classfile/systemDictionary.hpp"
-# include "classfile/vmSymbols.hpp"
-# include "code/codeBlob.hpp"
-# include "code/codeCache.hpp"
-# include "code/compressedStream.hpp"
-# include "code/debugInfo.hpp"
-# include "code/debugInfoRec.hpp"
-# include "code/dependencies.hpp"
-# include "code/exceptionHandlerTable.hpp"
-# include "code/jvmticmlr.h"
-# include "code/location.hpp"
-# include "code/nmethod.hpp"
-# include "code/oopRecorder.hpp"
-# include "code/pcDesc.hpp"
-# include "code/relocInfo.hpp"
-# include "code/stubs.hpp"
-# include "code/vmreg.hpp"
-# include "compiler/disassembler.hpp"
-# include "compiler/methodLiveness.hpp"
-# include "compiler/oopMap.hpp"
-# include "gc_implementation/shared/adaptiveSizePolicy.hpp"
-# include "gc_implementation/shared/ageTable.hpp"
-# include "gc_implementation/shared/allocationStats.hpp"
-# include "gc_implementation/shared/cSpaceCounters.hpp"
-# include "gc_implementation/shared/collectorCounters.hpp"
-# include "gc_implementation/shared/gSpaceCounters.hpp"
-# include "gc_implementation/shared/gcStats.hpp"
-# include "gc_implementation/shared/gcUtil.hpp"
-# include "gc_implementation/shared/generationCounters.hpp"
-# include "gc_implementation/shared/immutableSpace.hpp"
-# include "gc_implementation/shared/markSweep.hpp"
-# include "gc_implementation/shared/markSweep.inline.hpp"
-# include "gc_implementation/shared/mutableSpace.hpp"
-# include "gc_implementation/shared/spaceCounters.hpp"
-# include "gc_implementation/shared/spaceDecorator.hpp"
-# include "gc_interface/collectedHeap.hpp"
-# include "gc_interface/collectedHeap.inline.hpp"
-# include "gc_interface/gcCause.hpp"
-# include "interpreter/abstractInterpreter.hpp"
-# include "interpreter/bytecode.hpp"
-# include "interpreter/bytecodeHistogram.hpp"
-# include "interpreter/bytecodeInterpreter.hpp"
-# include "interpreter/bytecodeInterpreter.inline.hpp"
-# include "interpreter/bytecodeTracer.hpp"
-# include "interpreter/bytecodes.hpp"
-# include "interpreter/cppInterpreter.hpp"
-# include "interpreter/interpreter.hpp"
-# include "interpreter/invocationCounter.hpp"
-# include "interpreter/linkResolver.hpp"
-# include "interpreter/templateInterpreter.hpp"
-# include "interpreter/templateTable.hpp"
-# include "jvmtifiles/jvmti.h"
-# include "memory/allocation.hpp"
-# include "memory/allocation.inline.hpp"
-# include "memory/barrierSet.hpp"
-# include "memory/barrierSet.inline.hpp"
-# include "memory/blockOffsetTable.hpp"
-# include "memory/blockOffsetTable.inline.hpp"
-# include "memory/cardTableModRefBS.hpp"
-# include "memory/collectorPolicy.hpp"
-# include "memory/compactingPermGenGen.hpp"
-# include "memory/defNewGeneration.hpp"
-# include "memory/gcLocker.hpp"
-# include "memory/genCollectedHeap.hpp"
-# include "memory/genOopClosures.hpp"
-# include "memory/genRemSet.hpp"
-# include "memory/generation.hpp"
-# include "memory/generation.inline.hpp"
-# include "memory/heap.hpp"
-# include "memory/iterator.hpp"
-# include "memory/memRegion.hpp"
-# include "memory/modRefBarrierSet.hpp"
-# include "memory/oopFactory.hpp"
-# include "memory/permGen.hpp"
-# include "memory/referencePolicy.hpp"
-# include "memory/referenceProcessor.hpp"
-# include "memory/resourceArea.hpp"
-# include "memory/sharedHeap.hpp"
-# include "memory/space.hpp"
-# include "memory/space.inline.hpp"
-# include "memory/specialized_oop_closures.hpp"
-# include "memory/threadLocalAllocBuffer.hpp"
-# include "memory/threadLocalAllocBuffer.inline.hpp"
-# include "memory/universe.hpp"
-# include "memory/universe.inline.hpp"
-# include "memory/watermark.hpp"
-# include "oops/arrayKlass.hpp"
-# include "oops/arrayOop.hpp"
-# include "oops/constMethodOop.hpp"
-# include "oops/constantPoolOop.hpp"
-# include "oops/cpCacheOop.hpp"
-# include "oops/instanceKlass.hpp"
-# include "oops/instanceOop.hpp"
-# include "oops/instanceRefKlass.hpp"
-# include "oops/klass.hpp"
-# include "oops/klassOop.hpp"
-# include "oops/klassPS.hpp"
-# include "oops/klassVtable.hpp"
-# include "oops/markOop.hpp"
-# include "oops/markOop.inline.hpp"
-# include "oops/methodDataOop.hpp"
-# include "oops/methodOop.hpp"
-# include "oops/objArrayKlass.hpp"
-# include "oops/objArrayOop.hpp"
-# include "oops/oop.hpp"
-# include "oops/oop.inline.hpp"
-# include "oops/oop.inline2.hpp"
-# include "oops/oopsHierarchy.hpp"
-# include "oops/symbol.hpp"
-# include "oops/typeArrayKlass.hpp"
-# include "oops/typeArrayOop.hpp"
-# include "prims/jni.h"
-# include "prims/jvm.h"
-# include "prims/jvmtiExport.hpp"
-# include "prims/methodHandles.hpp"
-# include "runtime/arguments.hpp"
-# include "runtime/atomic.hpp"
-# include "runtime/deoptimization.hpp"
-# include "runtime/extendedPC.hpp"
-# include "runtime/fieldDescriptor.hpp"
-# include "runtime/fieldType.hpp"
-# include "runtime/frame.hpp"
-# include "runtime/frame.inline.hpp"
-# include "runtime/globals.hpp"
-# include "runtime/globals_extension.hpp"
-# include "runtime/handles.hpp"
-# include "runtime/handles.inline.hpp"
-# include "runtime/icache.hpp"
-# include "runtime/init.hpp"
-# include "runtime/interfaceSupport.hpp"
-# include "runtime/java.hpp"
-# include "runtime/javaCalls.hpp"
-# include "runtime/javaFrameAnchor.hpp"
-# include "runtime/jniHandles.hpp"
-# include "runtime/monitorChunk.hpp"
-# include "runtime/mutex.hpp"
-# include "runtime/mutexLocker.hpp"
-# include "runtime/objectMonitor.hpp"
-# include "runtime/orderAccess.hpp"
-# include "runtime/os.hpp"
-# include "runtime/osThread.hpp"
-# include "runtime/perfData.hpp"
-# include "runtime/perfMemory.hpp"
-# include "runtime/prefetch.hpp"
-# include "runtime/reflection.hpp"
-# include "runtime/reflectionUtils.hpp"
-# include "runtime/registerMap.hpp"
-# include "runtime/safepoint.hpp"
-# include "runtime/sharedRuntime.hpp"
-# include "runtime/signature.hpp"
-# include "runtime/stackValue.hpp"
-# include "runtime/stackValueCollection.hpp"
-# include "runtime/stubCodeGenerator.hpp"
-# include "runtime/stubRoutines.hpp"
-# include "runtime/synchronizer.hpp"
-# include "runtime/thread.hpp"
-# include "runtime/threadLocalStorage.hpp"
-# include "runtime/timer.hpp"
-# include "runtime/unhandledOops.hpp"
-# include "runtime/vframe.hpp"
-# include "runtime/virtualspace.hpp"
-# include "runtime/vmThread.hpp"
-# include "runtime/vm_operations.hpp"
-# include "runtime/vm_version.hpp"
-# include "services/lowMemoryDetector.hpp"
-# include "services/memoryPool.hpp"
-# include "services/memoryService.hpp"
-# include "services/memoryUsage.hpp"
-# include "utilities/accessFlags.hpp"
-# include "utilities/array.hpp"
-# include "utilities/bitMap.hpp"
-# include "utilities/bitMap.inline.hpp"
-# include "utilities/constantTag.hpp"
-# include "utilities/copy.hpp"
-# include "utilities/debug.hpp"
-# include "utilities/exceptions.hpp"
-# include "utilities/globalDefinitions.hpp"
-# include "utilities/growableArray.hpp"
-# include "utilities/hashtable.hpp"
-# include "utilities/histogram.hpp"
-# include "utilities/macros.hpp"
-# include "utilities/numberSeq.hpp"
-# include "utilities/ostream.hpp"
-# include "utilities/preserveException.hpp"
-# include "utilities/sizes.hpp"
-# include "utilities/taskqueue.hpp"
-# include "utilities/top.hpp"
-# include "utilities/utf8.hpp"
-# include "utilities/workgroup.hpp"
-# include "utilities/yieldingWorkgroup.hpp"
-#ifdef COMPILER2
-# include "libadt/dict.hpp"
-# include "libadt/port.hpp"
-# include "libadt/set.hpp"
-# include "libadt/vectset.hpp"
-# include "opto/addnode.hpp"
-# include "opto/adlcVMDeps.hpp"
-# include "opto/block.hpp"
-# include "opto/c2_globals.hpp"
-# include "opto/callnode.hpp"
-# include "opto/cfgnode.hpp"
-# include "opto/compile.hpp"
-# include "opto/connode.hpp"
-# include "opto/idealGraphPrinter.hpp"
-# include "opto/loopnode.hpp"
-# include "opto/machnode.hpp"
-# include "opto/matcher.hpp"
-# include "opto/memnode.hpp"
-# include "opto/mulnode.hpp"
-# include "opto/multnode.hpp"
-# include "opto/node.hpp"
-# include "opto/opcodes.hpp"
-# include "opto/optoreg.hpp"
-# include "opto/phase.hpp"
-# include "opto/phaseX.hpp"
-# include "opto/regalloc.hpp"
-# include "opto/regmask.hpp"
-# include "opto/runtime.hpp"
-# include "opto/subnode.hpp"
-# include "opto/type.hpp"
-# include "opto/vectornode.hpp"
-#endif // COMPILER2
-#ifdef COMPILER1
-# include "c1/c1_Compilation.hpp"
-# include "c1/c1_Defs.hpp"
-# include "c1/c1_FrameMap.hpp"
-# include "c1/c1_LIR.hpp"
-# include "c1/c1_MacroAssembler.hpp"
-# include "c1/c1_ValueType.hpp"
-# include "c1/c1_globals.hpp"
-#endif // COMPILER1
-#ifndef SERIALGC
-# include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
-# include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
-# include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
-# include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-# include "gc_implementation/concurrentMarkSweep/freeList.hpp"
-# include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
-# include "gc_implementation/g1/dirtyCardQueue.hpp"
-# include "gc_implementation/g1/g1BlockOffsetTable.hpp"
-# include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
-# include "gc_implementation/g1/g1OopClosures.hpp"
-# include "gc_implementation/g1/g1_globals.hpp"
-# include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
-# include "gc_implementation/g1/ptrQueue.hpp"
-# include "gc_implementation/g1/satbQueue.hpp"
-# include "gc_implementation/parNew/parGCAllocBuffer.hpp"
-# include "gc_implementation/parNew/parOopClosures.hpp"
-# include "gc_implementation/parallelScavenge/objectStartArray.hpp"
-# include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
-# include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-# include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
-# include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
-# include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
-# include "gc_implementation/parallelScavenge/psGenerationCounters.hpp"
-# include "gc_implementation/parallelScavenge/psOldGen.hpp"
-# include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-# include "gc_implementation/parallelScavenge/psPermGen.hpp"
-# include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
-# include "gc_implementation/parallelScavenge/psYoungGen.hpp"
-# include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
-# include "gc_implementation/shared/gcPolicyCounters.hpp"
-#endif // SERIALGC
-
-#endif // !DONT_USE_PRECOMPILED_HEADER
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/precompiled/precompiled.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Precompiled headers are turned off for Sun Studio,
+// or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles.
+#ifndef DONT_USE_PRECOMPILED_HEADER
+
+# include "asm/assembler.hpp"
+# include "asm/assembler.inline.hpp"
+# include "asm/codeBuffer.hpp"
+# include "asm/register.hpp"
+# include "ci/ciArray.hpp"
+# include "ci/ciArrayKlass.hpp"
+# include "ci/ciClassList.hpp"
+# include "ci/ciConstant.hpp"
+# include "ci/ciConstantPoolCache.hpp"
+# include "ci/ciEnv.hpp"
+# include "ci/ciExceptionHandler.hpp"
+# include "ci/ciField.hpp"
+# include "ci/ciFlags.hpp"
+# include "ci/ciInstance.hpp"
+# include "ci/ciInstanceKlass.hpp"
+# include "ci/ciInstanceKlassKlass.hpp"
+# include "ci/ciKlass.hpp"
+# include "ci/ciKlassKlass.hpp"
+# include "ci/ciMethod.hpp"
+# include "ci/ciNullObject.hpp"
+# include "ci/ciObjArrayKlass.hpp"
+# include "ci/ciObject.hpp"
+# include "ci/ciObjectFactory.hpp"
+# include "ci/ciSignature.hpp"
+# include "ci/ciStreams.hpp"
+# include "ci/ciSymbol.hpp"
+# include "ci/ciType.hpp"
+# include "ci/ciTypeArrayKlass.hpp"
+# include "ci/ciUtilities.hpp"
+# include "ci/compilerInterface.hpp"
+# include "classfile/classFileParser.hpp"
+# include "classfile/classFileStream.hpp"
+# include "classfile/classLoader.hpp"
+# include "classfile/javaClasses.hpp"
+# include "classfile/symbolTable.hpp"
+# include "classfile/systemDictionary.hpp"
+# include "classfile/vmSymbols.hpp"
+# include "code/codeBlob.hpp"
+# include "code/codeCache.hpp"
+# include "code/compressedStream.hpp"
+# include "code/debugInfo.hpp"
+# include "code/debugInfoRec.hpp"
+# include "code/dependencies.hpp"
+# include "code/exceptionHandlerTable.hpp"
+# include "code/jvmticmlr.h"
+# include "code/location.hpp"
+# include "code/nmethod.hpp"
+# include "code/oopRecorder.hpp"
+# include "code/pcDesc.hpp"
+# include "code/relocInfo.hpp"
+# include "code/stubs.hpp"
+# include "code/vmreg.hpp"
+# include "compiler/disassembler.hpp"
+# include "compiler/methodLiveness.hpp"
+# include "compiler/oopMap.hpp"
+# include "gc_implementation/shared/adaptiveSizePolicy.hpp"
+# include "gc_implementation/shared/ageTable.hpp"
+# include "gc_implementation/shared/allocationStats.hpp"
+# include "gc_implementation/shared/cSpaceCounters.hpp"
+# include "gc_implementation/shared/collectorCounters.hpp"
+# include "gc_implementation/shared/gSpaceCounters.hpp"
+# include "gc_implementation/shared/gcStats.hpp"
+# include "gc_implementation/shared/gcUtil.hpp"
+# include "gc_implementation/shared/generationCounters.hpp"
+# include "gc_implementation/shared/immutableSpace.hpp"
+# include "gc_implementation/shared/markSweep.hpp"
+# include "gc_implementation/shared/markSweep.inline.hpp"
+# include "gc_implementation/shared/mutableSpace.hpp"
+# include "gc_implementation/shared/spaceCounters.hpp"
+# include "gc_implementation/shared/spaceDecorator.hpp"
+# include "gc_interface/collectedHeap.hpp"
+# include "gc_interface/collectedHeap.inline.hpp"
+# include "gc_interface/gcCause.hpp"
+# include "interpreter/abstractInterpreter.hpp"
+# include "interpreter/bytecode.hpp"
+# include "interpreter/bytecodeHistogram.hpp"
+# include "interpreter/bytecodeInterpreter.hpp"
+# include "interpreter/bytecodeInterpreter.inline.hpp"
+# include "interpreter/bytecodeTracer.hpp"
+# include "interpreter/bytecodes.hpp"
+# include "interpreter/cppInterpreter.hpp"
+# include "interpreter/interpreter.hpp"
+# include "interpreter/invocationCounter.hpp"
+# include "interpreter/linkResolver.hpp"
+# include "interpreter/templateInterpreter.hpp"
+# include "interpreter/templateTable.hpp"
+# include "jvmtifiles/jvmti.h"
+# include "memory/allocation.hpp"
+# include "memory/allocation.inline.hpp"
+# include "memory/barrierSet.hpp"
+# include "memory/barrierSet.inline.hpp"
+# include "memory/blockOffsetTable.hpp"
+# include "memory/blockOffsetTable.inline.hpp"
+# include "memory/cardTableModRefBS.hpp"
+# include "memory/collectorPolicy.hpp"
+# include "memory/compactingPermGenGen.hpp"
+# include "memory/defNewGeneration.hpp"
+# include "memory/gcLocker.hpp"
+# include "memory/genCollectedHeap.hpp"
+# include "memory/genOopClosures.hpp"
+# include "memory/genRemSet.hpp"
+# include "memory/generation.hpp"
+# include "memory/generation.inline.hpp"
+# include "memory/heap.hpp"
+# include "memory/iterator.hpp"
+# include "memory/memRegion.hpp"
+# include "memory/modRefBarrierSet.hpp"
+# include "memory/oopFactory.hpp"
+# include "memory/permGen.hpp"
+# include "memory/referencePolicy.hpp"
+# include "memory/referenceProcessor.hpp"
+# include "memory/resourceArea.hpp"
+# include "memory/sharedHeap.hpp"
+# include "memory/space.hpp"
+# include "memory/space.inline.hpp"
+# include "memory/specialized_oop_closures.hpp"
+# include "memory/threadLocalAllocBuffer.hpp"
+# include "memory/threadLocalAllocBuffer.inline.hpp"
+# include "memory/universe.hpp"
+# include "memory/universe.inline.hpp"
+# include "memory/watermark.hpp"
+# include "oops/arrayKlass.hpp"
+# include "oops/arrayOop.hpp"
+# include "oops/constMethodOop.hpp"
+# include "oops/constantPoolOop.hpp"
+# include "oops/cpCacheOop.hpp"
+# include "oops/instanceKlass.hpp"
+# include "oops/instanceOop.hpp"
+# include "oops/instanceRefKlass.hpp"
+# include "oops/klass.hpp"
+# include "oops/klassOop.hpp"
+# include "oops/klassPS.hpp"
+# include "oops/klassVtable.hpp"
+# include "oops/markOop.hpp"
+# include "oops/markOop.inline.hpp"
+# include "oops/methodDataOop.hpp"
+# include "oops/methodOop.hpp"
+# include "oops/objArrayKlass.hpp"
+# include "oops/objArrayOop.hpp"
+# include "oops/oop.hpp"
+# include "oops/oop.inline.hpp"
+# include "oops/oop.inline2.hpp"
+# include "oops/oopsHierarchy.hpp"
+# include "oops/symbol.hpp"
+# include "oops/typeArrayKlass.hpp"
+# include "oops/typeArrayOop.hpp"
+# include "prims/jni.h"
+# include "prims/jvm.h"
+# include "prims/jvmtiExport.hpp"
+# include "prims/methodHandles.hpp"
+# include "runtime/arguments.hpp"
+# include "runtime/atomic.hpp"
+# include "runtime/deoptimization.hpp"
+# include "runtime/extendedPC.hpp"
+# include "runtime/fieldDescriptor.hpp"
+# include "runtime/fieldType.hpp"
+# include "runtime/frame.hpp"
+# include "runtime/frame.inline.hpp"
+# include "runtime/globals.hpp"
+# include "runtime/globals_extension.hpp"
+# include "runtime/handles.hpp"
+# include "runtime/handles.inline.hpp"
+# include "runtime/icache.hpp"
+# include "runtime/init.hpp"
+# include "runtime/interfaceSupport.hpp"
+# include "runtime/java.hpp"
+# include "runtime/javaCalls.hpp"
+# include "runtime/javaFrameAnchor.hpp"
+# include "runtime/jniHandles.hpp"
+# include "runtime/monitorChunk.hpp"
+# include "runtime/mutex.hpp"
+# include "runtime/mutexLocker.hpp"
+# include "runtime/objectMonitor.hpp"
+# include "runtime/orderAccess.hpp"
+# include "runtime/os.hpp"
+# include "runtime/osThread.hpp"
+# include "runtime/perfData.hpp"
+# include "runtime/perfMemory.hpp"
+# include "runtime/prefetch.hpp"
+# include "runtime/reflection.hpp"
+# include "runtime/reflectionUtils.hpp"
+# include "runtime/registerMap.hpp"
+# include "runtime/safepoint.hpp"
+# include "runtime/sharedRuntime.hpp"
+# include "runtime/signature.hpp"
+# include "runtime/stackValue.hpp"
+# include "runtime/stackValueCollection.hpp"
+# include "runtime/stubCodeGenerator.hpp"
+# include "runtime/stubRoutines.hpp"
+# include "runtime/synchronizer.hpp"
+# include "runtime/thread.hpp"
+# include "runtime/threadLocalStorage.hpp"
+# include "runtime/timer.hpp"
+# include "runtime/unhandledOops.hpp"
+# include "runtime/vframe.hpp"
+# include "runtime/virtualspace.hpp"
+# include "runtime/vmThread.hpp"
+# include "runtime/vm_operations.hpp"
+# include "runtime/vm_version.hpp"
+# include "services/lowMemoryDetector.hpp"
+# include "services/memoryPool.hpp"
+# include "services/memoryService.hpp"
+# include "services/memoryUsage.hpp"
+# include "utilities/accessFlags.hpp"
+# include "utilities/array.hpp"
+# include "utilities/bitMap.hpp"
+# include "utilities/bitMap.inline.hpp"
+# include "utilities/constantTag.hpp"
+# include "utilities/copy.hpp"
+# include "utilities/debug.hpp"
+# include "utilities/exceptions.hpp"
+# include "utilities/globalDefinitions.hpp"
+# include "utilities/growableArray.hpp"
+# include "utilities/hashtable.hpp"
+# include "utilities/histogram.hpp"
+# include "utilities/macros.hpp"
+# include "utilities/numberSeq.hpp"
+# include "utilities/ostream.hpp"
+# include "utilities/preserveException.hpp"
+# include "utilities/sizes.hpp"
+# include "utilities/taskqueue.hpp"
+# include "utilities/top.hpp"
+# include "utilities/utf8.hpp"
+# include "utilities/workgroup.hpp"
+# include "utilities/yieldingWorkgroup.hpp"
+#ifdef COMPILER2
+# include "libadt/dict.hpp"
+# include "libadt/port.hpp"
+# include "libadt/set.hpp"
+# include "libadt/vectset.hpp"
+# include "opto/addnode.hpp"
+# include "opto/adlcVMDeps.hpp"
+# include "opto/block.hpp"
+# include "opto/c2_globals.hpp"
+# include "opto/callnode.hpp"
+# include "opto/cfgnode.hpp"
+# include "opto/compile.hpp"
+# include "opto/connode.hpp"
+# include "opto/idealGraphPrinter.hpp"
+# include "opto/loopnode.hpp"
+# include "opto/machnode.hpp"
+# include "opto/matcher.hpp"
+# include "opto/memnode.hpp"
+# include "opto/mulnode.hpp"
+# include "opto/multnode.hpp"
+# include "opto/node.hpp"
+# include "opto/opcodes.hpp"
+# include "opto/optoreg.hpp"
+# include "opto/phase.hpp"
+# include "opto/phaseX.hpp"
+# include "opto/regalloc.hpp"
+# include "opto/regmask.hpp"
+# include "opto/runtime.hpp"
+# include "opto/subnode.hpp"
+# include "opto/type.hpp"
+# include "opto/vectornode.hpp"
+#endif // COMPILER2
+#ifdef COMPILER1
+# include "c1/c1_Compilation.hpp"
+# include "c1/c1_Defs.hpp"
+# include "c1/c1_FrameMap.hpp"
+# include "c1/c1_LIR.hpp"
+# include "c1/c1_MacroAssembler.hpp"
+# include "c1/c1_ValueType.hpp"
+# include "c1/c1_globals.hpp"
+#endif // COMPILER1
+#ifndef SERIALGC
+# include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
+# include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
+# include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
+# include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
+# include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
+# include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
+# include "gc_implementation/concurrentMarkSweep/freeList.hpp"
+# include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
+# include "gc_implementation/g1/dirtyCardQueue.hpp"
+# include "gc_implementation/g1/g1BlockOffsetTable.hpp"
+# include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+# include "gc_implementation/g1/g1OopClosures.hpp"
+# include "gc_implementation/g1/g1_globals.hpp"
+# include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
+# include "gc_implementation/g1/ptrQueue.hpp"
+# include "gc_implementation/g1/satbQueue.hpp"
+# include "gc_implementation/parNew/parGCAllocBuffer.hpp"
+# include "gc_implementation/parNew/parOopClosures.hpp"
+# include "gc_implementation/parallelScavenge/objectStartArray.hpp"
+# include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
+# include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
+# include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
+# include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
+# include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
+# include "gc_implementation/parallelScavenge/psGenerationCounters.hpp"
+# include "gc_implementation/parallelScavenge/psOldGen.hpp"
+# include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+# include "gc_implementation/parallelScavenge/psPermGen.hpp"
+# include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
+# include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+# include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
+# include "gc_implementation/shared/gcPolicyCounters.hpp"
+#endif // SERIALGC
+
+#endif // !DONT_USE_PRECOMPILED_HEADER
--- a/src/share/vm/runtime/globals.hpp	Wed Nov 09 07:25:51 2011 -0800
+++ b/src/share/vm/runtime/globals.hpp	Thu Nov 10 04:46:08 2011 -0800
@@ -2580,7 +2580,7 @@
   diagnostic(bool, DebugInlinedCalls, true,                                 \
          "If false, restricts profiled locations to the root method only")  \
                                                                             \
-  product(bool, PrintVMOptions, trueInDebug,                                \
+  product(bool, PrintVMOptions, NOT_EMBEDDED(trueInDebug) EMBEDDED_ONLY(false),\
          "Print flags that appeared on the command line")                   \
                                                                             \
   product(bool, IgnoreUnrecognizedVMOptions, false,                         \
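The PrintVMOptions default above relies on the NOT_EMBEDDED/EMBEDDED_ONLY macros to keep exactly one of the two values depending on the build. A minimal sketch of such macros (simplified stand-ins, not the real utilities/macros.hpp definitions):

#include <cstdio>

// #define EMBEDDED                     // uncomment for an embedded build
#ifdef EMBEDDED
# define NOT_EMBEDDED(code)
# define EMBEDDED_ONLY(code) code
#else
# define NOT_EMBEDDED(code)  code
# define EMBEDDED_ONLY(code)
#endif

static const bool trueInDebug    = true;  // stand-in for the debug-build default
static const bool PrintVMOptions = NOT_EMBEDDED(trueInDebug) EMBEDDED_ONLY(false);

int main() {
  // Regular builds keep trueInDebug; defining EMBEDDED switches the default to false.
  std::printf("PrintVMOptions default: %s\n", PrintVMOptions ? "true" : "false");
  return 0;
}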