changeset 4586:dedc8563e33d

Merge
author bharadwaj
date Thu, 18 Apr 2013 16:04:33 -0700
parents cbf8c8c25bbe (diff) a7fb14888912 (current diff)
children 2a9d97b57920
files make/bsd/makefiles/jvmg.make make/bsd/makefiles/profiled.make make/linux/makefiles/jvmg.make make/linux/makefiles/profiled.make make/solaris/makefiles/jvmg.make make/solaris/makefiles/profiled.make src/os/bsd/vm/chaitin_bsd.cpp src/os/linux/vm/chaitin_linux.cpp src/os/solaris/vm/chaitin_solaris.cpp src/os/windows/vm/chaitin_windows.cpp
diffstat 12 files changed, 349 insertions(+), 376 deletions(-)
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -63,6 +63,13 @@
                                 noreg /* pre_val */,
                                 tmp, true /*preserve_o_regs*/);
 
+        // G1 barrier needs uncompressed oop for region cross check.
+        Register new_val = val;
+        if (UseCompressedOops && val != G0) {
+          new_val = tmp;
+          __ mov(val, new_val);
+        }
+
         if (index == noreg ) {
           assert(Assembler::is_simm13(offset), "fix this code");
           __ store_heap_oop(val, base, offset);
@@ -79,7 +86,7 @@
               __ add(base, index, base);
             }
           }
-          __ g1_write_barrier_post(base, val, tmp);
+          __ g1_write_barrier_post(base, new_val, tmp);
         }
       }
       break;
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -158,14 +158,19 @@
         if (val == noreg) {
           __ store_heap_oop_null(Address(rdx, 0));
         } else {
+          // G1 barrier needs uncompressed oop for region cross check.
+          Register new_val = val;
+          if (UseCompressedOops) {
+            new_val = rbx;
+            __ movptr(new_val, val);
+          }
           __ store_heap_oop(Address(rdx, 0), val);
           __ g1_write_barrier_post(rdx /* store_adr */,
-                                   val /* new_val */,
+                                   new_val /* new_val */,
                                    r15_thread /* thread */,
                                    r8 /* tmp */,
                                    rbx /* tmp2 */);
         }
-
       }
       break;
 #endif // INCLUDE_ALL_GCS
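Both the SPARC and x86_64 hunks above address the same issue: store_heap_oop() may compress the new value when UseCompressedOops is set, but the G1 post-write barrier filters on raw heap addresses, so it must be handed an uncompressed copy of the oop. A minimal sketch of that cross-region filter, under the assumption of the usual XOR-and-shift check (illustrative names, not the actual HotSpot assembler):

    #include <cstddef>
    #include <cstdint>

    // The post barrier only records a card when the store creates a pointer
    // from one heap region into another; it detects this by XOR-ing the two
    // addresses and shifting by the log of the region size. A compressed
    // 32-bit oop would make this address arithmetic meaningless, hence the
    // uncompressed copy kept in a temp register above.
    static bool crosses_regions(const void* store_addr, const void* new_val,
                                unsigned log_region_size /* e.g. 20 for 1M regions */) {
      if (new_val == NULL) {
        return false;   // storing NULL never needs a remembered-set entry
      }
      uintptr_t diff = (uintptr_t)store_addr ^ (uintptr_t)new_val;
      return (diff >> log_region_size) != 0;
    }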
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1322,233 +1322,239 @@
     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
-    TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
-    TraceCollectorStats tcs(g1mm()->full_collection_counters());
-    TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
-
-    double start = os::elapsedTime();
-    g1_policy()->record_full_collection_start();
-
-    // Note: When we have a more flexible GC logging framework that
-    // allows us to add optional attributes to a GC log record we
-    // could consider timing and reporting how long we wait in the
-    // following two methods.
-    wait_while_free_regions_coming();
-    // If we start the compaction before the CM threads finish
-    // scanning the root regions we might trip them over as we'll
-    // be moving objects / updating references. So let's wait until
-    // they are done. By telling them to abort, they should complete
-    // early.
-    _cm->root_regions()->abort();
-    _cm->root_regions()->wait_until_scan_finished();
-    append_secondary_free_list_if_not_empty_with_lock();
-
-    gc_prologue(true);
-    increment_total_collections(true /* full gc */);
-    increment_old_marking_cycles_started();
-
-    size_t g1h_prev_used = used();
-    assert(used() == recalculate_used(), "Should be equal");
-
-    verify_before_gc();
-
-    pre_full_gc_dump();
-
-    COMPILER2_PRESENT(DerivedPointerTable::clear());
-
-    // Disable discovery and empty the discovered lists
-    // for the CM ref processor.
-    ref_processor_cm()->disable_discovery();
-    ref_processor_cm()->abandon_partial_discovery();
-    ref_processor_cm()->verify_no_references_recorded();
-
-    // Abandon current iterations of concurrent marking and concurrent
-    // refinement, if any are in progress. We have to do this before
-    // wait_until_scan_finished() below.
-    concurrent_mark()->abort();
-
-    // Make sure we'll choose a new allocation region afterwards.
-    release_mutator_alloc_region();
-    abandon_gc_alloc_regions();
-    g1_rem_set()->cleanupHRRS();
-
-    // We should call this after we retire any currently active alloc
-    // regions so that all the ALLOC / RETIRE events are generated
-    // before the start GC event.
-    _hr_printer.start_gc(true /* full */, (size_t) total_collections());
-
-    // We may have added regions to the current incremental collection
-    // set between the last GC or pause and now. We need to clear the
-    // incremental collection set and then start rebuilding it afresh
-    // after this full GC.
-    abandon_collection_set(g1_policy()->inc_cset_head());
-    g1_policy()->clear_incremental_cset();
-    g1_policy()->stop_incremental_cset_building();
-
-    tear_down_region_sets(false /* free_list_only */);
-    g1_policy()->set_gcs_are_young(true);
-
-    // See the comments in g1CollectedHeap.hpp and
-    // G1CollectedHeap::ref_processing_init() about
-    // how reference processing currently works in G1.
-
-    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
-    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
-
-    // Temporarily clear the STW ref processor's _is_alive_non_header field.
-    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
-
-    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
-    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
-
-    // Do collection work
     {
-      HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
-    }
-
-    assert(free_regions() == 0, "we should not have added any free regions");
-    rebuild_region_sets(false /* free_list_only */);
-
-    // Enqueue any discovered reference objects that have
-    // not been removed from the discovered lists.
-    ref_processor_stw()->enqueue_discovered_references();
-
-    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-
-    MemoryService::track_memory_usage();
-
-    verify_after_gc();
-
-    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
-    ref_processor_stw()->verify_no_references_recorded();
-
-    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-    ClassLoaderDataGraph::purge();
-
-    // Note: since we've just done a full GC, concurrent
-    // marking is no longer active. Therefore we need not
-    // re-enable reference discovery for the CM ref processor.
-    // That will be done at the start of the next marking cycle.
-    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
-    ref_processor_cm()->verify_no_references_recorded();
-
-    reset_gc_time_stamp();
-    // Since everything potentially moved, we will clear all remembered
-    // sets, and clear all cards.  Later we will rebuild remebered
-    // sets. We will also reset the GC time stamps of the regions.
-    clear_rsets_post_compaction();
-    check_gc_time_stamps();
-
-    // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
-
-    if (_hr_printer.is_active()) {
-      // We should do this after we potentially resize the heap so
-      // that all the COMMIT / UNCOMMIT events are generated before
-      // the end GC event.
-
-      print_hrs_post_compaction();
-      _hr_printer.end_gc(true /* full */, (size_t) total_collections());
-    }
-
-    if (_cg1r->use_cache()) {
-      _cg1r->clear_and_record_card_counts();
-      _cg1r->clear_hot_cache();
-    }
-
-    // Rebuild remembered sets of all regions.
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      uint n_workers =
-        AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
-                                       workers()->active_workers(),
-                                       Threads::number_of_non_daemon_threads());
-      assert(UseDynamicNumberOfGCThreads ||
-             n_workers == workers()->total_workers(),
-             "If not dynamic should be using all the  workers");
-      workers()->set_active_workers(n_workers);
-      // Set parallel threads in the heap (_n_par_threads) only
-      // before a parallel phase and always reset it to 0 after
-      // the phase so that the number of parallel threads does
-      // no get carried forward to a serial phase where there
-      // may be code that is "possibly_parallel".
-      set_par_threads(n_workers);
-
-      ParRebuildRSTask rebuild_rs_task(this);
-      assert(check_heap_region_claim_values(
-             HeapRegion::InitialClaimValue), "sanity check");
-      assert(UseDynamicNumberOfGCThreads ||
-             workers()->active_workers() == workers()->total_workers(),
-        "Unless dynamic should use total workers");
-      // Use the most recent number of  active workers
-      assert(workers()->active_workers() > 0,
-        "Active workers not properly set");
-      set_par_threads(workers()->active_workers());
-      workers()->run_task(&rebuild_rs_task);
-      set_par_threads(0);
-      assert(check_heap_region_claim_values(
-             HeapRegion::RebuildRSClaimValue), "sanity check");
-      reset_heap_region_claim_values();
-    } else {
-      RebuildRSOutOfRegionClosure rebuild_rs(this);
-      heap_region_iterate(&rebuild_rs);
-    }
-
-    if (G1Log::fine()) {
-      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
-    }
-
-    if (true) { // FIXME
-      MetaspaceGC::compute_new_size();
-    }
-
-    // Start a new incremental collection set for the next pause
-    assert(g1_policy()->collection_set() == NULL, "must be");
-    g1_policy()->start_incremental_cset_building();
-
-    // Clear the _cset_fast_test bitmap in anticipation of adding
-    // regions to the incremental collection set for the next
-    // evacuation pause.
-    clear_cset_fast_test();
-
-    init_mutator_alloc_region();
-
-    double end = os::elapsedTime();
-    g1_policy()->record_full_collection_end();
+      TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
+      TraceCollectorStats tcs(g1mm()->full_collection_counters());
+      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
+
+      double start = os::elapsedTime();
+      g1_policy()->record_full_collection_start();
+
+      // Note: When we have a more flexible GC logging framework that
+      // allows us to add optional attributes to a GC log record we
+      // could consider timing and reporting how long we wait in the
+      // following two methods.
+      wait_while_free_regions_coming();
+      // If we start the compaction before the CM threads finish
+      // scanning the root regions we might trip them over as we'll
+      // be moving objects / updating references. So let's wait until
+      // they are done. By telling them to abort, they should complete
+      // early.
+      _cm->root_regions()->abort();
+      _cm->root_regions()->wait_until_scan_finished();
+      append_secondary_free_list_if_not_empty_with_lock();
+
+      gc_prologue(true);
+      increment_total_collections(true /* full gc */);
+      increment_old_marking_cycles_started();
+
+      assert(used() == recalculate_used(), "Should be equal");
+
+      verify_before_gc();
+
+      pre_full_gc_dump();
+
+      COMPILER2_PRESENT(DerivedPointerTable::clear());
+
+      // Disable discovery and empty the discovered lists
+      // for the CM ref processor.
+      ref_processor_cm()->disable_discovery();
+      ref_processor_cm()->abandon_partial_discovery();
+      ref_processor_cm()->verify_no_references_recorded();
+
+      // Abandon current iterations of concurrent marking and concurrent
+      // refinement, if any are in progress. We have to do this before
+      // wait_until_scan_finished() below.
+      concurrent_mark()->abort();
+
+      // Make sure we'll choose a new allocation region afterwards.
+      release_mutator_alloc_region();
+      abandon_gc_alloc_regions();
+      g1_rem_set()->cleanupHRRS();
+
+      // We should call this after we retire any currently active alloc
+      // regions so that all the ALLOC / RETIRE events are generated
+      // before the start GC event.
+      _hr_printer.start_gc(true /* full */, (size_t) total_collections());
+
+      // We may have added regions to the current incremental collection
+      // set between the last GC or pause and now. We need to clear the
+      // incremental collection set and then start rebuilding it afresh
+      // after this full GC.
+      abandon_collection_set(g1_policy()->inc_cset_head());
+      g1_policy()->clear_incremental_cset();
+      g1_policy()->stop_incremental_cset_building();
+
+      tear_down_region_sets(false /* free_list_only */);
+      g1_policy()->set_gcs_are_young(true);
+
+      // See the comments in g1CollectedHeap.hpp and
+      // G1CollectedHeap::ref_processing_init() about
+      // how reference processing currently works in G1.
+
+      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
+      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
+
+      // Temporarily clear the STW ref processor's _is_alive_non_header field.
+      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
+
+      ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+      ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
+
+      // Do collection work
+      {
+        HandleMark hm;  // Discard invalid handles created during gc
+        G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
+      }
+
+      assert(free_regions() == 0, "we should not have added any free regions");
+      rebuild_region_sets(false /* free_list_only */);
+
+      // Enqueue any discovered reference objects that have
+      // not been removed from the discovered lists.
+      ref_processor_stw()->enqueue_discovered_references();
+
+      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+
+      MemoryService::track_memory_usage();
+
+      verify_after_gc();
+
+      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+      ref_processor_stw()->verify_no_references_recorded();
+
+      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
+      ClassLoaderDataGraph::purge();
+
+      // Note: since we've just done a full GC, concurrent
+      // marking is no longer active. Therefore we need not
+      // re-enable reference discovery for the CM ref processor.
+      // That will be done at the start of the next marking cycle.
+      assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+      ref_processor_cm()->verify_no_references_recorded();
+
+      reset_gc_time_stamp();
+      // Since everything potentially moved, we will clear all remembered
+      // sets, and clear all cards.  Later we will rebuild remebered
+      // sets. We will also reset the GC time stamps of the regions.
+      clear_rsets_post_compaction();
+      check_gc_time_stamps();
+
+      // Resize the heap if necessary.
+      resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
+
+      if (_hr_printer.is_active()) {
+        // We should do this after we potentially resize the heap so
+        // that all the COMMIT / UNCOMMIT events are generated before
+        // the end GC event.
+
+        print_hrs_post_compaction();
+        _hr_printer.end_gc(true /* full */, (size_t) total_collections());
+      }
+
+      if (_cg1r->use_cache()) {
+        _cg1r->clear_and_record_card_counts();
+        _cg1r->clear_hot_cache();
+      }
+
+      // Rebuild remembered sets of all regions.
+      if (G1CollectedHeap::use_parallel_gc_threads()) {
+        uint n_workers =
+          AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
+                                                  workers()->active_workers(),
+                                                  Threads::number_of_non_daemon_threads());
+        assert(UseDynamicNumberOfGCThreads ||
+               n_workers == workers()->total_workers(),
+               "If not dynamic should be using all the  workers");
+        workers()->set_active_workers(n_workers);
+        // Set parallel threads in the heap (_n_par_threads) only
+        // before a parallel phase and always reset it to 0 after
+        // the phase so that the number of parallel threads does
+        // no get carried forward to a serial phase where there
+        // may be code that is "possibly_parallel".
+        set_par_threads(n_workers);
+
+        ParRebuildRSTask rebuild_rs_task(this);
+        assert(check_heap_region_claim_values(
+               HeapRegion::InitialClaimValue), "sanity check");
+        assert(UseDynamicNumberOfGCThreads ||
+               workers()->active_workers() == workers()->total_workers(),
+               "Unless dynamic should use total workers");
+        // Use the most recent number of  active workers
+        assert(workers()->active_workers() > 0,
+               "Active workers not properly set");
+        set_par_threads(workers()->active_workers());
+        workers()->run_task(&rebuild_rs_task);
+        set_par_threads(0);
+        assert(check_heap_region_claim_values(
+               HeapRegion::RebuildRSClaimValue), "sanity check");
+        reset_heap_region_claim_values();
+      } else {
+        RebuildRSOutOfRegionClosure rebuild_rs(this);
+        heap_region_iterate(&rebuild_rs);
+      }
+
+      if (true) { // FIXME
+        MetaspaceGC::compute_new_size();
+      }
 
 #ifdef TRACESPINNING
-    ParallelTaskTerminator::print_termination_counts();
+      ParallelTaskTerminator::print_termination_counts();
 #endif
 
-    gc_epilogue(true);
-
-    // Discard all rset updates
-    JavaThread::dirty_card_queue_set().abandon_logs();
-    assert(!G1DeferredRSUpdate
-           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
-
-    _young_list->reset_sampled_info();
-    // At this point there should be no regions in the
-    // entire heap tagged as young.
-    assert( check_young_list_empty(true /* check_heap */),
-      "young list should be empty at this point");
-
-    // Update the number of full collections that have been completed.
-    increment_old_marking_cycles_completed(false /* concurrent */);
-
-    _hrs.verify_optional();
-    verify_region_sets_optional();
+      // Discard all rset updates
+      JavaThread::dirty_card_queue_set().abandon_logs();
+      assert(!G1DeferredRSUpdate
+             || (G1DeferredRSUpdate &&
+                (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
+
+      _young_list->reset_sampled_info();
+      // At this point there should be no regions in the
+      // entire heap tagged as young.
+      assert(check_young_list_empty(true /* check_heap */),
+             "young list should be empty at this point");
+
+      // Update the number of full collections that have been completed.
+      increment_old_marking_cycles_completed(false /* concurrent */);
+
+      _hrs.verify_optional();
+      verify_region_sets_optional();
+
+      // Start a new incremental collection set for the next pause
+      assert(g1_policy()->collection_set() == NULL, "must be");
+      g1_policy()->start_incremental_cset_building();
+
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();
+
+      init_mutator_alloc_region();
+
+      double end = os::elapsedTime();
+      g1_policy()->record_full_collection_end();
+
+      if (G1Log::fine()) {
+        g1_policy()->print_heap_transition();
+      }
+
+      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+      // before any GC notifications are raised.
+      g1mm()->update_sizes();
+
+      gc_epilogue(true);
+    }
+
+    if (G1Log::finer()) {
+      g1_policy()->print_detailed_heap_transition();
+    }
 
     print_heap_after_gc();
 
-    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
-    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
-    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
-    // before any GC notifications are raised.
-    g1mm()->update_sizes();
-  }
-
-  post_full_gc_dump();
+    post_full_gc_dump();
+  }
 
   return true;
 }
@@ -3838,7 +3844,6 @@
         // The elapsed time induced by the start time below deliberately elides
         // the possible verification above.
         double sample_start_time_sec = os::elapsedTime();
-        size_t start_used_bytes = used();
 
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
@@ -3846,8 +3851,7 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->record_collection_pause_start(sample_start_time_sec,
-                                                   start_used_bytes);
+        g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
         double scan_wait_start = os::elapsedTime();
         // We have to wait until the CM threads finish scanning the
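Most of the g1CollectedHeap.cpp hunk is a re-indentation: the full-collection work now lives in an inner block so that the TraceTime / TraceCollectorStats / TraceMemoryManagerStats objects are destroyed, and their GC notifications raised, before the heap transition is printed and print_heap_after_gc() runs, with g1mm()->update_sizes() kept inside that block as the in-code comment requires. A compilable toy that mirrors the ordering the new scoping enforces (ScopedGCTrace is a stand-in for TraceMemoryManagerStats, not the real class):

    #include <cstdio>

    struct ScopedGCTrace {
      // stand-in for TraceMemoryManagerStats: its destructor is where the
      // GC notification would be raised
      ~ScopedGCTrace() { std::printf("GC notification raised\n"); }
    };

    void full_collection_sketch() {
      {
        ScopedGCTrace tms;   // constructed at the top of the inner block
        std::printf("collection work done, memory pools updated\n");
      }                      // destructor fires here ...
      std::printf("detailed heap transition printed\n");   // ... before these lines
      std::printf("heap printed after GC\n");
    }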
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -406,7 +406,6 @@
   }
   _free_regions_at_end_of_collection = _g1->free_regions();
   update_young_list_target_length();
-  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
 
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
@@ -746,6 +745,7 @@
 
 void G1CollectorPolicy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
+  record_heap_size_info_at_start();
   // Release the future to-space so that it is available for compaction into.
   _g1->set_full_collection();
 }
@@ -788,8 +788,7 @@
   _stop_world_start = os::elapsedTime();
 }
 
-void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
-                                                      size_t start_used) {
+void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
   // We only need to do this here as the policy will only be applied
   // to the GC we're about to start. so, no point is calculating this
   // every time we calculate / recalculate the target young length.
@@ -803,19 +802,14 @@
   _trace_gen0_time_data.record_start_collection(s_w_t_ms);
   _stop_world_start = 0.0;
 
+  record_heap_size_info_at_start();
+
   phase_times()->record_cur_collection_start_sec(start_time_sec);
-  _cur_collection_pause_used_at_start_bytes = start_used;
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   _pending_cards = _g1->pending_card_num();
 
   _collection_set_bytes_used_before = 0;
   _bytes_copied_during_gc = 0;
 
-  YoungList* young_list = _g1->young_list();
-  _eden_bytes_before_gc = young_list->eden_used_bytes();
-  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
-  _capacity_before_gc = _g1->capacity();
-
   _last_gc_was_young = false;
 
   // do that for any other surv rate groups
@@ -1153,6 +1147,21 @@
   byte_size_in_proper_unit((double)(bytes)),                    \
   proper_unit_for_byte_size((bytes))
 
+void G1CollectorPolicy::record_heap_size_info_at_start() {
+  YoungList* young_list = _g1->young_list();
+  _eden_bytes_before_gc = young_list->eden_used_bytes();
+  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
+  _capacity_before_gc = _g1->capacity();
+
+  _cur_collection_pause_used_at_start_bytes = _g1->used();
+  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+
+  size_t eden_capacity_before_gc =
+         (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;
+
+  _prev_eden_capacity = eden_capacity_before_gc;
+}
+
 void G1CollectorPolicy::print_heap_transition() {
   _g1->print_size_transition(gclog_or_tty,
     _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
@@ -1183,8 +1192,6 @@
       EXT_SIZE_PARAMS(_capacity_before_gc),
       EXT_SIZE_PARAMS(used),
       EXT_SIZE_PARAMS(capacity));
-
-    _prev_eden_capacity = eden_capacity;
 }
 
 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
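On the policy side, the "heap size at the start of a GC" bookkeeping is consolidated into record_heap_size_info_at_start(), called from both record_full_collection_start() and record_collection_pause_start(), and the previous eden capacity is now derived from the young-list target rather than cached at the end of the pause. The arithmetic in isolation (field names follow the hunk; the free function is only for illustration):

    #include <cstddef>

    // Eden capacity before the GC is the young-generation target minus the
    // portion already occupied by survivor regions.
    size_t eden_capacity_before_gc(size_t young_list_target_length,  // in regions
                                   size_t region_grain_bytes,        // HeapRegion::GrainBytes
                                   size_t survivor_bytes_before_gc) {
      return (young_list_target_length * region_grain_bytes) - survivor_bytes_before_gc;
    }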
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Apr 18 16:04:33 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -671,34 +671,36 @@
 
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
-  // Update the heuristic info to record a collection pause of the given
-  // start time, where the given number of bytes were used at the start.
-  // This may involve changing the desired size of a collection set.
+  // Record the start and end of an evacuation pause.
+  void record_collection_pause_start(double start_time_sec);
+  void record_collection_pause_end(double pause_time_ms);
 
-  void record_stop_world_start();
-
-  void record_collection_pause_start(double start_time_sec, size_t start_used);
+  // Record the start and end of a full collection.
+  void record_full_collection_start();
+  void record_full_collection_end();
 
   // Must currently be called while the world is stopped.
-  void record_concurrent_mark_init_end(double
-                                           mark_init_elapsed_time_ms);
+  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
 
+  // Record start and end of remark.
   void record_concurrent_mark_remark_start();
   void record_concurrent_mark_remark_end();
 
+  // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
   void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
   void record_concurrent_mark_cleanup_completed();
 
-  void record_concurrent_pause();
+  // Records the information about the heap size for reporting in
+  // print_detailed_heap_transition
+  void record_heap_size_info_at_start();
 
-  void record_collection_pause_end(double pause_time);
+  // Print heap sizing transition (with less and more detail).
   void print_heap_transition();
   void print_detailed_heap_transition();
 
-  // Record the fact that a full collection occurred.
-  void record_full_collection_start();
-  void record_full_collection_end();
+  void record_stop_world_start();
+  void record_concurrent_pause();
 
   // Record how much space we copied during a GC. This is typically
   // called when a GC alloc region is being retired.
--- a/src/share/vm/memory/metaspace.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/memory/metaspace.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -103,27 +103,7 @@
 // a chunk is placed on the free list of blocks (BlockFreelist) and
 // reused from there.
 
-// Pointer to list of Metachunks.
-class ChunkList VALUE_OBJ_CLASS_SPEC {
-  // List of free chunks
-  Metachunk* _head;
-
- public:
-  // Constructor
-  ChunkList() : _head(NULL) {}
-
-  // Accessors
-  Metachunk* head() { return _head; }
-  void set_head(Metachunk* v) { _head = v; }
-
-  // Link at head of the list
-  void add_at_head(Metachunk* head, Metachunk* tail);
-  void add_at_head(Metachunk* head);
-
-  size_t sum_list_size();
-  size_t sum_list_count();
-  size_t sum_list_capacity();
-};
+typedef class FreeList<Metachunk> ChunkList;
 
 // Manages the global free lists of chunks.
 // Has three lists of free chunks, and a total size and
@@ -185,6 +165,10 @@
   // for special, small, medium, and humongous chunks.
   static ChunkIndex list_index(size_t size);
 
+  // Add the simple linked list of chunks to the freelist of chunks
+  // of type index.
+  void return_chunks(ChunkIndex index, Metachunk* chunks);
+
   // Total of the space in the free chunks list
   size_t free_chunks_total();
   size_t free_chunks_total_in_bytes();
@@ -899,6 +883,9 @@
                    Mutex::_no_safepoint_check_flag);
   bool initialization_succeeded = grow_vs(word_size);
 
+  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
+  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
+  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
   assert(initialization_succeeded,
     " VirtualSpaceList initialization should not fail");
 }
@@ -913,6 +900,9 @@
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
   bool succeeded = class_entry->initialize();
+  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
+  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
+  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
   assert(succeeded, " VirtualSpaceList initialization should not fail");
   link_vs(class_entry, rs.size()/BytesPerWord);
 }
@@ -1380,76 +1370,6 @@
 }
 #endif
 
-// ChunkList methods
-
-size_t ChunkList::sum_list_size() {
-  size_t result = 0;
-  Metachunk* cur = head();
-  while (cur != NULL) {
-    result += cur->word_size();
-    cur = cur->next();
-  }
-  return result;
-}
-
-size_t ChunkList::sum_list_count() {
-  size_t result = 0;
-  Metachunk* cur = head();
-  while (cur != NULL) {
-    result++;
-    cur = cur->next();
-  }
-  return result;
-}
-
-size_t ChunkList::sum_list_capacity() {
-  size_t result = 0;
-  Metachunk* cur = head();
-  while (cur != NULL) {
-    result += cur->capacity_word_size();
-    cur = cur->next();
-  }
-  return result;
-}
-
-void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
-  assert_lock_strong(SpaceManager::expand_lock());
-  assert(head == tail || tail->next() == NULL,
-         "Not the tail or the head has already been added to a list");
-
-  if (TraceMetadataChunkAllocation && Verbose) {
-    gclog_or_tty->print("ChunkList::add_at_head(head, tail): ");
-    Metachunk* cur = head;
-    while (cur != NULL) {
-      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
-      cur = cur->next();
-    }
-    gclog_or_tty->print_cr("");
-  }
-
-  if (tail != NULL) {
-    tail->set_next(_head);
-  }
-  set_head(head);
-}
-
-void ChunkList::add_at_head(Metachunk* list) {
-  if (list == NULL) {
-    // Nothing to add
-    return;
-  }
-  assert_lock_strong(SpaceManager::expand_lock());
-  Metachunk* head = list;
-  Metachunk* tail = list;
-  Metachunk* cur = head->next();
-  // Search for the tail since it is not passed.
-  while (cur != NULL) {
-    tail = cur;
-    cur = cur->next();
-  }
-  add_at_head(head, tail);
-}
-
 // ChunkManager methods
 
 // Verification of _free_chunks_total and _free_chunks_count does not
@@ -1553,7 +1473,7 @@
       continue;
     }
 
-    result = result + list->sum_list_capacity();
+    result = result + list->count() * list->size();
   }
   result = result + humongous_dictionary()->total_size();
   return result;
@@ -1567,7 +1487,7 @@
     if (list == NULL) {
       continue;
     }
-    count = count + list->sum_list_count();
+    count = count + list->count();
   }
   count = count + humongous_dictionary()->total_free_blocks();
   return count;
@@ -1622,7 +1542,7 @@
     }
 
     // Remove the chunk as the head of the list.
-    free_list->set_head(chunk->next());
+    free_list->remove_chunk(chunk);
 
     // Chunk is being removed from the chunks free list.
     dec_free_chunks_total(chunk->capacity_word_size());
@@ -1679,7 +1599,7 @@
     size_t list_count;
     if (list_index(word_size) < HumongousIndex) {
       ChunkList* list = find_free_chunks_list(word_size);
-      list_count = list->sum_list_count();
+      list_count = list->count();
     } else {
       list_count = humongous_dictionary()->total_count();
     }
@@ -1958,6 +1878,29 @@
   }
 }
 
+void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
+  if (chunks == NULL) {
+    return;
+  }
+  ChunkList* list = free_chunks(index);
+  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
+  assert_lock_strong(SpaceManager::expand_lock());
+  Metachunk* cur = chunks;
+
+  // This return chunks one at a time.  If a new
+  // class List can be created that is a base class
+  // of FreeList then something like FreeList::prepend()
+  // can be used in place of this loop
+  while (cur != NULL) {
+    // Capture the next link before it is changed
+    // by the call to return_chunk_at_head();
+    Metachunk* next = cur->next();
+    cur->set_is_free(true);
+    list->return_chunk_at_head(cur);
+    cur = next;
+  }
+}
+
 SpaceManager::~SpaceManager() {
   // This call this->_lock which can't be done while holding expand_lock()
   const size_t in_use_before = sum_capacity_in_chunks_in_use();
@@ -1995,11 +1938,11 @@
                              chunk_size_name(i));
     }
     Metachunk* chunks = chunks_in_use(i);
-    chunk_manager->free_chunks(i)->add_at_head(chunks);
+    chunk_manager->return_chunks(i, chunks);
     set_chunks_in_use(i, NULL);
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print_cr("updated freelist count %d %s",
-                             chunk_manager->free_chunks(i)->sum_list_count(),
+                             chunk_manager->free_chunks(i)->count(),
                              chunk_size_name(i));
     }
     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
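The metaspace.cpp change replaces the hand-rolled ChunkList with the shared FreeList<Metachunk>, so the old sum_list_count()/sum_list_capacity() walks become count() and count() * size() on a fixed-size free list, and SpaceManager now returns its chunks through ChunkManager::return_chunks(), which walks the singly linked chain and pushes each chunk onto the free-list head. A standalone sketch of that prepend loop over minimal stand-in types (not the HotSpot classes):

    struct Chunk {            // minimal stand-in for Metachunk
      Chunk* next;
      bool   is_free;
    };

    struct SimpleFreeList {   // minimal stand-in for FreeList<Metachunk>
      Chunk* head;
      int    count;
      SimpleFreeList() : head(0), count(0) {}
      void return_chunk_at_head(Chunk* c) { c->next = head; head = c; ++count; }
    };

    // Shape of ChunkManager::return_chunks(): move every chunk of the caller's
    // list onto the free list, one at a time, marking each one free.
    void return_chunks_sketch(SimpleFreeList& list, Chunk* chunks) {
      for (Chunk* cur = chunks; cur != 0; ) {
        Chunk* next = cur->next;    // capture before the link is overwritten
        cur->is_free = true;
        list.return_chunk_at_head(cur);
        cur = next;
      }
    }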
--- a/src/share/vm/runtime/thread.hpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/runtime/thread.hpp	Thu Apr 18 16:04:33 2013 -0700
@@ -1056,11 +1056,11 @@
 #if INCLUDE_NMT
   // native memory tracking
   inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
-  inline void         set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; }
+  inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
 
  private:
   // per-thread memory recorder
-  volatile MemRecorder* _recorder;
+  MemRecorder* volatile _recorder;
 #endif // INCLUDE_NMT
 
   // Suspend/resume support for JavaThread
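The thread.hpp edit (and the matching memTracker changes below) fix a misplaced qualifier: "volatile MemRecorder*" is a non-volatile pointer to volatile data, while what the lock-free updates need is "MemRecorder* volatile", a volatile pointer to ordinary data. A two-declaration illustration with a stub type:

    struct MemRecorderStub { int payload; };

    volatile MemRecorderStub* p1;   // the pointed-to object is treated as volatile;
                                    // p1 itself may be cached in a register
    MemRecorderStub* volatile p2;   // the pointer itself is volatile, which is what
                                    // the CAS loops on these list heads rely on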
--- a/src/share/vm/services/memTrackWorker.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/services/memTrackWorker.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -39,7 +39,7 @@
   }
 }
 
-MemTrackWorker::MemTrackWorker() {
+MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) {
   // create thread uses cgc thread type for now. We should revisit
   // the option, or create new thread type.
   _has_error = !os::create_thread(this, os::cgc_thread);
@@ -88,8 +88,7 @@
   assert(MemTracker::is_on(), "native memory tracking is off");
   this->initialize_thread_local_storage();
   this->record_stack_base_and_size();
-  MemSnapshot* snapshot = MemTracker::get_snapshot();
-  assert(snapshot != NULL, "Worker should not be started");
+  assert(_snapshot != NULL, "Worker should not be started");
   MemRecorder* rec;
   unsigned long processing_generation = 0;
   bool          worker_idle = false;
@@ -109,7 +108,7 @@
       }
 
       // merge the recorder into staging area
-      if (!snapshot->merge(rec)) {
+      if (!_snapshot->merge(rec)) {
         MemTracker::shutdown(MemTracker::NMT_out_of_memory);
       } else {
         NOT_PRODUCT(_merge_count ++;)
@@ -132,7 +131,7 @@
           _head = (_head + 1) % MAX_GENERATIONS;
         }
         // promote this generation data to snapshot
-        if (!snapshot->promote(number_of_classes)) {
+        if (!_snapshot->promote(number_of_classes)) {
           // failed to promote, means out of memory
           MemTracker::shutdown(MemTracker::NMT_out_of_memory);
         }
@@ -140,7 +139,7 @@
         // worker thread is idle
         worker_idle = true;
         MemTracker::report_worker_idle();
-        snapshot->wait(1000);
+        _snapshot->wait(1000);
         ThreadCritical tc;
         // check if more data arrived
         if (!_gen[_head].has_more_recorder()) {
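memTrackWorker.cpp and .hpp switch the worker from looking the snapshot up via MemTracker::get_snapshot() inside run() to receiving it in the constructor, so a worker can only ever be started against a snapshot that already exists. A brief sketch of the same constructor-injection shape (stub types, not the real classes):

    class SnapshotStub { };

    class WorkerStub {
      SnapshotStub* const _snapshot;   // fixed at construction, never re-fetched
     public:
      explicit WorkerStub(SnapshotStub* snapshot) : _snapshot(snapshot) {}
      void run() {
        // no global lookup here; the caller (cf. MemTracker::start_worker)
        // is responsible for passing a non-NULL snapshot
        (void)_snapshot;
      }
    };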
--- a/src/share/vm/services/memTrackWorker.hpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/services/memTrackWorker.hpp	Thu Apr 18 16:04:33 2013 -0700
@@ -85,8 +85,10 @@
 
   bool            _has_error;
 
+  MemSnapshot*    _snapshot;
+
  public:
-  MemTrackWorker();
+  MemTrackWorker(MemSnapshot* snapshot);
   ~MemTrackWorker();
   _NOINLINE_ void* operator new(size_t size);
   _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
--- a/src/share/vm/services/memTracker.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/services/memTracker.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -53,12 +53,12 @@
 }
 
 
-MemRecorder*                    MemTracker::_global_recorder = NULL;
+MemRecorder* volatile           MemTracker::_global_recorder = NULL;
 MemSnapshot*                    MemTracker::_snapshot = NULL;
 MemBaseline                     MemTracker::_baseline;
 Mutex*                          MemTracker::_query_lock = NULL;
-volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
-volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
+MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
+MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
 MemTrackWorker*                 MemTracker::_worker_thread = NULL;
 int                             MemTracker::_sync_point_skip_count = 0;
 MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
@@ -128,7 +128,7 @@
 
   _snapshot = new (std::nothrow)MemSnapshot();
   if (_snapshot != NULL) {
-    if (!_snapshot->out_of_memory() && start_worker()) {
+    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
       _state = NMT_started;
       NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
       return;
@@ -209,7 +209,7 @@
 // delete all pooled recorders
 void MemTracker::delete_all_pooled_recorders() {
   // free all pooled recorders
-  volatile MemRecorder* cur_head = _pooled_recorders;
+  MemRecorder* volatile cur_head = _pooled_recorders;
   if (cur_head != NULL) {
     MemRecorder* null_ptr = NULL;
     while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
@@ -543,14 +543,14 @@
 /*
  * Start worker thread.
  */
-bool MemTracker::start_worker() {
-  assert(_worker_thread == NULL, "Just Check");
-  _worker_thread = new (std::nothrow) MemTrackWorker();
-  if (_worker_thread == NULL || _worker_thread->has_error()) {
-    if (_worker_thread != NULL) {
-      delete _worker_thread;
-      _worker_thread = NULL;
-    }
+bool MemTracker::start_worker(MemSnapshot* snapshot) {
+  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
+  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
+  if (_worker_thread == NULL) {
+    return false;
+  } else if (_worker_thread->has_error()) {
+    delete _worker_thread;
+    _worker_thread = NULL;
     return false;
   }
   _worker_thread->start();
--- a/src/share/vm/services/memTracker.hpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/services/memTracker.hpp	Thu Apr 18 16:04:33 2013 -0700
@@ -421,7 +421,7 @@
 
  private:
   // start native memory tracking worker thread
-  static bool start_worker();
+  static bool start_worker(MemSnapshot* snapshot);
 
   // called by worker thread to complete shutdown process
   static void final_shutdown();
@@ -475,18 +475,18 @@
   // a thread can start to allocate memory before it is attached
   // to VM 'Thread', those memory activities are recorded here.
   // ThreadCritical is required to guard this global recorder.
-  static MemRecorder*     _global_recorder;
+  static MemRecorder* volatile _global_recorder;
 
   // main thread id
   debug_only(static intx   _main_thread_tid;)
 
   // pending recorders to be merged
-  static volatile MemRecorder*      _merge_pending_queue;
+  static MemRecorder* volatile     _merge_pending_queue;
 
   NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
 
   // pooled memory recorders
-  static volatile MemRecorder*      _pooled_recorders;
+  static MemRecorder* volatile     _pooled_recorders;
 
   // memory recorder pool management, uses following
   // counter to determine if a released memory recorder
--- a/src/share/vm/services/runtimeService.cpp	Thu Apr 11 13:57:44 2013 +0200
+++ b/src/share/vm/services/runtimeService.cpp	Thu Apr 18 16:04:33 2013 -0700
@@ -120,6 +120,8 @@
 
   // Print the time interval in which the app was executing
   if (PrintGCApplicationConcurrentTime) {
+    gclog_or_tty->date_stamp(PrintGCDateStamps);
+    gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print_cr("Application time: %3.7f seconds",
                                 last_application_time_sec());
   }
@@ -150,6 +152,8 @@
   // Print the time interval for which the app was stopped
   // during the current safepoint operation.
   if (PrintGCApplicationStoppedTime) {
+    gclog_or_tty->date_stamp(PrintGCDateStamps);
+    gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print_cr("Total time for which application threads "
                            "were stopped: %3.7f seconds",
                            last_safepoint_time_sec());