changeset 4675:e50c5a1869b1 hs24-b46

Merge
author amurillo
date Thu, 23 May 2013 13:57:41 -0700
parents af383e67806b (current diff) f2a9de120e2d (diff)
children f2614c006bb7
files
diffstat 29 files changed, 321 insertions(+), 103 deletions(-)
--- a/make/hotspot_version	Wed May 22 16:01:50 2013 -0700
+++ b/make/hotspot_version	Thu May 23 13:57:41 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=45
+HS_BUILD_NUMBER=46
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/jprt.properties	Wed May 22 16:01:50 2013 -0700
+++ b/make/jprt.properties	Thu May 23 13:57:41 2013 -0700
@@ -38,7 +38,7 @@
 
 # This tells jprt what default release we want to build
 
-jprt.hotspot.default.release=jdk7u14
+jprt.hotspot.default.release=jdk7u40
 
 jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
 
@@ -54,97 +54,97 @@
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7u14=${jprt.my.solaris.sparc.jdk7}
+jprt.my.solaris.sparc.jdk7u40=${jprt.my.solaris.sparc.jdk7}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7u14=${jprt.my.solaris.sparcv9.jdk7}
+jprt.my.solaris.sparcv9.jdk7u40=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7u14=${jprt.my.solaris.i586.jdk7}
+jprt.my.solaris.i586.jdk7u40=${jprt.my.solaris.i586.jdk7}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7u14=${jprt.my.solaris.x64.jdk7}
+jprt.my.solaris.x64.jdk7u40=${jprt.my.solaris.x64.jdk7}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7u14=${jprt.my.linux.i586.jdk7}
+jprt.my.linux.i586.jdk7u40=${jprt.my.linux.i586.jdk7}
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7u14=${jprt.my.linux.x64.jdk7}
+jprt.my.linux.x64.jdk7u40=${jprt.my.linux.x64.jdk7}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7u14=${jprt.my.linux.ppc.jdk7}
+jprt.my.linux.ppc.jdk7u40=${jprt.my.linux.ppc.jdk7}
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7u14=${jprt.my.linux.ppcv2.jdk7}
+jprt.my.linux.ppcv2.jdk7u40=${jprt.my.linux.ppcv2.jdk7}
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7u14=${jprt.my.linux.ppcsflt.jdk7}
+jprt.my.linux.ppcsflt.jdk7u40=${jprt.my.linux.ppcsflt.jdk7}
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7u14=${jprt.my.linux.armvfp.jdk7}
+jprt.my.linux.armvfp.jdk7u40=${jprt.my.linux.armvfp.jdk7}
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfpsflt.jdk8=linux_armvfpsflt_2.6
 jprt.my.linux.armvfpsflt.jdk7=linux_armvfpsflt_2.6
-jprt.my.linux.armvfpsflt.jdk7u14=${jprt.my.linux.armvfpsflt.jdk7}
+jprt.my.linux.armvfpsflt.jdk7u40=${jprt.my.linux.armvfpsflt.jdk7}
 jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfphflt.jdk8=linux_armvfphflt_2.6
 jprt.my.linux.armvfphflt.jdk7=linux_armvfphflt_2.6
-jprt.my.linux.armvfphflt.jdk7u14=${jprt.my.linux.armvfphflt.jdk7}
+jprt.my.linux.armvfphflt.jdk7u40=${jprt.my.linux.armvfphflt.jdk7}
 jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armv6.jdk8=linux_armv6_2.6
 jprt.my.linux.armv6.jdk7=linux_armv6_2.6
-jprt.my.linux.armv6.jdk7u14=${jprt.my.linux.armv6.jdk7}
+jprt.my.linux.armv6.jdk7u40=${jprt.my.linux.armv6.jdk7}
 jprt.my.linux.armv6=${jprt.my.linux.armv6.${jprt.tools.default.release}}
 
 jprt.my.linux.armvs.jdk8=linux_armvs_2.6
 jprt.my.linux.armvs.jdk7=linux_armvs_2.6
-jprt.my.linux.armvs.jdk7u14=${jprt.my.linux.armvs.jdk7}
+jprt.my.linux.armvs.jdk7u40=${jprt.my.linux.armvs.jdk7}
 jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
 
 jprt.my.linux.armvh.jdk8=linux_armvh_2.6
 jprt.my.linux.armvh.jdk7=linux_armvh_2.6
-jprt.my.linux.armvh.jdk7u14=${jprt.my.linux.armvh.jdk7}
+jprt.my.linux.armvh.jdk7u40=${jprt.my.linux.armvh.jdk7}
 jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
 
 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7u14=${jprt.my.linux.armsflt.jdk7}
+jprt.my.linux.armsflt.jdk7u40=${jprt.my.linux.armsflt.jdk7}
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
 jprt.my.macosx.x64.jdk8=macosx_x64_10.7
 jprt.my.macosx.x64.jdk7=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7u14=${jprt.my.macosx.x64.jdk7}
+jprt.my.macosx.x64.jdk7u40=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7u14=${jprt.my.windows.i586.jdk7}
+jprt.my.windows.i586.jdk7u40=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7u14=${jprt.my.windows.x64.jdk7}
+jprt.my.windows.x64.jdk7u40=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
@@ -183,7 +183,7 @@
 
 jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7u14=${jprt.build.targets.all}
+jprt.build.targets.jdk7u40=${jprt.build.targets.all}
 jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
 
 # Subset lists of test targets for this source tree
@@ -476,7 +476,7 @@
 
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7u14=${jprt.test.targets.jdk7}
+jprt.test.targets.jdk7u40=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
 
 # The default test/Makefile targets that should be run
@@ -536,7 +536,7 @@
 
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7u14=${jprt.make.rule.test.targets.jdk7}
+jprt.make.rule.test.targets.jdk7u40=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
 
 # 7155453: Work-around to prevent popups on OSX from blocking test completion
--- a/src/os/posix/vm/os_posix.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/os/posix/vm/os_posix.cpp	Thu May 23 13:57:41 2013 -0700
@@ -134,6 +134,10 @@
   return aligned_base;
 }
 
+bool os::can_release_partial_region() {
+  return true;
+}
+
 void os::Posix::print_load_average(outputStream* st) {
   st->print("load average:");
   double loadavg[3];
--- a/src/os/windows/vm/os_windows.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Thu May 23 13:57:41 2013 -0700
@@ -2971,6 +2971,10 @@
   }
 }
 
+bool os::can_release_partial_region() {
+  return false;
+}
+
 // Multiple threads can race in this code but it's not possible to unmap small sections of
 // virtual space to get requested alignment, like posix-like os's.
 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
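
The two hunks above introduce a per-platform capability query: POSIX munmap() can release any page-aligned sub-range of a mapping, while Windows VirtualFree(MEM_RELEASE) must be given the exact base of the original reservation, so a partial release has to fall back to uncommitting. A minimal standalone sketch of that dispatch pattern (simplified, not the HotSpot code; the stub helpers are illustrative only):

// Standalone sketch (not HotSpot code) of the capability query these two
// platform files implement.
#include <cstddef>
#include <cstdio>

#ifdef _WIN32
static bool can_release_partial_region() { return false; }  // must free whole reservation
#else
static bool can_release_partial_region() { return true; }   // munmap handles sub-ranges
#endif

// Stand-ins for os::release_memory()/os::uncommit_memory(); a real caller
// would hit the OS here.
static bool release_memory(char*, std::size_t)  { std::puts("release");  return true; }
static bool uncommit_memory(char*, std::size_t) { std::puts("uncommit"); return true; }

// Mirrors os::release_or_uncommit_partial_region() added later in this changeset.
static bool release_or_uncommit_partial_region(char* addr, std::size_t bytes) {
  return can_release_partial_region() ? release_memory(addr, bytes)
                                      : uncommit_memory(addr, bytes);
}

int main() {
  char dummy[16];
  release_or_uncommit_partial_region(dummy, sizeof(dummy));
}
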
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu May 23 13:57:41 2013 -0700
@@ -285,6 +285,7 @@
       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
     }
+    coalBirth(mr.word_size());
   }
   _promoInfo.reset();
   _smallLinearAllocBlock._ptr = NULL;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 23 13:57:41 2013 -0700
@@ -62,7 +62,8 @@
 
 // statics
 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
-bool          CMSCollector::_full_gc_requested          = false;
+bool CMSCollector::_full_gc_requested = false;
+GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
 
 //////////////////////////////////////////////////////////////////
 // In support of CMS/VM thread synchronization
@@ -1683,12 +1684,13 @@
 
 }
 
-void CMSCollector::request_full_gc(unsigned int full_gc_count) {
+void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   unsigned int gc_count = gch->total_full_collections();
   if (gc_count == full_gc_count) {
     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
     _full_gc_requested = true;
+    _full_gc_cause = cause;
     CGC_lock->notify();   // nudge CMS thread
   } else {
     assert(gc_count > full_gc_count, "Error: causal loop");
@@ -1886,6 +1888,10 @@
     // Reference objects are active.
     ref_processor()->clean_up_discovered_references();
 
+    if (first_state > Idling) {
+      save_heap_summary();
+    }
+
     do_compaction_work(clear_all_soft_refs);
 
     // Has the GC time limit been exceeded?
@@ -2123,7 +2133,7 @@
       // required.
       _collectorState = FinalMarking;
   }
-  collect_in_foreground(clear_all_soft_refs);
+  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
 
   // For a mark-sweep, compute_new_size() will be called
   // in the heap's do_collection() method.
@@ -2186,7 +2196,7 @@
 // one "collect" method between the background collector and the foreground
 // collector but the if-then-else required made it cleaner to have
 // separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
+void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
@@ -2205,7 +2215,7 @@
     } else {
       assert(_collectorState == Idling, "Should be idling before start.");
       _collectorState = InitialMarking;
-      register_gc_start(GCCause::_cms_concurrent_mark);
+      register_gc_start(cause);
       // Reset the expansion cause, now that we are about to begin
       // a new cycle.
       clear_expansion_cause();
@@ -2214,6 +2224,7 @@
     // ensuing concurrent GC cycle.
     update_should_unload_classes();
     _full_gc_requested = false;           // acks all outstanding full gc requests
+    _full_gc_cause = GCCause::_no_gc;
     // Signal that we are about to start a collection
     gch->increment_total_full_collections();  // ... starting a collection cycle
     _collection_count_start = gch->total_full_collections();
@@ -2460,7 +2471,7 @@
   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_perm_gen_summary);
 }
 
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
+void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
          "Foreground collector should be waiting, not executing");
   assert(Thread::current()->is_VM_thread(), "A foreground collection"
@@ -2493,7 +2504,7 @@
     }
     switch (_collectorState) {
       case InitialMarking:
-        register_foreground_gc_start(GenCollectedHeap::heap()->gc_cause());
+        register_foreground_gc_start(cause);
         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
         checkpointRootsInitial(false);
         assert(_collectorState == Marking, "Collector state should have changed"
@@ -6396,7 +6407,6 @@
 }
 
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
-  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu May 23 13:57:41 2013 -0700
@@ -573,8 +573,9 @@
   bool _completed_initialization;
 
   // In support of ExplicitGCInvokesConcurrent
-  static   bool _full_gc_requested;
-  unsigned int  _collection_count_start;
+  static bool _full_gc_requested;
+  static GCCause::Cause _full_gc_cause;
+  unsigned int _collection_count_start;
 
   // Should we unload classes this concurrent cycle?
   bool _should_unload_classes;
@@ -905,11 +906,11 @@
                bool   clear_all_soft_refs,
                size_t size,
                bool   tlab);
-  void collect_in_background(bool clear_all_soft_refs);
-  void collect_in_foreground(bool clear_all_soft_refs);
+  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
+  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
 
   // In support of ExplicitGCInvokesConcurrent
-  static void request_full_gc(unsigned int full_gc_count);
+  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
   // Should we unload classes in a particular concurrent cycle?
   bool should_unload_classes() const {
     return _should_unload_classes;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu May 23 13:57:41 2013 -0700
@@ -140,7 +140,9 @@
   while (!_should_terminate) {
     sleepBeforeNextCycle();
     if (_should_terminate) break;
-    _collector->collect_in_background(false);  // !clear_all_soft_refs
+    GCCause::Cause cause = _collector->_full_gc_requested ?
+      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
+    _collector->collect_in_background(false, cause);
   }
   assert(_should_terminate, "just checking");
   // Check that the state of any protocol for synchronization
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Thu May 23 13:57:41 2013 -0700
@@ -241,7 +241,7 @@
     // In case CMS thread was in icms_wait(), wake it up.
     CMSCollector::start_icms();
     // Nudge the CMS thread to start a concurrent collection.
-    CMSCollector::request_full_gc(_full_gc_count_before);
+    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   } else {
     assert(_full_gc_count_before < gch->total_full_collections(), "Error");
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
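
The CMS hunks above thread the triggering GCCause from the explicit-GC VM operation through request_full_gc() into the background collector thread, so tracing reports the real cause instead of the generic _cms_concurrent_mark. A standalone sketch of that hand-off (simplified, not the HotSpot code; it only models the request flag, the cause, and the notification under one lock):

// Standalone sketch (not HotSpot code): the requesting thread records *why*
// a collection is wanted together with the request flag; the collector
// thread consumes both under the same lock, so the reported cause matches
// the actual trigger.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

enum class GcCause { None, JavaLangSystemGc, ConcurrentMark };

struct Collector {
  std::mutex lock;
  std::condition_variable cv;
  bool full_gc_requested = false;
  GcCause full_gc_cause = GcCause::None;

  // Analogous to CMSCollector::request_full_gc(count, cause) in the diff.
  void request_full_gc(GcCause cause) {
    std::lock_guard<std::mutex> g(lock);
    full_gc_requested = true;
    full_gc_cause = cause;
    cv.notify_one();                      // nudge the collector thread
  }

  // Analogous to the ConcurrentMarkSweepThread::run loop in the diff.
  void run_one_cycle() {
    std::unique_lock<std::mutex> g(lock);
    cv.wait(g, [&] { return full_gc_requested; });
    GcCause cause = full_gc_requested ? full_gc_cause : GcCause::ConcurrentMark;
    full_gc_requested = false;            // ack the request
    full_gc_cause = GcCause::None;
    g.unlock();
    std::cout << "collect_in_background(cause=" << static_cast<int>(cause) << ")\n";
  }
};

int main() {
  Collector c;
  std::thread t(&Collector::run_one_cycle, &c);
  c.request_full_gc(GcCause::JavaLangSystemGc);
  t.join();
}
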
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 23 13:57:41 2013 -0700
@@ -1310,7 +1310,6 @@
 
     // Timing
     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
-    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Thu May 23 13:57:41 2013 -0700
@@ -227,7 +227,6 @@
 }
 
 void VM_CGC_Operation::doit() {
-  gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
   SharedHeap* sh = SharedHeap::heap();
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu May 23 13:57:41 2013 -0700
@@ -170,7 +170,6 @@
   {
     HandleMark hm;
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu May 23 13:57:41 2013 -0700
@@ -2062,7 +2062,6 @@
     gc_task_manager()->task_idle_workers();
     heap->set_par_threads(gc_task_manager()->active_workers());
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu May 23 13:57:41 2013 -0700
@@ -333,7 +333,6 @@
     ResourceMark rm;
     HandleMark hm;
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Thu May 23 13:57:41 2013 -0700
@@ -91,24 +91,32 @@
   send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
 }
 
-class ObjectCountEventSenderClosure : public KlassInfoClosure {
-  GCTracer* _gc_tracer;
- public:
-  ObjectCountEventSenderClosure(GCTracer* gc_tracer) : _gc_tracer(gc_tracer) {}
- private:
-  void do_cinfo(KlassInfoEntry* entry) {
-    if (is_visible_klass(entry->klass())) {
-      _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
-                                                   entry->words() * BytesPerWord);
-      }
+void ObjectCountEventSenderClosure::do_cinfo(KlassInfoEntry* entry) {
+  if (should_send_event(entry)) {
+    send_event(entry);
   }
+}
 
+void ObjectCountEventSenderClosure::send_event(KlassInfoEntry* entry) {
+  _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
+                                               entry->words() * BytesPerWord);
+}
+
+bool ObjectCountEventSenderClosure::should_send_event(KlassInfoEntry* entry) const {
+  double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
+  return percentage_of_heap > _size_threshold_percentage;
+}
+
+bool ObjectCountFilter::do_object_b(oop obj) {
+  bool is_alive = _is_alive == NULL? true : _is_alive->do_object_b(obj);
+  return is_alive && is_externally_visible_klass(obj->klass());
+}
+
+bool ObjectCountFilter::is_externally_visible_klass(klassOop k) const {
   // Do not expose internal implementation specific classes
-  bool is_visible_klass(klassOop k) {
-    return k->klass_part()->oop_is_instance() ||
-           (k->klass_part()->oop_is_array() && k != Universe::systemObjArrayKlassObj());
-  }
-};
+  return (k->klass_part()->oop_is_instance() || k->klass_part()->oop_is_array()) &&
+         k != Universe::systemObjArrayKlassObj();
+}
 
 void GCTracer::report_object_count_after_gc(BoolObjectClosure *is_alive_cl) {
   if (should_send_object_count_after_gc_event()) {
@@ -116,8 +124,11 @@
 
     KlassInfoTable cit(HeapInspection::start_of_perm_gen());
     if (!cit.allocation_failed()) {
-      ObjectCountEventSenderClosure event_sender(this);
-      HeapInspection::instance_inspection(&cit, &event_sender, false, is_alive_cl);
+      ObjectCountFilter object_filter(is_alive_cl);
+      HeapInspection::populate_table(&cit, false, &object_filter);
+
+      ObjectCountEventSenderClosure event_sender(this, cit.size_of_instances_in_words());
+      cit.iterate(&event_sender);
     }
   }
 }
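
The gcTrace.cpp rewrite above splits the old closure into a liveness/visibility filter (ObjectCountFilter) and an event sender that reports a class only when its instances exceed ObjectCountCutOffPercent of the words counted during the inspection. A standalone sketch of that threshold test (simplified, not the HotSpot code; the sample data is made up for illustration):

// Standalone sketch (not HotSpot code) of the size-threshold test added in
// ObjectCountEventSenderClosure::should_send_event(): a class is reported
// only if its instances occupy more than the cut-off fraction of the heap
// words counted during the inspection.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct ClassStats {
  std::string name;
  std::size_t instance_count;
  std::size_t words;          // total heap words occupied by instances
};

// Mirrors: percentage_of_heap > ObjectCountCutOffPercent / 100
bool should_send_event(const ClassStats& s, std::size_t total_words,
                       double cutoff_percent /* e.g. 0.5 */) {
  double fraction = static_cast<double>(s.words) / total_words;
  return fraction > cutoff_percent / 100.0;
}

int main() {
  std::vector<ClassStats> table = {
    {"java.lang.String", 120000, 600000},
    {"java.lang.Integer",    500,   2000},
  };
  std::size_t total = 0;
  for (const auto& s : table) total += s.words;

  for (const auto& s : table) {
    if (should_send_event(s, total, 0.5)) {
      std::cout << s.name << ": " << s.instance_count << " instances\n";
    }
  }
}
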
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Thu May 23 13:57:41 2013 -0700
@@ -30,6 +30,7 @@
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
+#include "memory/klassInfoClosure.hpp"
 #include "memory/referenceType.hpp"
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -139,6 +140,33 @@
   bool should_send_object_count_after_gc_event() const;
 };
 
+class ObjectCountEventSenderClosure : public KlassInfoClosure {
+  GCTracer* _gc_tracer;
+  const double _size_threshold_percentage;
+  const size_t _total_size_in_words;
+ public:
+  ObjectCountEventSenderClosure(GCTracer* gc_tracer, size_t total_size_in_words) :
+    _gc_tracer(gc_tracer),
+    _size_threshold_percentage(ObjectCountCutOffPercent / 100),
+    _total_size_in_words(total_size_in_words)
+  {}
+  virtual void do_cinfo(KlassInfoEntry* entry);
+ protected:
+  virtual void send_event(KlassInfoEntry* entry);
+ private:
+  bool should_send_event(KlassInfoEntry* entry) const;
+};
+
+class ObjectCountFilter : public BoolObjectClosure {
+  BoolObjectClosure* _is_alive;
+ public:
+  ObjectCountFilter(BoolObjectClosure* is_alive = NULL) : _is_alive(is_alive) {}
+  bool do_object_b(oop obj);
+  void do_object(oop obj) { ShouldNotReachHere(); }
+ private:
+  bool is_externally_visible_klass(klassOop k) const;
+};
+
 class YoungGCTracer : public GCTracer {
   static const uint UNSET_TENURING_THRESHOLD = (uint) -1;
 
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Thu May 23 13:57:41 2013 -0700
@@ -58,10 +58,8 @@
   }
 
   if (_doit) {
-    if (PrintGCTimeStamps) {
-      gclog_or_tty->stamp();
-      gclog_or_tty->print(": ");
-    }
+    gclog_or_tty->date_stamp(PrintGCDateStamps);
+    gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print("[%s", title);
     gclog_or_tty->flush();
   }
--- a/src/share/vm/memory/allocation.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/memory/allocation.hpp	Thu May 23 13:57:41 2013 -0700
@@ -555,4 +555,23 @@
   void check()    PRODUCT_RETURN;
 };
 
+// Helper class to allocate arrays that may become large.
+// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
+// and uses mapped memory for larger allocations.
+// Most OS mallocs do something similar but Solaris malloc does not revert
+// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
+// is set so that we always use malloc except for Solaris where we set the
+// limit to get mapped memory.
+template <class E, MEMFLAGS F>
+class ArrayAllocator : StackObj {
+  char* _addr;
+  bool _use_malloc;
+  size_t _size;
+ public:
+  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
+  ~ArrayAllocator() { free(); }
+  E* allocate(size_t length);
+  void free();
+};
+
 #endif // SHARE_VM_MEMORY_ALLOCATION_HPP
--- a/src/share/vm/memory/allocation.inline.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/memory/allocation.inline.hpp	Thu May 23 13:57:41 2013 -0700
@@ -103,5 +103,49 @@
    FreeHeap(p, F);
 }
 
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::allocate(size_t length) {
+  assert(_addr == NULL, "Already in use");
+
+  _size = sizeof(E) * length;
+  _use_malloc = _size < ArrayAllocatorMallocLimit;
+
+  if (_use_malloc) {
+    _addr = AllocateHeap(_size, F);
+    if (_addr == NULL && _size >=  (size_t)os::vm_allocation_granularity()) {
+      // malloc failed let's try with mmap instead
+      _use_malloc = false;
+    } else {
+      return (E*)_addr;
+    }
+  }
+
+  int alignment = os::vm_allocation_granularity();
+  _size = align_size_up(_size, alignment);
+
+  _addr = os::reserve_memory(_size, NULL, alignment, F);
+  if (_addr == NULL) {
+    vm_exit_out_of_memory(_size, "Allocator (reserve)");
+  }
+
+  bool success = os::commit_memory(_addr, _size, false /* executable */);
+  if (!success) {
+    vm_exit_out_of_memory(_size, "Allocator (commit)");
+  }
+
+  return (E*)_addr;
+}
+
+template<class E, MEMFLAGS F>
+void ArrayAllocator<E, F>::free() {
+  if (_addr != NULL) {
+    if (_use_malloc) {
+      FreeHeap(_addr, F);
+    } else {
+      os::release_memory(_addr, _size);
+    }
+    _addr = NULL;
+  }
+}
 
 #endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
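
The new ArrayAllocator above chooses malloc for requests below ArrayAllocatorMallocLimit and reserved-plus-committed mapped memory otherwise, retrying with a mapping when malloc fails; taskqueue.hpp switches to it at the end of this changeset. A standalone POSIX/Linux-only sketch of the same malloc-or-mmap strategy (simplified, not the HotSpot code):

// Standalone sketch (not HotSpot code): small requests go to malloc, large
// requests (or a failed malloc) fall back to an anonymous mapping, and
// release uses the matching primitive.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>
#include <unistd.h>

class RawArray {
  void*       _addr = nullptr;
  std::size_t _size = 0;
  bool        _use_malloc = false;
 public:
  // 'limit' plays the role of ArrayAllocatorMallocLimit in the diff.
  void* allocate(std::size_t bytes, std::size_t limit) {
    _size = bytes;
    _use_malloc = bytes < limit;
    if (_use_malloc) {
      _addr = std::malloc(bytes);
      if (_addr != nullptr) return _addr;
      _use_malloc = false;                        // malloc failed, map instead
    }
    std::size_t page = static_cast<std::size_t>(sysconf(_SC_PAGESIZE));
    _size = (bytes + page - 1) & ~(page - 1);     // round up to page size
    _addr = mmap(nullptr, _size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return _addr == MAP_FAILED ? nullptr : _addr;
  }
  void release() {
    if (_addr == nullptr) return;
    if (_use_malloc) std::free(_addr);
    else             munmap(_addr, _size);
    _addr = nullptr;
  }
  ~RawArray() { release(); }
};

int main() {
  RawArray small_arr, large_arr;
  std::printf("small: %p\n", small_arr.allocate(4 * 1024, 64 * 1024));
  std::printf("large: %p\n", large_arr.allocate(8 * 1024 * 1024, 64 * 1024));
}
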
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu May 23 13:57:41 2013 -0700
@@ -477,7 +477,6 @@
 
     bool complete = full && (max_level == (n_gens()-1));
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
 
--- a/src/share/vm/memory/heapInspection.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/memory/heapInspection.cpp	Thu May 23 13:57:41 2013 -0700
@@ -113,9 +113,8 @@
   }
 }
 
-KlassInfoTable::KlassInfoTable(HeapWord* ref) {
-  _size = 0;
-  _ref = ref;
+KlassInfoTable::KlassInfoTable(HeapWord* ref) :
+  _size(0), _ref(ref), _size_of_instances_in_words(0) {
   _buckets = (KlassInfoBucket *) os::malloc(sizeof(KlassInfoBucket) * _num_buckets, mtInternal);
   if (_buckets != NULL) {
     _size = _num_buckets;
@@ -160,6 +159,7 @@
   if (elt != NULL) {
     elt->set_count(elt->count() + 1);
     elt->set_words(elt->words() + obj->size());
+    _size_of_instances_in_words += obj->size();
     return true;
   } else {
     return false;
@@ -173,6 +173,10 @@
   }
 }
 
+size_t KlassInfoTable::size_of_instances_in_words() const {
+  return _size_of_instances_in_words;
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -282,10 +286,9 @@
   }
 }
 
-size_t HeapInspection::instance_inspection(KlassInfoTable* cit,
-                                           KlassInfoClosure* cl,
-                                           bool need_prologue,
-                                           BoolObjectClosure* filter) {
+size_t HeapInspection::populate_table(KlassInfoTable* cit,
+                                      bool need_prologue,
+                                      BoolObjectClosure *filter) {
   ResourceMark rm;
 
   if (need_prologue) {
@@ -294,7 +297,6 @@
 
   RecordInstanceClosure ric(cit, filter);
   Universe::heap()->object_iterate(&ric);
-  cit->iterate(cl);
 
   // need to run epilogue if we run prologue
   if (need_prologue) {
@@ -309,17 +311,20 @@
 
   KlassInfoTable cit(start_of_perm_gen());
   if (!cit.allocation_failed()) {
+    size_t missed_count = populate_table(&cit, need_prologue);
+    if (missed_count != 0) {
+      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+                   " total instances in data below",
+                   missed_count);
+    }
+
     KlassInfoHisto histo("\n"
                      " num     #instances         #bytes  class name\n"
                      "----------------------------------------------");
     HistoClosure hc(&histo);
 
-    size_t missed_count = instance_inspection(&cit, &hc, need_prologue);
-    if (missed_count != 0) {
-      st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
-                   " total instances in data below",
-                   missed_count);
-    }
+    cit.iterate(&hc);
+
     histo.sort();
     histo.print_on(st);
   } else {
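
The heapInspection.cpp hunks above split the old instance_inspection() into populate_table(), which fills the per-class table once, and separate iteration by whichever consumer needs it (the histogram printer here, the object-count event sender in gcTrace.cpp). A standalone sketch of that populate-then-iterate shape (simplified, not the HotSpot code):

// Standalone sketch (not HotSpot code): one pass fills a per-class table,
// then independent "closures" consume it (a histogram printer here).
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Entry { std::size_t count = 0, words = 0; };
using ClassTable = std::map<std::string, Entry>;

// Phase 1: analogous to HeapInspection::populate_table().
void populate_table(ClassTable& table,
                    const std::vector<std::pair<std::string, std::size_t>>& heap) {
  for (const auto& obj : heap) {          // (class name, object size in words)
    Entry& e = table[obj.first];
    e.count += 1;
    e.words += obj.second;
  }
}

// Phase 2: analogous to KlassInfoTable::iterate(KlassInfoClosure*).
template <typename Closure>
void iterate(const ClassTable& table, Closure do_cinfo) {
  for (const auto& kv : table) do_cinfo(kv.first, kv.second);
}

int main() {
  ClassTable cit;
  populate_table(cit, {{"java.lang.String", 6}, {"java.lang.String", 8},
                       {"byte[]", 128}});
  iterate(cit, [](const std::string& name, const Entry& e) {
    std::cout << name << ": " << e.count << " instances, "
              << e.words << " words\n";
  });
}
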
--- a/src/share/vm/memory/heapInspection.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/memory/heapInspection.hpp	Thu May 23 13:57:41 2013 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_HEAPINSPECTION_HPP
 
 #include "memory/allocation.inline.hpp"
+#include "memory/klassInfoClosure.hpp"
 #include "oops/oop.inline.hpp"
 
 
@@ -64,12 +65,6 @@
   void print_on(outputStream* st) const;
 };
 
-class KlassInfoClosure: public StackObj {
- public:
-  // Called for each KlassInfoEntry.
-  virtual void do_cinfo(KlassInfoEntry* cie) = 0;
-};
-
 class KlassInfoBucket: public CHeapObj<mtInternal> {
  private:
   KlassInfoEntry* _list;
@@ -86,6 +81,7 @@
  private:
   int _size;
   static const int _num_buckets = 20011;
+  size_t _size_of_instances_in_words;
 
   // An aligned reference address (typically the least
   // address in the perm gen) used for hashing klass
@@ -102,6 +98,7 @@
   bool record_instance(const oop obj);
   void iterate(KlassInfoClosure* cic);
   bool allocation_failed() { return _buckets == NULL; }
+  size_t size_of_instances_in_words() const;
 };
 
 class KlassInfoHisto : public StackObj {
@@ -125,10 +122,9 @@
 class HeapInspection : public AllStatic {
  public:
   static void heap_inspection(outputStream* st, bool need_prologue);
-  static size_t instance_inspection(KlassInfoTable* cit,
-                                    KlassInfoClosure* cl,
-                                    bool need_prologue,
-                                    BoolObjectClosure* filter = NULL);
+  static size_t populate_table(KlassInfoTable* cit,
+                               bool need_prologue,
+                               BoolObjectClosure* filter = NULL);
   static HeapWord* start_of_perm_gen();
   static void find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result);
  private:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/klassInfoClosure.hpp	Thu May 23 13:57:41 2013 -0700
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
+#define SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
+
+class KlassInfoEntry;
+
+class KlassInfoClosure : public StackObj {
+ public:
+  // Called for each KlassInfoEntry.
+  virtual void do_cinfo(KlassInfoEntry* cie) = 0;
+};
+
+#endif // SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
--- a/src/share/vm/runtime/globals.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu May 23 13:57:41 2013 -0700
@@ -2302,6 +2302,10 @@
           "Print diagnostic message when GC is stalled"                     \
           "by JNI critical section")                                        \
                                                                             \
+  experimental(double, ObjectCountCutOffPercent, 0.5,                       \
+          "The percentage of the used heap that the instances of a class "  \
+          "must occupy for the class to generate a trace event.")           \
+                                                                            \
   /* GC log rotation setting */                                             \
                                                                             \
   product(bool, UseGCLogFileRotation, false,                                \
@@ -3633,6 +3637,11 @@
   product(bool, PrintGCCause, true,                                         \
           "Include GC cause in GC logging")                                 \
                                                                             \
+  experimental(uintx, ArrayAllocatorMallocLimit,                            \
+          SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx),                        \
+          "Allocation less than this value will be allocated "              \
+          "using malloc. Larger allocations will use mmap.")                \
+                                                                            \
   product(bool, EnableTracing, false,                                       \
                   "Enable event-based tracing")                             \
   product(bool, UseLockedTracing, false,                                    \
--- a/src/share/vm/runtime/os.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/runtime/os.cpp	Thu May 23 13:57:41 2013 -0700
@@ -1461,6 +1461,12 @@
   return res;
 }
 
+bool os::release_or_uncommit_partial_region(char * addr, size_t bytes) {
+  if (can_release_partial_region()) {
+    return release_memory(addr, bytes);
+  }
+  return uncommit_memory(addr, bytes);
+}
 
 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                            char *addr, size_t bytes, bool read_only,
--- a/src/share/vm/runtime/os.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/runtime/os.hpp	Thu May 23 13:57:41 2013 -0700
@@ -266,6 +266,8 @@
                               bool executable = false);
   static bool   uncommit_memory(char* addr, size_t bytes);
   static bool   release_memory(char* addr, size_t bytes);
+  static bool   can_release_partial_region();
+  static bool   release_or_uncommit_partial_region(char* addr, size_t bytes);
 
   enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
   static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
--- a/src/share/vm/runtime/virtualspace.cpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.cpp	Thu May 23 13:57:41 2013 -0700
@@ -81,17 +81,41 @@
   const size_t end_delta = len - (beg_delta + required_size);
 
   if (beg_delta != 0) {
-    os::release_memory(addr, beg_delta);
+    os::release_or_uncommit_partial_region(addr, beg_delta);
   }
 
   if (end_delta != 0) {
     char* release_addr = (char*) (s + beg_delta + required_size);
-    os::release_memory(release_addr, end_delta);
+    os::release_or_uncommit_partial_region(release_addr, end_delta);
   }
 
   return (char*) (s + beg_delta);
 }
 
+void ReservedSpace::set_raw_base_and_size(char * const raw_base,
+                                          size_t raw_size) {
+  assert(raw_base == NULL || !os::can_release_partial_region(), "sanity");
+  _raw_base = raw_base;
+  _raw_size = raw_size;
+}
+
+// On some systems (e.g., windows), the address returned by os::reserve_memory()
+// is the only addr that can be passed to os::release_memory().  If alignment
+// was done by this class, that original address is _raw_base.
+void ReservedSpace::release_memory(char* default_addr, size_t default_size) {
+  bool ok;
+  if (_raw_base == NULL) {
+    ok = os::release_memory(default_addr, default_size);
+  } else {
+    assert(!os::can_release_partial_region(), "sanity");
+    ok = os::release_memory(_raw_base, _raw_size);
+  }
+  if (!ok) {
+    fatal("os::release_memory failed");
+  }
+  set_raw_base_and_size(NULL, 0);
+}
+
 char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                        const size_t prefix_size,
                                        const size_t prefix_align,
@@ -110,6 +134,10 @@
     fatal("os::release_memory failed");
   }
 
+  if (!os::can_release_partial_region()) {
+    set_raw_base_and_size(raw_addr, reserve_size);
+  }
+
 #ifdef ASSERT
   if (result != NULL) {
     const size_t raw = size_t(raw_addr);
@@ -127,8 +155,10 @@
 }
 
 // Helper method.
-static bool failed_to_reserve_as_requested(char* base, char* requested_address,
-                                           const size_t size, bool special)
+bool ReservedSpace::failed_to_reserve_as_requested(char* base,
+                                                   char* requested_address,
+                                                   const size_t size,
+                                                   bool special)
 {
   if (base == requested_address || requested_address == NULL)
     return false; // did not fail
@@ -147,9 +177,7 @@
         fatal("os::release_memory_special failed");
       }
     } else {
-      if (!os::release_memory(base, size)) {
-        fatal("os::release_memory failed");
-      }
+      release_memory(base, size);
     }
   }
   return true;
@@ -177,6 +205,8 @@
   assert(noaccess_prefix == 0 ||
          noaccess_prefix == prefix_align, "noaccess prefix wrong");
 
+  set_raw_base_and_size(NULL, 0);
+
   // Add in noaccess_prefix to prefix_size;
   const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
   const size_t size = adjusted_prefix_size + suffix_size;
@@ -224,9 +254,7 @@
     // result is often the same address (if the kernel hands out virtual
     // addresses from low to high), or an address that is offset by the increase
     // in size.  Exploit that to minimize the amount of extra space requested.
-    if (!os::release_memory(addr, size)) {
-      fatal("os::release_memory failed");
-    }
+    release_memory(addr, size);
 
     const size_t extra = MAX2(ofs, suffix_align - ofs);
     addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
@@ -265,6 +293,8 @@
   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
          "not a power of 2");
 
+  set_raw_base_and_size(NULL, 0);
+
   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 
   // Assert that if noaccess_prefix is used, it is the same as alignment.
@@ -340,7 +370,8 @@
     // Check alignment constraints
     if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
       // Base not aligned, retry
-      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+      release_memory(base, size);
+
       // Make sure that size is aligned
       size = align_size_up(size, alignment);
       base = os::reserve_memory_aligned(size, alignment);
@@ -378,6 +409,7 @@
          "size not allocation aligned");
   _base = base;
   _size = size;
+  set_raw_base_and_size(NULL, 0);
   _alignment = alignment;
   _noaccess_prefix = 0;
   _special = special;
@@ -433,7 +465,7 @@
     if (special()) {
       os::release_memory_special(real_base, real_size);
     } else{
-      os::release_memory(real_base, real_size);
+      release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;
--- a/src/share/vm/runtime/virtualspace.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.hpp	Thu May 23 13:57:41 2013 -0700
@@ -35,6 +35,12 @@
   char*  _base;
   size_t _size;
   size_t _noaccess_prefix;
+
+  // The base and size prior to any alignment done by this class; used only on
+  // systems that cannot release part of a region.
+  char*  _raw_base;
+  size_t _raw_size;
+
   size_t _alignment;
   bool   _special;
   bool   _executable;
@@ -42,11 +48,20 @@
   // ReservedSpace
   ReservedSpace(char* base, size_t size, size_t alignment, bool special,
                 bool executable);
+
+  bool failed_to_reserve_as_requested(char* base, char* requested_address,
+                                      const size_t size, bool special);
   void initialize(size_t size, size_t alignment, bool large,
                   char* requested_address,
                   const size_t noaccess_prefix,
                   bool executable);
 
+  inline void set_raw_base_and_size(char * const raw_base, size_t raw_size);
+
+  // Release virtual address space.  If alignment was done, use the saved
+  // address and size when releasing.
+  void release_memory(char * default_addr, size_t default_size);
+
   // Release parts of an already-reserved memory region [addr, addr + len) to
   // get a new region that has "compound alignment."  Return the start of the
   // resulting region, or NULL on failure.
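
The virtualspace changes above make ReservedSpace remember the pre-alignment base and size (_raw_base/_raw_size) on platforms that cannot release part of a region, and hand those back to the OS at release time while callers only ever see the aligned base. A standalone sketch of that bookkeeping, with malloc/free standing in for reserve/release (simplified, not the HotSpot code):

// Standalone sketch (not HotSpot code): when the OS cannot release part of
// a reservation, keep the original (raw) base and size and free those, even
// though users only ever see the aligned base.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct AlignedReservation {
  char*       base     = nullptr;   // aligned base handed to users
  char*       raw_base = nullptr;   // what the "OS" actually returned
  std::size_t raw_size = 0;

  void reserve(std::size_t size, std::size_t alignment) {
    raw_size = size + alignment;    // over-reserve so an aligned base always fits
    raw_base = static_cast<char*>(std::malloc(raw_size));
    std::uintptr_t p = reinterpret_cast<std::uintptr_t>(raw_base);
    base = reinterpret_cast<char*>((p + alignment - 1) & ~(alignment - 1));
  }
  void release() {
    // Like ReservedSpace::release_memory(): free the raw region, not 'base'.
    std::free(raw_base);
    raw_base = base = nullptr;
    raw_size = 0;
  }
};

int main() {
  AlignedReservation r;
  r.reserve(1 << 20, 64 * 1024);
  std::printf("raw=%p aligned=%p\n", static_cast<void*>(r.raw_base),
              static_cast<void*>(r.base));
  r.release();
}
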
--- a/src/share/vm/utilities/taskqueue.hpp	Wed May 22 16:01:50 2013 -0700
+++ b/src/share/vm/utilities/taskqueue.hpp	Thu May 23 13:57:41 2013 -0700
@@ -253,6 +253,7 @@
 
 template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
 class GenericTaskQueue: public TaskQueueSuper<N, F> {
+  ArrayAllocator<E, F> _array_allocator;
 protected:
   typedef typename TaskQueueSuper<N, F>::Age Age;
   typedef typename TaskQueueSuper<N, F>::idx_t idx_t;
@@ -314,7 +315,7 @@
 
 template<class E, MEMFLAGS F, unsigned int N>
 void GenericTaskQueue<E, F, N>::initialize() {
-  _elems = NEW_C_HEAP_ARRAY(E, N, F);
+  _elems = _array_allocator.allocate(N);
 }
 
 template<class E, MEMFLAGS F, unsigned int N>