changeset 4486:2e093b564241

7014552: gc/lock/jni/jnilockXXX works too slow on 1-processor machine
Summary: Keep a counter of how many times we were stalled by the GC locker, add a diagnostic flag which sets the limit.
Reviewed-by: brutisso, ehelin, johnc
author mgerdin
date Thu, 28 Mar 2013 10:27:28 +0100
parents 42e370795a39
children 754c24457b20
files src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
      src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
      src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
      src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
      src/share/vm/memory/collectorPolicy.cpp
      src/share/vm/runtime/globals.hpp
diffstat 6 files changed, 51 insertions(+), 12 deletions(-)
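
The patch below bounds how long an allocating thread will keep stalling on the GC locker before it gives up and reports the allocation as failed. A minimal sketch of the pattern, using illustrative stand-in names (try_allocate, stall_until_gc_locker_clears) rather than the real HotSpot entry points:

    #include <cstddef>

    // Stand-ins for the real allocation fast path and the GC locker stall;
    // here every attempt fails, so the retry budget is what terminates.
    static void* try_allocate(size_t size) { return NULL; }
    static void stall_until_gc_locker_clears() {}

    // Previously the allocation loops could stall over and over while JNI
    // critical sections kept the GC locker active; now they return NULL
    // (an allocation failure) once the budget is spent.
    static void* allocate_with_retry_budget(size_t size, int retry_limit) {
      int stall_count = 0;
      for (;;) {
        void* result = try_allocate(size);
        if (result != NULL) return result;
        if (stall_count > retry_limit) return NULL;  // budget exhausted
        stall_until_gc_locker_clears();
        stall_count += 1;
      }
    }

    int main() {
      // With retry_limit == 2 (the flag's default) this stalls three
      // times (counts 0, 1, 2 pass the strict > check), then gives up.
      return allocate_with_retry_budget(64, 2) == NULL ? 0 : 1;
    }
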
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 27 10:55:37 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Mar 28 10:27:28 2013 +0100
@@ -854,7 +854,8 @@
   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 
   unsigned int dummy_gc_count_before;
-  return attempt_allocation(word_size, &dummy_gc_count_before);
+  int dummy_gclocker_retry_count = 0;
+  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 }
 
 HeapWord*
@@ -863,14 +864,14 @@
   assert_heap_not_locked_and_not_at_safepoint();
 
  // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (int try_count = 1; /* we'll return */; try_count += 1) {
+  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
 
     HeapWord* result = NULL;
     if (!isHumongous(word_size)) {
-      result = attempt_allocation(word_size, &gc_count_before);
+      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
     } else {
-      result = attempt_allocation_humongous(word_size, &gc_count_before);
+      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
     }
     if (result != NULL) {
       return result;
@@ -894,6 +895,9 @@
       }
       return result;
     } else {
+      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+        return NULL;
+      }
       assert(op.result() == NULL,
              "the result should be NULL if the VM op did not succeed");
     }
@@ -910,7 +914,8 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                           unsigned int *gc_count_before_ret) {
+                                           unsigned int *gc_count_before_ret,
+                                           int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
@@ -986,10 +991,16 @@
         return NULL;
       }
     } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = total_collections();
+        return NULL;
+      }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GC_locker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
     }
 
    // We can reach here if we were unsuccessful in scheduling a
@@ -1019,7 +1030,8 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                          unsigned int * gc_count_before_ret) {
+                                          unsigned int * gc_count_before_ret,
+                                          int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1104,10 +1116,16 @@
         return NULL;
       }
     } else {
+      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
+        MutexLockerEx x(Heap_lock);
+        *gc_count_before_ret = total_collections();
+        return NULL;
+      }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GC_locker::stall_until_clear();
+      (*gclocker_retry_count_ret) += 1;
     }
 
    // We can reach here if we were unsuccessful in scheduling a
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Mar 27 10:55:37 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Mar 28 10:27:28 2013 +0100
@@ -559,18 +559,21 @@
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
-                                      unsigned int* gc_count_before_ret);
+                                      unsigned int* gc_count_before_ret,
+                                      int* gclocker_retry_count_ret);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
-                                    unsigned int* gc_count_before_ret);
+                                    unsigned int* gc_count_before_ret,
+                                    int* gclocker_retry_count_ret);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         unsigned int* gc_count_before_ret);
+                                         unsigned int* gc_count_before_ret,
+                                         int* gclocker_retry_count_ret);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Mar 27 10:55:37 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Mar 28 10:27:28 2013 +0100
@@ -60,7 +60,8 @@
 
 inline HeapWord*
 G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret) {
+                                    unsigned int* gc_count_before_ret,
+                                    int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -68,7 +69,9 @@
   HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                       false /* bot_updates */);
   if (result == NULL) {
-    result = attempt_allocation_slow(word_size, gc_count_before_ret);
+    result = attempt_allocation_slow(word_size,
+                                     gc_count_before_ret,
+                                     gclocker_retry_count_ret);
   }
   assert_heap_not_locked();
   if (result != NULL) {
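
In the G1 path the counter lives in the retry loop of mem_allocate() and is threaded by pointer through attempt_allocation() into the slow paths, so stalls accumulate across loop iterations instead of resetting on every slow-path call. A compressed sketch of that shape, again with stand-in names rather than the real signatures:

    #include <cstddef>

    // Slow-path stand-in: each failed attempt "stalls" and bumps the
    // caller's counter through the out-parameter, the same shape as
    // attempt_allocation_slow() after this change.
    static void* slow_path(size_t word_size, int* gclocker_retry_count_ret) {
      // ... GC_locker::stall_until_clear() would go here ...
      *gclocker_retry_count_ret += 1;
      return NULL;  // pretend the allocation still failed
    }

    // Caller stand-in: the counter is a loop-local, so the budget spans
    // every slow-path invocation made for this one allocation request.
    static void* caller(size_t word_size, int retry_limit) {
      for (int gclocker_retry_count = 0; ; ) {
        if (gclocker_retry_count > retry_limit) return NULL;
        void* result = slow_path(word_size, &gclocker_retry_count);
        if (result != NULL) return result;
      }
    }

    int main() { return caller(64, 2) == NULL ? 0 : 1; }
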
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Mar 27 10:55:37 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Mar 28 10:27:28 2013 +0100
@@ -326,6 +326,7 @@
 
   uint loop_count = 0;
   uint gc_count = 0;
+  int gclocker_stalled_count = 0;
 
   while (result == NULL) {
     // We don't want to have multiple collections for a single filled generation.
@@ -354,6 +355,10 @@
         return result;
       }
 
+      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+        return NULL;
+      }
+
       // Failed to allocate without a gc.
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
@@ -366,6 +371,7 @@
         if (!jthr->in_critical()) {
           MutexUnlocker mul(Heap_lock);
           GC_locker::stall_until_clear();
+          gclocker_stalled_count += 1;
           continue;
         } else {
           if (CheckJNICalls) {
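
In the ParallelScavenge variant the whole retry loop sits in a single method, so the counter can stay a plain local (gclocker_stalled_count) instead of being threaded through out-parameters as in G1. Either way, once the budget is exhausted mem_allocate() returns NULL and the requesting thread sees an allocation failure instead of stalling indefinitely behind JNI critical sections, which is what made the jnilock tests crawl on a 1-processor machine.
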
--- a/src/share/vm/memory/collectorPolicy.cpp	Wed Mar 27 10:55:37 2013 +0100
+++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Mar 28 10:27:28 2013 +0100
@@ -532,7 +532,7 @@
 
  // Loop until the allocation is satisfied,
   // or unsatisfied after GC.
-  for (int try_count = 1; /* return or throw */; try_count += 1) {
+  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
     HandleMark hm; // discard any handles allocated in each iteration
 
     // First allocation attempt is lock-free.
@@ -576,6 +576,10 @@
           }
         }
 
+        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
+          return NULL; // we didn't get to do a GC and we didn't get any memory
+        }
+
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
@@ -587,6 +591,7 @@
           MutexUnlocker mul(Heap_lock);
           // Wait for JNI critical section to be exited
           GC_locker::stall_until_clear();
+          gclocker_stalled_count += 1;
           continue;
         } else {
           if (CheckJNICalls) {
--- a/src/share/vm/runtime/globals.hpp	Wed Mar 27 10:55:37 2013 +0100
+++ b/src/share/vm/runtime/globals.hpp	Thu Mar 28 10:27:28 2013 +0100
@@ -1402,6 +1402,10 @@
           "How much the GC can expand the eden by while the GC locker  "    \
           "is active (as a percentage)")                                    \
                                                                             \
+  diagnostic(intx, GCLockerRetryAllocationCount, 2,                         \
+          "Number of times to retry allocations when"                       \
+          " blocked by the GC locker")                                      \
+                                                                            \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")                  \
                                                                             \
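
Since the new flag is declared with diagnostic(...), it must be unlocked before it can be set on the command line. A usage sketch (the main class name is a placeholder):

    java -XX:+UnlockDiagnosticVMOptions -XX:GCLockerRetryAllocationCount=5 MyApp

Note the strict > comparison in the checks above: the default of 2 actually lets a thread stall three times (the check passes at counts 0, 1 and 2) before NULL is returned.
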