changeset 5998:436c55e3e3bf

Merge
author asaha
date Tue, 07 Oct 2014 12:19:13 -0700
parents 9b04c0f54507 (current diff) 4c218194cc6c (diff)
children 735b6a7be7dc
files src/os/bsd/vm/os_bsd.cpp src/os/linux/vm/os_linux.cpp src/share/vm/classfile/classFileParser.cpp src/share/vm/opto/graphKit.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp
diffstat 24 files changed, 217 insertions(+), 179 deletions(-)
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -614,8 +614,8 @@
     // Save the regs and make space for a C call
     __ save(SP, -96, SP);
     __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
+    BLOCK_COMMENT("call os::naked_short_sleep");
+    __ call(CAST_FROM_FN_PTR(address, os::naked_short_sleep));
     __ delayed()->nop();
     __ restore_globals_from_locals();
     __ restore();
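
The only functional change above is retargeting the stub's C call from os::naked_sleep to the new os::naked_short_sleep. A rough sketch of what a CAST_FROM_FN_PTR-style conversion does (a hypothetical stand-in, not HotSpot's actual macro): it simply reinterprets a function pointer as a raw code address that the code generator can emit a call to.

    #include <cstdint>
    #include <cstdio>

    typedef unsigned char* address;

    // Hypothetical stand-in for HotSpot's CAST_FROM_FN_PTR: reinterpret a
    // function pointer as a raw code address for the code generator to call.
    template <typename F>
    static address cast_from_fn_ptr(F f) {
      return reinterpret_cast<address>(reinterpret_cast<uintptr_t>(f));
    }

    static void naked_short_sleep_stub(long ms) { (void)ms; }  // stand-in target

    int main() {
      address target = cast_from_fn_ptr(&naked_short_sleep_stub);
      std::printf("emit call to %p\n", static_cast<void*>(target));
    }
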
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -98,6 +98,27 @@
   _supports_cx8 = has_v9();
   _supports_atomic_getset4 = true; // swap instruction
 
+  // There are Fujitsu Sparc64 CPUs which support blk_init as well, so
+  // we have to take this check out of the 'is_niagara()' block below.
+  if (has_blk_init()) {
+    // When using CMS or G1, we cannot use memset() in BOT updates
+    // because the sun4v/CMT version in libc_psr uses BIS which
+    // exposes "phantom zeros" to concurrent readers. See 6948537.
+    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
+      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
+    }
+    // Issue a stern warning if the user has explicitly set
+    // UseMemSetInBOT (it is known to cause issues), but allow
+    // use for experimentation and debugging.
+    if (UseConcMarkSweepGC || UseG1GC) {
+      if (UseMemSetInBOT) {
+        assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
+        warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
+                " on sun4v; please understand that you are using at your own risk!");
+      }
+    }
+  }
+
   if (is_niagara()) {
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseInlineCaches)) {
@@ -107,12 +128,6 @@
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
-    // When using CMS or G1, we cannot use memset() in BOT updates
-    // because the sun4v/CMT version in libc_psr uses BIS which
-    // exposes "phantom zeros" to concurrent readers. See 6948537.
-    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
-      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
-    }
 #ifdef _LP64
     // 32-bit oops don't make sense for the 64-bit VM on sparc
     // since the 32-bit VM has the same registers and smaller objects.
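
The hunk above follows HotSpot's usual flag-ergonomics pattern: override a flag's default only when the user has not set it explicitly, and merely warn when the user forces a known-risky value. A minimal self-contained sketch of that pattern, with hypothetical stand-ins for the FLAG_IS_DEFAULT/FLAG_SET_DEFAULT machinery:

    #include <cstdio>

    // Hypothetical stand-ins for HotSpot's flag machinery.
    struct Flag {
      bool value;
      bool is_default;  // true until the user sets it on the command line
    };

    static Flag UseMemSetInBOT = { true, true };

    static void apply_ergonomics(bool concurrent_gc, bool has_blk_init) {
      if (!has_blk_init || !concurrent_gc) return;
      if (UseMemSetInBOT.is_default) {
        // Safe default: disable, because BIS-based memset can expose
        // "phantom zeros" to concurrent readers (see 6948537).
        UseMemSetInBOT.value = false;
      } else if (UseMemSetInBOT.value) {
        // User forced it on: allow for experimentation, but warn sternly.
        std::fprintf(stderr,
            "warning: -XX:+UseMemSetInBOT is known to cause instability on sun4v\n");
      }
    }

    int main() {
      apply_ergonomics(/*concurrent_gc=*/true, /*has_blk_init=*/true);
      std::printf("UseMemSetInBOT = %s\n", UseMemSetInBOT.value ? "true" : "false");
    }
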
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Tue Oct 07 12:19:13 2014 -0700
@@ -96,7 +96,13 @@
   static bool is_M_family(int features) { return (features & M_family_m) != 0; }
   static bool is_T_family(int features) { return (features & T_family_m) != 0; }
   static bool is_niagara() { return is_T_family(_features); }
-  DEBUG_ONLY( static bool is_niagara(int features)  { return (features & sun4v_m) != 0; } )
+#ifdef ASSERT
+  static bool is_niagara(int features)  {
+    // 'sun4v_m' may be set on Sun/Oracle Sparc CPUs as well as on
+    // Fujitsu Sparc64 CPUs, but only Sun/Oracle Sparcs can be 'niagaras'.
+    return (features & sun4v_m) != 0 && (features & sparc64_family_m) == 0;
+  }
+#endif
 
   // Returns true if it is niagara1 (T1).
   static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }
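
The corrected predicate tests one feature bit while excluding a family bit, since Fujitsu Sparc64 CPUs also report sun4v. A tiny sketch of the bitmask idiom, with illustrative mask values (the real ones are defined elsewhere in vm_version_sparc.hpp):

    #include <cstdio>

    // Illustrative feature masks; the real values come from vm_version_sparc.hpp.
    enum {
      sun4v_m          = 1 << 0,
      sparc64_family_m = 1 << 1,
    };

    // Niagara means: sun4v, but not a Fujitsu Sparc64 (which also reports sun4v).
    static bool is_niagara(int features) {
      return (features & sun4v_m) != 0 && (features & sparc64_family_m) == 0;
    }

    int main() {
      std::printf("%d\n", is_niagara(sun4v_m));                    // 1: Sun/Oracle sun4v
      std::printf("%d\n", is_niagara(sun4v_m | sparc64_family_m)); // 0: Fujitsu Sparc64
    }
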
--- a/src/os/bsd/dtrace/libjvm_db.c	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/os/bsd/dtrace/libjvm_db.c	Tue Oct 07 12:19:13 2014 -0700
@@ -261,6 +261,9 @@
   uint64_t base;
   int err;
 
+  /* Clear *vmp now in case we jump to fail: */
+  memset(vmp, 0, sizeof(VMStructEntry));
+
   err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr);
   CHECK_FAIL(err);
   err = read_pointer(J, sym_addr, &gHotSpotVMStructs);
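
CHECK_FAIL jumps to a fail: label, so without the new memset an early failure would return with *vmp uninitialized. A minimal sketch of the goto-fail pattern and why the up-front clear matters (all helper names hypothetical):

    #include <cstring>
    #include <cstdio>

    struct VMStructEntry { const char* typeName; const char* fieldName; };

    #define CHECK_FAIL(err) do { if ((err) != 0) goto fail; } while (0)

    // Hypothetical lookup that can fail.
    static int lookup_symbol(const char* name, unsigned long* addr) {
      (void)name; *addr = 0; return -1;  // simulate failure
    }

    static int read_entry(VMStructEntry* vmp) {
      int err;
      unsigned long sym_addr;

      /* Clear *vmp now in case we jump to fail: callers then see a
         well-defined (all-zero) entry instead of stack garbage. */
      memset(vmp, 0, sizeof(VMStructEntry));

      err = lookup_symbol("gHotSpotVMStructs", &sym_addr);
      CHECK_FAIL(err);
      return 0;

     fail:
      return err;
    }

    int main() {
      VMStructEntry e;
      std::printf("read_entry -> %d, typeName=%p\n", read_entry(&e), (void*)e.typeName);
    }
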
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -3711,9 +3711,21 @@
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
--- a/src/os/linux/vm/os_linux.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -3866,9 +3866,33 @@
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+//
+// Short sleep, direct OS call.
+//
+// Note: certain versions of the Linux CFS scheduler (since 2.6.23) do not
+// guarantee that sched_yield(2) will actually give up the CPU:
+//
+//   * If the thread is alone on its particular CPU, it keeps running.
+//   * On kernels before the introduction of "skip_buddy" (pre 2.6.39),
+//     when "compat_yield" is disabled.
+//
+// So calling this with ms = 0 is an alternative.
+//
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
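
The comment above is the heart of the change: under CFS, sched_yield() may return without descheduling the caller, whereas even a 1 ns nanosleep always takes the sleep path. A minimal Linux-specific sketch contrasting the two:

    #include <sched.h>
    #include <time.h>

    // Under CFS, sched_yield() may return without descheduling the caller
    // (e.g. if it is alone on its CPU).
    static void yield_weak() { sched_yield(); }

    // A 1 ns nanosleep always enters the sleep path, so naked_short_sleep(0)
    // is usable as a stronger "yield".
    static void yield_strong() {
      struct timespec req;
      req.tv_sec  = 0;
      req.tv_nsec = 1;         // shortest representable sleep
      nanosleep(&req, NULL);   // rounded up to scheduler granularity
    }

    int main() {
      yield_weak();
      yield_strong();
      return 0;
    }
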
--- a/src/os/solaris/dtrace/libjvm_db.c	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/os/solaris/dtrace/libjvm_db.c	Tue Oct 07 12:19:13 2014 -0700
@@ -261,6 +261,9 @@
   uint64_t base;
   int err;
 
+  /* Clear *vmp now in case we jump to fail: */
+  memset(vmp, 0, sizeof(VMStructEntry));
+
   err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr);
   CHECK_FAIL(err);
   err = read_pointer(J, sym_addr, &gHotSpotVMStructs);
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -3742,9 +3742,14 @@
   return os_sleep(millis, interruptible);
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os_sleep(1, false);
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+
+  // usleep is deprecated and has been removed from POSIX in favour of
+  // nanosleep, but on Solaris nanosleep requires linking against -lrt.
+  usleep((ms * 1000));
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
--- a/src/os/windows/vm/os_windows.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -3421,6 +3421,16 @@
   return result;
 }
 
+//
+// Short sleep, direct OS call.
+//
+// ms = 0 means: allow others (if any) to run.
+//
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  Sleep(ms);
+}
+
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -3663,6 +3663,7 @@
 
   // now perform tests that are based on flag settings
   if (callee->force_inline()) {
+    if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
     print_inlining(callee, "force inline by annotation");
   } else if (callee->should_inline()) {
     print_inlining(callee, "force inline by CompileOracle");
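
The new bailout caps how deep forced inlining may recurse: @ForceInline methods bypass the normal size and level checks, so without a cap a recursive force-inlined chain could grow the inline tree until the compiler exhausts its own stack. A toy sketch of the depth guard (names hypothetical, modelled on the hunk above):

    #include <cstdio>

    static const int MaxForceInlineLevel = 100;  // mirrors the new develop flag

    // Toy model: "inline" a call chain, bailing out past the cap even for
    // methods marked force-inline.
    static bool try_inline(int inline_level, bool force_inline) {
      if (force_inline) {
        if (inline_level > MaxForceInlineLevel) {
          std::puts("bailout: MaxForceInlineLevel");
          return false;
        }
        return try_inline(inline_level + 1, force_inline);  // keep inlining
      }
      return true;
    }

    int main() {
      try_inline(0, /*force_inline=*/true);
    }
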
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -3747,7 +3747,7 @@
     }
 
     // Allocate mirror and initialize static fields
-    java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
+    java_lang_Class::create_mirror(this_klass, class_loader, CHECK_(nullHandle));
 
     ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()),
                                              false /* not shared class */);
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -530,10 +530,10 @@
       }
     }
   }
-  create_mirror(k, CHECK);
+  create_mirror(k, Handle(NULL), CHECK);
 }
 
-oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
+oop java_lang_Class::create_mirror(KlassHandle k, Handle class_loader, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers().  Instance classes recalculate
@@ -574,6 +574,8 @@
       // Initialize static fields
       instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
     }
+    // set the classLoader field in the java_lang_Class instance
+    set_class_loader(mirror(), class_loader());
     return mirror();
   } else {
     return NULL;
@@ -599,6 +601,18 @@
   java_class->int_field_put(_static_oop_field_count_offset, size);
 }
 
+void java_lang_Class::set_class_loader(oop java_class, oop loader) {
+  // jdk7 runs Queens in bootstrapping and jdk8-9 has no coordinated pushes yet.
+  if (_class_loader_offset != 0) {
+    java_class->obj_field_put(_class_loader_offset, loader);
+  }
+}
+
+oop java_lang_Class::class_loader(oop java_class) {
+  assert(_class_loader_offset != 0, "must be set");
+  return java_class->obj_field(_class_loader_offset);
+}
+
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
@@ -762,6 +776,12 @@
   compute_optional_offset(classRedefinedCount_offset,
                           klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
 
+  // Needs to be optional because the old build runs Queens during bootstrapping
+  // and jdk8-9 doesn't have coordinated pushes yet.
+  compute_optional_offset(_class_loader_offset,
+                 klass_oop, vmSymbols::classClassLoader_name(),
+                 vmSymbols::classloader_signature());
+
   CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
@@ -2830,6 +2850,7 @@
 int java_lang_Class::_resolved_constructor_offset;
 int java_lang_Class::_oop_size_offset;
 int java_lang_Class::_static_oop_field_count_offset;
+int java_lang_Class::_class_loader_offset;
 int java_lang_Throwable::backtrace_offset;
 int java_lang_Throwable::detailMessage_offset;
 int java_lang_Throwable::cause_offset;
@@ -3277,3 +3298,4 @@
   JavaClasses::check_offsets();
   FilteredFieldsMap::initialize();  // must be done after computing offsets.
 }
+
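
The offset for the new classLoader field is computed with compute_optional_offset, and set_class_loader() only writes when the offset is non-zero, so the VM still runs against class libraries whose java.lang.Class lacks the field. A minimal sketch of that optional-offset guard (plumbing hypothetical):

    #include <cstdio>

    // Hypothetical stand-in: 0 means "field not present in this class library".
    static int _class_loader_offset = 0;

    static void set_class_loader(void* java_class, void* loader) {
      if (_class_loader_offset != 0) {
        // would do: java_class->obj_field_put(_class_loader_offset, loader);
        std::printf("stored loader at offset %d\n", _class_loader_offset);
      } else {
        std::puts("old class library: classLoader field absent, write skipped");
      }
      (void)java_class; (void)loader;
    }

    int main() {
      set_class_loader(nullptr, nullptr);   // offset not computed: no-op
      _class_loader_offset = 96;            // pretend compute_optional_offset found it
      set_class_loader(nullptr, nullptr);
    }
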
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Tue Oct 07 12:19:13 2014 -0700
@@ -224,14 +224,21 @@
   static int _oop_size_offset;
   static int _static_oop_field_count_offset;
 
+  static int _class_loader_offset;
+
   static bool offsets_computed;
   static int classRedefinedCount_offset;
 
+  static void set_class_loader(oop java_class, oop class_loader);
  public:
   static void compute_offsets();
 
   // Instance creation
-  static oop  create_mirror(KlassHandle k, TRAPS);
+  static oop  create_mirror(KlassHandle k, Handle class_loader, TRAPS);
+  static oop  create_mirror(KlassHandle k, TRAPS) {
+    return create_mirror(k, Handle(), THREAD);
+  }
+
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
@@ -267,6 +274,8 @@
   static int classRedefinedCount(oop the_class_mirror);
   static void set_classRedefinedCount(oop the_class_mirror, int value);
 
+  static oop class_loader(oop java_class);
+
   static int oop_size(oop java_class);
   static void set_oop_size(oop java_class, int size);
   static int static_oop_field_count(oop java_class);
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Oct 07 12:19:13 2014 -0700
@@ -546,6 +546,7 @@
   template(serializePropertiesToByteArray_signature,   "()[B")                                                    \
   template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
   template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
+  template(classClassLoader_name,                      "classLoader")                                             \
                                                                                                                   \
   /* trace signatures */                                                                                          \
   TRACE_TEMPLATES(template)                                                                                       \
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -364,6 +364,13 @@
     set_msg("not an accessor");
     return false;
   }
+
+  // Limit inlining depth in case inlining is forced or
+  // _max_inline_level was increased to compensate for lambda forms.
+  if (inline_level() > MaxForceInlineLevel) {
+    set_msg("MaxForceInlineLevel");
+    return false;
+  }
   if (inline_level() > _max_inline_level) {
     if (!callee_method->force_inline() || !IncrementalInline) {
       set_msg("inlining too deep");
--- a/src/share/vm/opto/callGenerator.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/opto/callGenerator.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -742,7 +742,7 @@
         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
         const int vtable_index = methodOopDesc::invalid_vtable_index;
         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true);
-        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
       }
@@ -808,7 +808,7 @@
         }
 
         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true);
-        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
       }
--- a/src/share/vm/opto/graphKit.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -2746,17 +2746,23 @@
   }
 
   Node* cast_obj = NULL;
-  if (data != NULL &&
-      // Counter has never been decremented (due to cast failure).
-      // ...This is a reasonable thing to expect.  It is true of
-      // all casts inserted by javac to implement generic types.
-      data->as_CounterData()->count() >= 0) {
-    cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
-    if (cast_obj != NULL) {
-      if (failure_control != NULL) // failure is now impossible
-        (*failure_control) = top();
-      // adjust the type of the phi to the exact klass:
-      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+  if (tk->klass_is_exact()) {
+    // The following optimization tries to statically cast the speculative type of the object
+    // (for example obtained during profiling) to the type of the superklass and then do a
+    // dynamic check that the type of the object is what we expect. To work correctly
+    // for checkcast and aastore the type of superklass should be exact.
+    if (data != NULL &&
+        // Counter has never been decremented (due to cast failure).
+        // ...This is a reasonable thing to expect.  It is true of
+        // all casts inserted by javac to implement generic types.
+        data->as_CounterData()->count() >= 0) {
+      cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
+      if (cast_obj != NULL) {
+        if (failure_control != NULL) // failure is now impossible
+          (*failure_control) = top();
+        // adjust the type of the phi to the exact klass:
+        phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+      }
     }
   }
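
The new klass_is_exact() guard restricts the profile-driven cast: the optimization casts the object's speculative type to the superklass and then emits a single dynamic check, which is only sound when the target type is exact. As a loose plain-C++ analogy (not the C2 IR), a speculative fast path guarded by one runtime type check, safe to treat as exact because the target class is final:

    #include <cstdio>

    struct Animal { virtual ~Animal() {} };
    struct Cat final : Animal {};   // "exact" type: no subclasses possible

    // Loose analogy to the profiled-receiver cast: speculate that obj is a Cat
    // (what profiling saw), verify with one dynamic check, fall back otherwise.
    static Cat* speculative_cast(Animal* obj) {
      if (Cat* c = dynamic_cast<Cat*>(obj)) {
        return c;            // fast path: profile was right
      }
      return nullptr;        // profile was wrong: take the general slow path
    }

    int main() {
      Cat cat;
      Animal other;
      std::printf("%p %p\n", (void*)speculative_cast(&cat), (void*)speculative_cast(&other));
    }
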
 
--- a/src/share/vm/runtime/arguments.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -2084,19 +2084,6 @@
   status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize");
 #endif
 
-#ifdef SPARC
-  if (UseConcMarkSweepGC || UseG1GC) {
-    // Issue a stern warning if the user has explicitly set
-    // UseMemSetInBOT (it is known to cause issues), but allow
-    // use for experimentation and debugging.
-    if (VM_Version::is_sun4v() && UseMemSetInBOT) {
-      assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
-      warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
-          " on sun4v; please understand that you are using at your own risk!");
-    }
-  }
-#endif // SPARC
-
   // check native memory tracking flags
   if (PrintNMTStatistics && MemTracker::tracking_level() == MemTracker::NMT_off) {
     warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
--- a/src/share/vm/runtime/globals.hpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/runtime/globals.hpp	Tue Oct 07 12:19:13 2014 -0700
@@ -2866,6 +2866,9 @@
   product(intx, MaxRecursiveInlineLevel, 1,                                 \
           "maximum number of nested recursive calls that are inlined")      \
                                                                             \
+  develop(intx, MaxForceInlineLevel, 100,                                   \
+          "maximum number of nested @ForceInline calls that are inlined")   \
+                                                                            \
   product_pd(intx, InlineSmallCode,                                         \
           "Only inline already compiled methods if their code size is "     \
           "less than this")                                                 \
--- a/src/share/vm/runtime/objectMonitor.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/runtime/objectMonitor.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -1612,33 +1612,25 @@
      // post monitor waited event. Note that this is past-tense, we are done waiting.
      if (JvmtiExport::should_post_monitor_waited()) {
        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
-     }
 
-     // Without the fix for 8028280, it is possible for the above call:
-     //
-     //   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
-     //
-     // to consume the unpark() that was done when the successor was set.
-     // The solution for this very rare possibility is to redo the unpark()
-     // outside of the JvmtiExport::should_post_monitor_waited() check.
-     //
-     if (node._notified != 0 && _succ == Self) {
-       // In this part of the monitor wait-notify-reenter protocol it
-       // is possible (and normal) for another thread to do a fastpath
-       // monitor enter-exit while this thread is still trying to get
-       // to the reenter portion of the protocol.
-       //
-       // The ObjectMonitor was notified and the current thread is
-       // the successor which also means that an unpark() has already
-       // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
-       // consume the unpark() that was done when the successor was
-       // set because the same ParkEvent is shared between Java
-       // monitors and JVM/TI RawMonitors (for now).
-       //
-       // We redo the unpark() to ensure forward progress, i.e., we
-       // don't want all pending threads hanging (parked) with none
-       // entering the unlocked monitor.
-       node._event->unpark();
+       if (node._notified != 0 && _succ == Self) {
+         // In this part of the monitor wait-notify-reenter protocol it
+         // is possible (and normal) for another thread to do a fastpath
+         // monitor enter-exit while this thread is still trying to get
+         // to the reenter portion of the protocol.
+         //
+         // The ObjectMonitor was notified and the current thread is
+         // the successor which also means that an unpark() has already
+         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+         // consume the unpark() that was done when the successor was
+         // set because the same ParkEvent is shared between Java
+         // monitors and JVM/TI RawMonitors (for now).
+         //
+         // We redo the unpark() to ensure forward progress, i.e., we
+         // don't want all pending threads hanging (parked) with none
+         // entering the unlocked monitor.
+         node._event->unpark();
+       }
      }
 
      if (event.should_commit()) {
--- a/src/share/vm/runtime/os.hpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/runtime/os.hpp	Tue Oct 07 12:19:13 2014 -0700
@@ -420,7 +420,10 @@
   static intx current_thread_id();
   static int current_process_id();
   static int sleep(Thread* thread, jlong ms, bool interruptable);
-  static int naked_sleep();
+  // Short standalone OS sleep suitable for slow path spin loop.
+  // Ignores Thread.interrupt() (so keep it short).
+  // ms = 0 will sleep for the least amount of time allowed by the OS.
+  static void naked_short_sleep(jlong ms);
   static void infinite_sleep(); // never returns, use with CAUTION
   static void yield();        // Yields to all threads with same priority
   enum YieldResult {
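
The replacement primitive is meant for slow-path spin loops, as the thread.cpp hunk further below shows. A minimal usage sketch, with a POSIX nanosleep stand-in for os::naked_short_sleep:

    #include <atomic>
    #include <time.h>

    // Stand-in for os::naked_short_sleep(): uninterruptible, sub-second only.
    static void naked_short_sleep(long ms) {
      struct timespec req = { 0, ms > 0 ? ms * 1000000L : 1L };
      nanosleep(&req, NULL);
    }

    // Slow-path spin loop in the style of Thread::SpinAcquire: spin a while,
    // then back off with a short naked sleep instead of parking the thread.
    static void spin_acquire(std::atomic_flag* lock) {
      int yields = 0;
      while (lock->test_and_set(std::memory_order_acquire)) {
        if (++yields > 5) {
          naked_short_sleep(1);   // 1 ms back-off; ignores Thread.interrupt()
        }
      }
    }

    int main() {
      std::atomic_flag lock = ATOMIC_FLAG_INIT;
      spin_acquire(&lock);
      lock.clear(std::memory_order_release);
    }
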
--- a/src/share/vm/runtime/park.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/runtime/park.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -59,58 +59,22 @@
 
   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
+  // Using a spin lock since we are part of the mutex impl.
+  // 8028280: using a concurrent free list without memory management can
+  // leak pretty badly, it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
+  {
+    ev = FreeList;
+    if (ev != NULL) {
+      FreeList = ev->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        ParkEvent * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system.  In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible.  At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
@@ -124,13 +88,14 @@
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL      , "invariant") ;
   ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  // Note that if we didn't have the TSM/immortal constraint, then
+  // when reattaching we could trim the list.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
+  {
+    ev->FreeNext = FreeList;
+    FreeList = ev;
   }
+  Thread::SpinRelease(&ListLock);
 }
 
 // Override operator new and delete so we can ensure that the
@@ -164,56 +129,21 @@
 
   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p  == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
+  // 8028280: using a concurrent free list without memory management can
+  // leak pretty badly, it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
+  {
+    p = FreeList;
+    if (p != NULL) {
+      FreeList = p->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        Parker * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList --and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ;          // Associate p with t
@@ -227,11 +157,12 @@
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL      , "invariant") ;
   p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
+  {
+    p->FreeNext = FreeList;
+    FreeList = p;
   }
+  Thread::SpinRelease(&ListLock);
 }
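
The point of 8028280 is that a lock-free Treiber-style free list is unsafe without a memory-reclamation scheme, so the fix trades it for a spin lock; allocation and release are rare enough that the lock is cheap. A minimal self-contained sketch of the new pattern, a spin lock guarding a singly linked free list (all names illustrative):

    #include <atomic>
    #include <cstdio>

    struct ParkEventish {
      ParkEventish* FreeNext = nullptr;
    };

    static std::atomic_flag ListLock = ATOMIC_FLAG_INIT;
    static ParkEventish* FreeList = nullptr;

    // Using a spin lock since this sits under the mutex implementation itself:
    // it must not block on a higher-level lock.
    static void spin_acquire() { while (ListLock.test_and_set(std::memory_order_acquire)) {} }
    static void spin_release() { ListLock.clear(std::memory_order_release); }

    static ParkEventish* allocate() {
      ParkEventish* ev;
      spin_acquire();
      {
        ev = FreeList;
        if (ev != nullptr) FreeList = ev->FreeNext;
      }
      spin_release();
      return ev != nullptr ? ev : new ParkEventish();
    }

    static void release(ParkEventish* ev) {
      ev->FreeNext = nullptr;
      spin_acquire();
      {
        ev->FreeNext = FreeList;
        FreeList = ev;
      }
      spin_release();
    }

    int main() {
      ParkEventish* a = allocate();
      release(a);
      ParkEventish* b = allocate();   // recycles a
      std::printf("recycled: %s\n", a == b ? "yes" : "no");
      delete b;
    }
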
 
--- a/src/share/vm/runtime/thread.cpp	Wed Sep 10 15:38:47 2014 -0700
+++ b/src/share/vm/runtime/thread.cpp	Tue Oct 07 12:19:13 2014 -0700
@@ -4412,9 +4412,7 @@
         ++ctr ;
         if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
            if (Yields > 5) {
-             // Consider using a simple NakedSleep() instead.
-             // Then SpinAcquire could be called by non-JVM threads
-             Thread::current()->_ParkEvent->park(1) ;
+             os::naked_short_sleep(1);
            } else {
              os::NakedYield() ;
              ++Yields ;
--- a/test/Makefile	Wed Sep 10 15:38:47 2014 -0700
+++ b/test/Makefile	Tue Oct 07 12:19:13 2014 -0700
@@ -173,8 +173,8 @@
   JTREG_TESTDIRS = $(TESTDIRS)
 endif
 
-# Default JTREG to run (win32 script works for everybody)
-JTREG = $(JT_HOME)/win32/bin/jtreg
+# Default JTREG to run
+JTREG = $(JT_HOME)/bin/jtreg
 
 # Option to tell jtreg to not run tests marked with "ignore"
 ifeq ($(PLATFORM), windows)