changeset 4430:10e0043bda08 jdk7u14-b16

Merge
author amurillo
date Tue, 12 Mar 2013 13:05:53 -0700
parents 0addb9ef7b4e (current diff) 53152f5f34c4 (diff)
children 55d0822d1370
files .hgtags
diffstat 17 files changed, 220 insertions(+), 179 deletions(-)
--- a/.hgtags	Mon Mar 11 14:48:37 2013 -0700
+++ b/.hgtags	Tue Mar 12 13:05:53 2013 -0700
@@ -454,3 +454,4 @@
 e3d2c238e29c421c3b5c001e400acbfb30790cfc jdk7u14-b14
 860ae068f4dff62a77c8315f0335b7e935087e86 hs24-b34
 12619005c5e29be6e65f0dc9891ca19d9ffb1aaa jdk7u14-b15
+be21f8a4d42c03cafde4f616fd80ece791ba2f21 hs24-b35
--- a/make/hotspot_version	Mon Mar 11 14:48:37 2013 -0700
+++ b/make/hotspot_version	Tue Mar 12 13:05:53 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=34
+HS_BUILD_NUMBER=35
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/os/bsd/vm/os_bsd.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -5471,11 +5471,12 @@
      }
      -- _nParked ;
 
-    // In theory we could move the ST of 0 into _Event past the unlock(),
-    // but then we'd need a MEMBAR after the ST.
     _Event = 0 ;
      status = pthread_mutex_unlock(_mutex);
      assert_status(status == 0, status, "mutex_unlock");
+    // Paranoia to ensure our locked and lock-free paths interact
+    // correctly with each other.
+    OrderAccess::fence();
   }
   guarantee (_Event >= 0, "invariant") ;
 }
@@ -5538,40 +5539,44 @@
   status = pthread_mutex_unlock(_mutex);
   assert_status(status == 0, status, "mutex_unlock");
   assert (_nParked == 0, "invariant") ;
+  // Paranoia to ensure our locked and lock-free paths interact
+  // correctly with each other.
+  OrderAccess::fence();
   return ret;
 }
 
 void os::PlatformEvent::unpark() {
-  int v, AnyWaiters ;
-  for (;;) {
-      v = _Event ;
-      if (v > 0) {
-         // The LD of _Event could have reordered or be satisfied
-         // by a read-aside from this processor's write buffer.
-         // To avoid problems execute a barrier and then
-         // ratify the value.
-         OrderAccess::fence() ;
-         if (_Event == v) return ;
-         continue ;
-      }
-      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
-  }
-  if (v < 0) {
-     // Wait for the thread associated with the event to vacate
-     int status = pthread_mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     AnyWaiters = _nParked ;
-     assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
-     if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
-        AnyWaiters = 0 ;
-        pthread_cond_signal (_cond);
-     }
-     status = pthread_mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
-     if (AnyWaiters != 0) {
-        status = pthread_cond_signal(_cond);
-        assert_status(status == 0, status, "cond_signal");
-     }
+  // Transitions for _Event:
+  //    0 :=> 1
+  //    1 :=> 1
+  //   -1 :=> either 0 or 1; must signal target thread
+  //          That is, we can safely transition _Event from -1 to either
+  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
+  //          unpark() calls.
+  // See also: "Semaphores in Plan 9" by Mullender & Cox
+  //
+  // Note: Forcing a transition from "-1" to "1" on an unpark() means
+  // that it will take two back-to-back park() calls for the owning
+  // thread to block. This has the benefit of forcing a spurious return
+  // from the first park() call after an unpark() call which will help
+  // shake out uses of park() and unpark() without condition variables.
+
+  if (Atomic::xchg(1, &_Event) >= 0) return;
+
+  // Wait for the thread associated with the event to vacate
+  int status = pthread_mutex_lock(_mutex);
+  assert_status(status == 0, status, "mutex_lock");
+  int AnyWaiters = _nParked;
+  assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
+  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
+    AnyWaiters = 0;
+    pthread_cond_signal(_cond);
+  }
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "mutex_unlock");
+  if (AnyWaiters != 0) {
+    status = pthread_cond_signal(_cond);
+    assert_status(status == 0, status, "cond_signal");
   }
 
   // Note that we signal() _after dropping the lock for "immortal" Events.
@@ -5657,13 +5662,14 @@
 }
 
 void Parker::park(bool isAbsolute, jlong time) {
+  // Ideally we'd do something useful while spinning, such
+  // as calling unpackTime().
+
   // Optional fast-path check:
   // Return immediately if a permit is available.
-  if (_counter > 0) {
-      _counter = 0 ;
-      OrderAccess::fence();
-      return ;
-  }
+  // We depend on Atomic::xchg() having full barrier semantics
+  // since we are doing a lock-free update to _counter.
+  if (Atomic::xchg(0, &_counter) > 0) return;
 
   Thread* thread = Thread::current();
   assert(thread->is_Java_thread(), "Must be JavaThread");
@@ -5704,6 +5710,8 @@
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    // Paranoia to ensure our locked and lock-free paths interact
+    // correctly with each other and Java-level accesses.
     OrderAccess::fence();
     return;
   }
@@ -5740,12 +5748,14 @@
   _counter = 0 ;
   status = pthread_mutex_unlock(_mutex) ;
   assert_status(status == 0, status, "invariant") ;
+  // Paranoia to ensure our locked and lock-free paths interact
+  // correctly with each other and Java-level accesses.
+  OrderAccess::fence();
+
   // If externally suspended while waiting, re-suspend
   if (jt->handle_special_suspend_equivalent_condition()) {
     jt->java_suspend_self();
   }
-
-  OrderAccess::fence();
 }
 
 void Parker::unpark() {
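
The hunks above replace the CAS retry loop in os::PlatformEvent::unpark() with a single
Atomic::xchg(1, &_Event), add fence() calls where the locked and lock-free paths meet, and
turn the Parker::park() fast path into Atomic::xchg(0, &_counter). The same change is
repeated for the Linux, Solaris and Windows ports below. A rough standalone sketch of the
resulting protocol, using std::atomic and std::condition_variable purely for illustration
(the class and member names are made up; this is not the VM code):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // One event per thread: only the owning thread calls park(), any thread may
    // call unpark(). _event encodes: 1 = permit pending, 0 = neutral,
    // -1 = the owner is parked (or about to park).
    class EventSketch {
      std::atomic<int> _event{0};
      std::mutex _mutex;
      std::condition_variable _cond;
     public:
      void park() {
        // 1 -> 0 consumes a pending permit; 0 -> -1 records that the owner is
        // about to block. The atomic op doubles as the full barrier the old
        // code obtained from a separate fence().
        if (_event.fetch_sub(1) > 0) return;
        std::unique_lock<std::mutex> lock(_mutex);
        while (_event.load() < 0) {
          _cond.wait(lock);              // tolerate spurious wakeups
        }
        _event.store(0);                 // consume the wakeup
      }
      void unpark() {
        // Transitions for _event: 0 :=> 1, 1 :=> 1, -1 :=> 1 (and signal).
        if (_event.exchange(1) >= 0) return;   // nobody is, or is about to be, parked
        std::lock_guard<std::mutex> lock(_mutex);
        _cond.notify_one();              // _event is already published as 1
      }
    };

The exchange both publishes the new value and acts as a full barrier, which is why the old
"ratify the value" retry loop and its explicit fence are no longer needed.
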
--- a/src/os/linux/vm/os_linux.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -5094,11 +5094,12 @@
      }
      -- _nParked ;
 
-    // In theory we could move the ST of 0 into _Event past the unlock(),
-    // but then we'd need a MEMBAR after the ST.
     _Event = 0 ;
      status = pthread_mutex_unlock(_mutex);
      assert_status(status == 0, status, "mutex_unlock");
+    // Paranoia to ensure our locked and lock-free paths interact
+    // correctly with each other.
+    OrderAccess::fence();
   }
   guarantee (_Event >= 0, "invariant") ;
 }
@@ -5161,40 +5162,44 @@
   status = pthread_mutex_unlock(_mutex);
   assert_status(status == 0, status, "mutex_unlock");
   assert (_nParked == 0, "invariant") ;
+  // Paranoia to ensure our locked and lock-free paths interact
+  // correctly with each other.
+  OrderAccess::fence();
   return ret;
 }
 
 void os::PlatformEvent::unpark() {
-  int v, AnyWaiters ;
-  for (;;) {
-      v = _Event ;
-      if (v > 0) {
-         // The LD of _Event could have reordered or be satisfied
-         // by a read-aside from this processor's write buffer.
-         // To avoid problems execute a barrier and then
-         // ratify the value.
-         OrderAccess::fence() ;
-         if (_Event == v) return ;
-         continue ;
-      }
-      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
-  }
-  if (v < 0) {
-     // Wait for the thread associated with the event to vacate
-     int status = pthread_mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     AnyWaiters = _nParked ;
-     assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
-     if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
-        AnyWaiters = 0 ;
-        pthread_cond_signal (_cond);
-     }
-     status = pthread_mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
-     if (AnyWaiters != 0) {
-        status = pthread_cond_signal(_cond);
-        assert_status(status == 0, status, "cond_signal");
-     }
+  // Transitions for _Event:
+  //    0 :=> 1
+  //    1 :=> 1
+  //   -1 :=> either 0 or 1; must signal target thread
+  //          That is, we can safely transition _Event from -1 to either
+  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
+  //          unpark() calls.
+  // See also: "Semaphores in Plan 9" by Mullender & Cox
+  //
+  // Note: Forcing a transition from "-1" to "1" on an unpark() means
+  // that it will take two back-to-back park() calls for the owning
+  // thread to block. This has the benefit of forcing a spurious return
+  // from the first park() call after an unpark() call which will help
+  // shake out uses of park() and unpark() without condition variables.
+
+  if (Atomic::xchg(1, &_Event) >= 0) return;
+
+  // Wait for the thread associated with the event to vacate
+  int status = pthread_mutex_lock(_mutex);
+  assert_status(status == 0, status, "mutex_lock");
+  int AnyWaiters = _nParked;
+  assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
+  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
+    AnyWaiters = 0;
+    pthread_cond_signal(_cond);
+  }
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "mutex_unlock");
+  if (AnyWaiters != 0) {
+    status = pthread_cond_signal(_cond);
+    assert_status(status == 0, status, "cond_signal");
   }
 
   // Note that we signal() _after dropping the lock for "immortal" Events.
@@ -5280,13 +5285,14 @@
 }
 
 void Parker::park(bool isAbsolute, jlong time) {
+  // Ideally we'd do something useful while spinning, such
+  // as calling unpackTime().
+
   // Optional fast-path check:
   // Return immediately if a permit is available.
-  if (_counter > 0) {
-      _counter = 0 ;
-      OrderAccess::fence();
-      return ;
-  }
+  // We depend on Atomic::xchg() having full barrier semantics
+  // since we are doing a lock-free update to _counter.
+  if (Atomic::xchg(0, &_counter) > 0) return;
 
   Thread* thread = Thread::current();
   assert(thread->is_Java_thread(), "Must be JavaThread");
@@ -5327,6 +5333,8 @@
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    // Paranoia to ensure our locked and lock-free paths interact
+    // correctly with each other and Java-level accesses.
     OrderAccess::fence();
     return;
   }
@@ -5363,12 +5371,14 @@
   _counter = 0 ;
   status = pthread_mutex_unlock(_mutex) ;
   assert_status(status == 0, status, "invariant") ;
+  // Paranoia to ensure our locked and lock-free paths interact
+  // correctly with each other and Java-level accesses.
+  OrderAccess::fence();
+
   // If externally suspended while waiting, re-suspend
   if (jt->handle_special_suspend_equivalent_condition()) {
     jt->java_suspend_self();
   }
-
-  OrderAccess::fence();
 }
 
 void Parker::unpark() {
--- a/src/os/solaris/vm/os_solaris.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -6135,6 +6135,9 @@
      _Event = 0 ;
      status = os::Solaris::mutex_unlock(_mutex);
      assert_status(status == 0, status, "mutex_unlock");
+    // Paranoia to ensure our locked and lock-free paths interact
+    // correctly with each other.
+    OrderAccess::fence();
   }
 }
 
@@ -6176,51 +6179,43 @@
   _Event = 0 ;
   status = os::Solaris::mutex_unlock(_mutex);
   assert_status(status == 0, status, "mutex_unlock");
+  // Paranoia to ensure our locked and lock-free paths interact
+  // correctly with each other.
+  OrderAccess::fence();
   return ret;
 }
 
 void os::PlatformEvent::unpark() {
-  int v, AnyWaiters;
-
-  // Increment _Event.
-  // Another acceptable implementation would be to simply swap 1
-  // into _Event:
-  //   if (Swap (&_Event, 1) < 0) {
-  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
-  //      if (AnyWaiters) cond_signal (_cond) ;
-  //   }
-
-  for (;;) {
-    v = _Event ;
-    if (v > 0) {
-       // The LD of _Event could have reordered or be satisfied
-       // by a read-aside from this processor's write buffer.
-       // To avoid problems execute a barrier and then
-       // ratify the value.  A degenerate CAS() would also work.
-       // Viz., CAS (v+0, &_Event, v) == v).
-       OrderAccess::fence() ;
-       if (_Event == v) return ;
-       continue ;
-    }
-    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
-  }
+  // Transitions for _Event:
+  //    0 :=> 1
+  //    1 :=> 1
+  //   -1 :=> either 0 or 1; must signal target thread
+  //          That is, we can safely transition _Event from -1 to either
+  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
+  //          unpark() calls.
+  // See also: "Semaphores in Plan 9" by Mullender & Cox
+  //
+  // Note: Forcing a transition from "-1" to "1" on an unpark() means
+  // that it will take two back-to-back park() calls for the owning
+  // thread to block. This has the benefit of forcing a spurious return
+  // from the first park() call after an unpark() call which will help
+  // shake out uses of park() and unpark() without condition variables.
+
+  if (Atomic::xchg(1, &_Event) >= 0) return;
 
   // If the thread associated with the event was parked, wake it.
-  if (v < 0) {
-     int status ;
-     // Wait for the thread assoc with the PlatformEvent to vacate.
-     status = os::Solaris::mutex_lock(_mutex);
-     assert_status(status == 0, status, "mutex_lock");
-     AnyWaiters = _nParked ;
-     status = os::Solaris::mutex_unlock(_mutex);
-     assert_status(status == 0, status, "mutex_unlock");
-     guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
-     if (AnyWaiters != 0) {
-       // We intentional signal *after* dropping the lock
-       // to avoid a common class of futile wakeups.
-       status = os::Solaris::cond_signal(_cond);
-       assert_status(status == 0, status, "cond_signal");
-     }
+  // Wait for the thread assoc with the PlatformEvent to vacate.
+  int status = os::Solaris::mutex_lock(_mutex);
+  assert_status(status == 0, status, "mutex_lock");
+  int AnyWaiters = _nParked;
+  status = os::Solaris::mutex_unlock(_mutex);
+  assert_status(status == 0, status, "mutex_unlock");
+  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
+  if (AnyWaiters != 0) {
+    // We intentional signal *after* dropping the lock
+    // to avoid a common class of futile wakeups.
+    status = os::Solaris::cond_signal(_cond);
+    assert_status(status == 0, status, "cond_signal");
   }
 }
 
@@ -6298,14 +6293,14 @@
 }
 
 void Parker::park(bool isAbsolute, jlong time) {
+  // Ideally we'd do something useful while spinning, such
+  // as calling unpackTime().
 
   // Optional fast-path check:
   // Return immediately if a permit is available.
-  if (_counter > 0) {
-      _counter = 0 ;
-      OrderAccess::fence();
-      return ;
-  }
+  // We depend on Atomic::xchg() having full barrier semantics
+  // since we are doing a lock-free update to _counter.
+  if (Atomic::xchg(0, &_counter) > 0) return;
 
   // Optional fast-exit: Check interrupt before trying to wait
   Thread* thread = Thread::current();
@@ -6347,6 +6342,8 @@
     _counter = 0;
     status = os::Solaris::mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
+    // Paranoia to ensure our locked and lock-free paths interact
+    // correctly with each other and Java-level accesses.
     OrderAccess::fence();
     return;
   }
@@ -6388,12 +6385,14 @@
   _counter = 0 ;
   status = os::Solaris::mutex_unlock(_mutex);
   assert_status(status == 0, status, "mutex_unlock") ;
+  // Paranoia to ensure our locked and lock-free paths interact
+  // correctly with each other and Java-level accesses.
+  OrderAccess::fence();
 
   // If externally suspended while waiting, re-suspend
   if (jt->handle_special_suspend_equivalent_condition()) {
     jt->java_suspend_self();
   }
-  OrderAccess::fence();
 }
 
 void Parker::unpark() {
--- a/src/os/windows/vm/os_windows.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4638,6 +4638,7 @@
     }
     v = _Event ;
     _Event = 0 ;
+    // see comment at end of os::PlatformEvent::park() below:
     OrderAccess::fence() ;
     // If we encounter a nearly simultanous timeout expiry and unpark()
     // we return OS_OK indicating we awoke via unpark().
@@ -4675,25 +4676,25 @@
 
 void os::PlatformEvent::unpark() {
   guarantee (_ParkHandle != NULL, "Invariant") ;
-  int v ;
-  for (;;) {
-      v = _Event ;      // Increment _Event if it's < 1.
-      if (v > 0) {
-         // If it's already signaled just return.
-         // The LD of _Event could have reordered or be satisfied
-         // by a read-aside from this processor's write buffer.
-         // To avoid problems execute a barrier and then
-         // ratify the value.  A degenerate CAS() would also work.
-         // Viz., CAS (v+0, &_Event, v) == v).
-         OrderAccess::fence() ;
-         if (_Event == v) return ;
-         continue ;
-      }
-      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
-  }
-  if (v < 0) {
-     ::SetEvent (_ParkHandle) ;
-  }
+
+  // Transitions for _Event:
+  //    0 :=> 1
+  //    1 :=> 1
+  //   -1 :=> either 0 or 1; must signal target thread
+  //          That is, we can safely transition _Event from -1 to either
+  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
+  //          unpark() calls.
+  // See also: "Semaphores in Plan 9" by Mullender & Cox
+  //
+  // Note: Forcing a transition from "-1" to "1" on an unpark() means
+  // that it will take two back-to-back park() calls for the owning
+  // thread to block. This has the benefit of forcing a spurious return
+  // from the first park() call after an unpark() call which will help
+  // shake out uses of park() and unpark() without condition variables.
+
+  if (Atomic::xchg(1, &_Event) >= 0) return;
+
+  ::SetEvent(_ParkHandle);
 }
 
 
--- a/src/share/vm/c1/c1_LIR.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -805,7 +805,7 @@
 
       // only visit register parameters
       int n = opJavaCall->_arguments->length();
-      for (int i = 0; i < n; i++) {
+      for (int i = opJavaCall->_receiver->is_valid() ? 1 : 0; i < n; i++) {
         if (!opJavaCall->_arguments->at(i)->is_pointer()) {
           do_input(*opJavaCall->_arguments->adr_at(i));
         }
--- a/src/share/vm/classfile/javaClasses.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -921,7 +921,6 @@
 // Write the thread status value to threadStatus field in java.lang.Thread java class.
 void java_lang_Thread::set_thread_status(oop java_thread,
                                          java_lang_Thread::ThreadStatus status) {
-  assert(JavaThread::current()->thread_state() == _thread_in_vm, "Java Thread is not running in vm");
   // The threadStatus is only present starting in 1.5
   if (_thread_status_offset > 0) {
     java_thread->int_field_put(_thread_status_offset, status);
--- a/src/share/vm/runtime/atomic.hpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/atomic.hpp	Tue Mar 12 13:05:53 2013 -0700
@@ -29,10 +29,17 @@
 
 class Atomic : AllStatic {
  public:
+  // Atomic operations on jlong types are not available on all 32-bit
+  // platforms. If atomic ops on jlongs are defined here they must only
+  // be used from code that verifies they are available at runtime and
+  // can provide an alternative action if not - see supports_cx8() for
+  // a means to test availability.
+
   // Atomically store to a location
   static void store    (jbyte    store_value, jbyte*    dest);
   static void store    (jshort   store_value, jshort*   dest);
   static void store    (jint     store_value, jint*     dest);
+  // See comment above about using jlong atomics on 32-bit platforms
   static void store    (jlong    store_value, jlong*    dest);
   static void store_ptr(intptr_t store_value, intptr_t* dest);
   static void store_ptr(void*    store_value, void*     dest);
@@ -40,17 +47,19 @@
   static void store    (jbyte    store_value, volatile jbyte*    dest);
   static void store    (jshort   store_value, volatile jshort*   dest);
   static void store    (jint     store_value, volatile jint*     dest);
+  // See comment above about using jlong atomics on 32-bit platforms
   static void store    (jlong    store_value, volatile jlong*    dest);
   static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
   static void store_ptr(void*    store_value, volatile void*     dest);
 
+  // See comment above about using jlong atomics on 32-bit platforms
   static jlong load(volatile jlong* src);
 
   // Atomically add to a location, return updated value
   static jint     add    (jint     add_value, volatile jint*     dest);
   static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
   static void*    add_ptr(intptr_t add_value, volatile void*     dest);
-
+  // See comment above about using jlong atomics on 32-bit platforms
   static jlong    add    (jlong    add_value, volatile jlong*    dest);
 
   // Atomically increment location
@@ -76,6 +85,7 @@
   // barrier across the cmpxchg.  I.e., it's really a 'fence_cmpxchg_acquire'.
   static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
   static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value);
+  // See comment above about using jlong atomics on 32-bit platforms
   static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value);
 
   static unsigned int cmpxchg(unsigned int exchange_value,
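
The new comments warn that jlong atomics are not available on every 32-bit platform and
point at supports_cx8() as the runtime availability check. A hedged sketch of the
guarded-use pattern they ask for; the helper and its counter_lock parameter are
hypothetical, while Atomic::add(jlong, volatile jlong*), VM_Version::supports_cx8() and
MutexLockerEx are existing HotSpot facilities:

    #include "runtime/atomic.hpp"
    #include "runtime/mutexLocker.hpp"
    #include "runtime/vm_version.hpp"

    // Hypothetical helper, for illustration only.
    static void bump_jlong_counter(volatile jlong* counter, Mutex* counter_lock) {
      if (VM_Version::supports_cx8()) {
        // The platform provides 64-bit atomic operations.
        Atomic::add((jlong)1, counter);
      } else {
        // Alternative action on platforms without 64-bit atomics:
        // serialize updates under a lock instead.
        MutexLockerEx ml(counter_lock, Mutex::_no_safepoint_check_flag);
        *counter = *counter + 1;
      }
    }
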
--- a/src/share/vm/runtime/mutexLocker.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -281,8 +281,8 @@
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
 
   def(JfrMsg_lock                  , Monitor, nonleaf+2,   true);
-  def(JfrBuffer_lock               , Mutex,   nonleaf+3,   true);
-  def(JfrStream_lock               , Mutex,   nonleaf+4,   true);
+  def(JfrBuffer_lock               , Mutex,   nonleaf+4,   true);
+  def(JfrStream_lock               , Mutex,   nonleaf+5,   true);
   def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
 }
 
--- a/src/share/vm/runtime/objectMonitor.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/objectMonitor.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -669,8 +669,7 @@
     assert (_succ != Self, "invariant") ;
     if (_Responsible == Self) {
         _Responsible = NULL ;
-        // Dekker pivot-point.
-        // Consider OrderAccess::storeload() here
+        OrderAccess::fence(); // Dekker pivot-point
 
         // We may leave threads on cxq|EntryList without a designated
         // "Responsible" thread.  This is benign.  When this thread subsequently
@@ -690,10 +689,6 @@
         //
         // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
         // exit operation from floating above the ST Responsible=null.
-        //
-        // In *practice* however, EnterI() is always followed by some atomic
-        // operation such as the decrement of _count in ::enter().  Those atomics
-        // obviate the need for the explicit MEMBAR, above.
     }
 
     // We've acquired ownership with CAS().
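
This hunk promotes the old advisory comment ("Consider OrderAccess::storeload() here") to
an actual fence at the Dekker pivot point: a store of _Responsible = NULL followed by a
load of cxq|EntryList, while other threads store to cxq|EntryList and then load
_Responsible. A generic standalone illustration of why the StoreLoad barrier matters
(illustrative variable names, not the monitor code itself):

    #include <atomic>

    // Each thread stores its own flag, then loads the other thread's flag
    // (the classic Dekker / store-buffering handshake).
    std::atomic<int> responsible_cleared{0};   // stands in for "_Responsible == NULL"
    std::atomic<int> waiter_enqueued{0};       // stands in for "cxq|EntryList != NULL"

    // Exiting thread: clear responsibility, then check for waiters.
    bool exiting_thread_sees_waiter() {
      responsible_cleared.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);   // the fence() added above
      return waiter_enqueued.load(std::memory_order_relaxed) != 0;
    }

    // Enqueueing thread: enqueue, then check whether responsibility was cleared.
    bool enqueueing_thread_sees_clear() {
      waiter_enqueued.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return responsible_cleared.load(std::memory_order_relaxed) != 0;
    }

With both fences in place at least one of the two threads is guaranteed to observe the
other's store; without them both loads can return 0 and neither thread takes action,
which is the progress failure the added fence prevents.
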
--- a/src/share/vm/runtime/objectMonitor.inline.hpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/objectMonitor.inline.hpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,10 +101,12 @@
   return _count;
 }
 
+// Do NOT set _count = 0. There is a race such that _count could
+// be set while inflating prior to setting _owner
+// Just use Atomic::inc/dec and assert 0 when monitor put on free list
 inline void ObjectMonitor::set_owner(void* owner) {
   _owner = owner;
   _recursions = 0;
-  _count = 0;
 }
 
 
--- a/src/share/vm/runtime/synchronizer.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/synchronizer.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -336,7 +336,9 @@
 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
   TEVENT (jni_exit) ;
   if (UseBiasedLocking) {
-    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    Handle h_obj(THREAD, obj);
+    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
+    obj = h_obj();
   }
   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
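
The fix works because BiasedLocking::revoke_and_rebias() can reach a safepoint, and a raw
oop must not be held across a safepoint: a GC there may move the object, and only a Handle
is updated to follow it. A small hedged restatement of the idiom used above (the helper is
hypothetical; Handle, oop, TRAPS/THREAD and the revoke_and_rebias() call are as in the patch):

    #include "runtime/biasedLocking.hpp"
    #include "runtime/handles.hpp"

    // Hypothetical helper, for illustration only.
    static void revoke_bias_safely(oop obj, TRAPS) {
      Handle h_obj(THREAD, obj);    // root the object before any possible safepoint
      BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);   // may safepoint
      obj = h_obj();                // re-read the (possibly relocated) oop
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }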
 
--- a/src/share/vm/runtime/thread.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/thread.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1482,7 +1482,7 @@
   } else {
     _jni_attach_state = _not_attaching_via_jni;
   }
-  assert(_deferred_card_mark.is_empty(), "Default MemRegion ctor");
+  assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
   _safepoint_visible = false;
 }
 
@@ -1883,9 +1883,16 @@
     JvmtiExport::cleanup_thread(this);
   }
 
+  // We must flush any deferred card marks before removing a thread from
+  // the list of active threads.
+  Universe::heap()->flush_deferred_store_barrier(this);
+  assert(deferred_card_mark().is_empty(), "Should have been flushed");
+
 #ifndef SERIALGC
-  // We must flush G1-related buffers before removing a thread from
-  // the list of active threads.
+  // We must flush the G1-related buffers before removing a thread
+  // from the list of active threads. We must do this after any deferred
+  // card marks have been flushed (above) so that any entries that are
+  // added to the thread's dirty card queue as a result are not lost.
   if (UseG1GC) {
     flush_barrier_queues();
   }
--- a/src/share/vm/runtime/vmThread.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -598,7 +598,11 @@
   if (!t->is_VM_thread()) {
     SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
     // JavaThread or WatcherThread
-    t->check_for_valid_safepoint_state(true);
+    bool concurrent = op->evaluate_concurrently();
+    // only blocking VM operations need to verify the caller's safepoint state:
+    if (!concurrent) {
+      t->check_for_valid_safepoint_state(true);
+    }
 
     // New request from Java thread, evaluate prologue
     if (!op->doit_prologue()) {
@@ -610,7 +614,6 @@
 
     // It does not make sense to execute the epilogue, if the VM operation object is getting
     // deallocated by the VM thread.
-    bool concurrent     = op->evaluate_concurrently();
     bool execute_epilog = !op->is_cheap_allocated();
     assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");
 
--- a/src/share/vm/utilities/ostream.cpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/utilities/ostream.cpp	Tue Mar 12 13:05:53 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -433,7 +433,7 @@
 
 rotatingFileStream::rotatingFileStream(const char* file_name) {
   _cur_file_num = 0;
-  _bytes_writen = 0L;
+  _bytes_written = 0L;
   _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
   jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
   _file = fopen(_file_name, "w");
@@ -442,7 +442,7 @@
 
 rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
   _cur_file_num = 0;
-  _bytes_writen = 0L;
+  _bytes_written = 0L;
   _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
   jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
   _file = fopen(_file_name, opentype);
@@ -450,10 +450,9 @@
 }
 
 void rotatingFileStream::write(const char* s, size_t len) {
-  if (_file != NULL)  {
-    // Make an unused local variable to avoid warning from gcc 4.x compiler.
+  if (_file != NULL) {
     size_t count = fwrite(s, 1, len, _file);
-    Atomic::add((jlong)count, &_bytes_writen);
+    _bytes_written += count;
   }
   update_position(s, len);
 }
@@ -467,7 +466,10 @@
 // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log
 // must be synchronized.
 void rotatingFileStream::rotate_log() {
-  if (_bytes_writen < (jlong)GCLogFileSize) return;
+  if (_bytes_written < (jlong)GCLogFileSize) {
+    return;
+  }
+
 #ifdef ASSERT
   Thread *thread = Thread::current();
   assert(thread == NULL ||
@@ -477,7 +479,7 @@
   if (NumberOfGCLogFiles == 1) {
     // rotate in same file
     rewind();
-    _bytes_writen = 0L;
+    _bytes_written = 0L;
     return;
   }
 
@@ -493,7 +495,7 @@
   }
   _file = fopen(_file_name, "w");
   if (_file != NULL) {
-    _bytes_writen = 0L;
+    _bytes_written = 0L;
     _need_close = true;
   } else {
     tty->print_cr("failed to open rotation log file %s due to %s\n",
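
Taken together, the ostream.cpp hunks rename _bytes_writen to _bytes_written, drop the
Atomic::add() in write() (per the comment above, write and rotate_log are already
serialized at a safepoint), and reset the counter whenever a log is rewound or reopened.
A simplified standalone sketch of the rotation flow, with illustrative types and names
(size_limit and num_files stand in for the real GCLogFileSize and NumberOfGCLogFiles flags):

    #include <cstddef>
    #include <cstdio>
    #include <string>

    struct RotatingLogSketch {
      std::FILE*  file = nullptr;
      std::string base_name;        // log files are named base_name.<n>
      long        bytes_written = 0;
      unsigned    cur_file_num = 0;
      unsigned    num_files = 1;    // stands in for NumberOfGCLogFiles
      long        size_limit = 0;   // stands in for GCLogFileSize

      void write(const char* s, std::size_t len) {
        if (file != nullptr) {
          bytes_written += (long) std::fwrite(s, 1, len, file);
        }
      }

      void rotate_log() {
        if (file == nullptr || bytes_written < size_limit) return;
        if (num_files == 1) {
          std::rewind(file);        // rotate within the same file
          bytes_written = 0;
          return;
        }
        std::fclose(file);
        cur_file_num = (cur_file_num + 1) % num_files;
        std::string name = base_name + "." + std::to_string(cur_file_num);
        file = std::fopen(name.c_str(), "w");
        if (file != nullptr) {
          bytes_written = 0;        // only reset once the new file is open
        }
      }
    };
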
--- a/src/share/vm/utilities/ostream.hpp	Mon Mar 11 14:48:37 2013 -0700
+++ b/src/share/vm/utilities/ostream.hpp	Tue Mar 12 13:05:53 2013 -0700
@@ -231,7 +231,7 @@
 class rotatingFileStream : public fileStream {
  protected:
   char*  _file_name;
-  jlong  _bytes_writen;
+  jlong  _bytes_written;
   uintx  _cur_file_num;             // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1
  public:
   rotatingFileStream(const char* file_name);