changeset 6004:4722cfd15c83 icedtea-2.6pre09

Merge 2014/10/14
author andrew
date Thu, 16 Oct 2014 19:18:18 +0100
parents d24203ca6ebd (current diff) 735b6a7be7dc (diff)
children b231464d6734
files .hgtags make/hotspot_version src/os/bsd/vm/os_bsd.cpp src/os/linux/vm/os_linux.cpp src/share/vm/adlc/formssel.cpp src/share/vm/c1/c1_globals.hpp src/share/vm/classfile/javaClasses.cpp src/share/vm/gc_implementation/g1/g1AllocRegion.hpp src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp src/share/vm/interpreter/linkResolver.cpp src/share/vm/opto/callGenerator.cpp src/share/vm/opto/graphKit.cpp src/share/vm/opto/library_call.cpp src/share/vm/opto/reg_split.cpp src/share/vm/prims/jvm.cpp src/share/vm/prims/jvm.h src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/objectMonitor.cpp src/share/vm/runtime/os.hpp src/share/vm/runtime/thread.cpp
diffstat 56 files changed, 1406 insertions(+), 288 deletions(-)
--- a/.hgtags	Wed Oct 01 14:51:36 2014 +0100
+++ b/.hgtags	Thu Oct 16 19:18:18 2014 +0100
@@ -648,6 +648,9 @@
 8175599864880938d68d0a515fa561043d7d5fd0 jdk7u55-b31
 ba9270b8fb1f4852ff1d9dab15571eb9e0714495 jdk7u55-b32
 0901a8cf66a0494b55bf104c9666d4e3c6ff93f0 jdk7u55-b33
+278d7e230b297a4632b94ddc07d591e74736e039 jdk7u55-b34
+db88943dba0b7672a09e22974934022fbe8ba8dd jdk7u55-b35
+b3e388601b0fc0922b311e2cc68b9417cedd16ef jdk7u55-b36
 ae4adc1492d1c90a70bd2d139a939fc0c8329be9 jdk7u60-b00
 af1fc2868a2b919727bfbb0858449bd991bbee4a jdk7u40-b60
 cc83359f5e5eb46dd9176b0a272390b1a0a51fdc hs24.60-b01
@@ -680,6 +683,10 @@
 617a6338e0c4f7699eed5061d7e8f576c3ace029 jdk7u60-b18
 4a9635c98a917cfcef506ca5d034c733a33c53f3 jdk7u65-b01
 361493c7cdb5f75b28efc63389d6cebaaaa43a2c jdk7u60-b19
+13f561930b3e80a94e2baddc51dfc6c43c5ca601 jdk7u60-b30
+35b2dbe7f7c69ea0f2feb1e66fe8651511a5fb6d jdk7u60-b31
+f166d2e391993f1b12b4ad1685baf999c78e6372 jdk7u60-b32
+cc1fea28c886ef100632247a708eac0c83640914 jdk7u60-b33
 eb797fab50d3b440b17b3e7c5d83f42bfa73655e jdk7u65-b02
 bb00df28ecdbd0da89ab4ed81f6f2b732fa512da jdk7u65-b03
 848481af9003067546c7f34c166bb8d745b95d5f jdk7u65-b04
@@ -700,6 +707,52 @@
 d006213be74730453cf5c3ce31f1d1d505334419 jdk7u65-b18
 1d8226b3e9896656451801393eb3ae394faeb638 jdk7u65-b19
 c43b0b843f897a4d8cf0a3566b017b87230dd3b4 jdk7u65-b32
+d3c9265e12fa115052f18d1e3d379143b56bbf63 jdk7u65-b20
+39776d90970221dd260187acb4c37631e41a66a9 jdk7u67-b01
+1d8226b3e9896656451801393eb3ae394faeb638 jdk7u65-b40
+cf8b3a090e597e59177c5f67d44cdec12309777f jdk7u65-b31
+df855c3f4d31dd7db081d68e3054518380127893 jdk7u65-b33
+6b37a189944aaa09e81d97d394496464d16bee42 jdk7u66-b00
+121dc94194d9234e2b13c867d875e23e1bdd6abd jdk7u66-b01
+f28ea516eb0b9e99f1e342954ab4642456af4da1 jdk7u66-b09
+3dc6ae1972a45ba563518cc0e51f09885258f69d jdk7u66-b10
+8d2b3f7d5b3001d019832476d684679ca6be0c8d jdk7u66-b11
+5ee19b64ef208daaef91f063d800aa162427f8f6 jdk7u66-b12
+a1e6f9c4c1f47be1b0edef6bd92399f8f07b7d15 jdk7u66-b13
+b44baba406f2de6eeccc57dbfae653cf124b527b jdk7u66-b14
+d20b495c96d3f8899a64657aba0fc72799773cb3 jdk7u66-b15
+3bbfed065c601187449d319fd70bba6ae1ebb707 jdk7u66-b16
+4abb71ff14b2e6cf932e5c61900f480d5e1afedb jdk7u66-b17
+4ceb9c03fe8ee6b93d22854780ef8c737edd14b2 jdk7u71-b00
+f95d6d32e08006209f1798f82b60d7d05767a3e8 jdk7u71-b01
+1c760efe2d0795f4ce8260ec655b8870bfd77ca1 jdk7u71-b02
+0cb0b5abd0b5aa25fc8bd5920c8d61c5b85a10c6 jdk7u71-b03
+a491e5e52998c23502ebb1340955e3e726d44ad6 jdk7u71-b04
+c93efe6377ffd7484c50cba9a88a37bebf525114 jdk7u71-b05
+f95fa655cc119659686ba68c7242497fd209f9e1 jdk7u71-b06
+7f32b65fde34db41bf951ed81374240840ef88f4 jdk7u71-b07
+4e17bd4fb2304d068023d9d805e86d6b592d4230 jdk7u71-b08
+1ffc702334d960aa4015e5cc6f4fb9e971952b54 jdk7u71-b09
+9a17c184bcb99f13dc6ab714ad98976410429637 jdk7u71-b10
+d6cb97651f0bd8d61f4d22aa7550145bbe6fb051 jdk7u71-b11
+959b4e5d2e3111920c198187f3bc66eba3e457f1 jdk7u71-b12
+608f470d22689bab17bab0ea1dbee3e1a0802d5b jdk7u71-b13
+ad909197a1ce2df483a20ff9ac380382f779a9d3 jdk7u71-b14
+1bd3adac3aac3c29c81303812b35f484ff90cb2b jdk7u72-b01
+0caed46767e35c00eff69b22acf984d98eb66b3d jdk7u72-b02
+3a2934191de4bb8ca9d2faca93f3381e521e8cac jdk7u72-b03
+e4708cde2898df4c936595aacb57bc5b4e15869a jdk7u72-b04
+137e0859cd296cb8d9f9e327112ddc793ed59318 jdk7u72-b05
+4d9d227d70f33b70461230172386217317954312 jdk7u72-b06
+ece56f93f37b41b9c8875e54fbd8010277f6b460 jdk7u72-b07
+439c695a7aa03652ab92681120434b9ce8cdd2b7 jdk7u72-b08
+a27f16d45457a68a723acca621cb11bc173a0eb6 jdk7u72-b09
+e6508ab77271d1d3ce7b5f60d91a7334fdacb03a jdk7u72-b10
+c17a8487086433e14cd22373039a8b6b48e7cbb8 jdk7u72-b11
+a9e695f0d831f115720a4dcad3d33e0003b0acad jdk7u72-b12
+ac701f87d1ea46033c69f3e1cb84fc0a971da70c jdk7u72-b13
+d9b56c6bdddb6f9d8242230f5fdd58f9c7d30ea5 jdk7u72-b14
+a6ae698522bfab3c595a4f8c2c3ee7e8939eb1bb jdk7u72-b30
 b92f390febd01615af4a736b4f830f6052aa1d09 hs24.80-b00
 1448ebfef4f1aae0174eca983ad05507730ca6fd hs24.80-b01
 b1d29549dca7e36a4d050af5a54f8f56963a5c7d hs24.80-b02
@@ -718,3 +771,4 @@
 8ffb87775f56ed5c602f320d2513351298ee4778 icedtea-2.6pre07
 b517477362d1b0d4f9b567c82db85136fd14bc6e icedtea-2.6pre06
 6d5ec408f4cac2c2004bf6120403df1b18051a21 icedtea-2.6pre08
+bad107a5d096b070355c5a2d80aa50bc5576144b jdk7u80-b02
--- a/make/bsd/makefiles/mapfile-vers-debug	Wed Oct 01 14:51:36 2014 +0100
+++ b/make/bsd/makefiles/mapfile-vers-debug	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,7 @@
                 _JVM_EnableCompiler
                 _JVM_Exit
                 _JVM_FillInStackTrace
+                _JVM_FindClassFromCaller
                 _JVM_FindClassFromClass
                 _JVM_FindClassFromClassLoader
                 _JVM_FindClassFromBootLoader
--- a/make/bsd/makefiles/mapfile-vers-product	Wed Oct 01 14:51:36 2014 +0100
+++ b/make/bsd/makefiles/mapfile-vers-product	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,7 @@
                 _JVM_EnableCompiler
                 _JVM_Exit
                 _JVM_FillInStackTrace
+                _JVM_FindClassFromCaller
                 _JVM_FindClassFromClass
                 _JVM_FindClassFromClassLoader
                 _JVM_FindClassFromBootLoader
--- a/make/linux/makefiles/mapfile-vers-debug	Wed Oct 01 14:51:36 2014 +0100
+++ b/make/linux/makefiles/mapfile-vers-debug	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
--- a/make/linux/makefiles/mapfile-vers-product	Wed Oct 01 14:51:36 2014 +0100
+++ b/make/linux/makefiles/mapfile-vers-product	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,7 @@
                 JVM_EnableCompiler;
                 JVM_Exit;
                 JVM_FillInStackTrace;
+                JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
                 JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
--- a/make/solaris/makefiles/mapfile-vers	Wed Oct 01 14:51:36 2014 +0100
+++ b/make/solaris/makefiles/mapfile-vers	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,7 @@
 		JVM_EnableCompiler;
 		JVM_Exit;
 		JVM_FillInStackTrace;
+		JVM_FindClassFromCaller;
 		JVM_FindClassFromClass;
 		JVM_FindClassFromClassLoader;
 		JVM_FindClassFromBootLoader;
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -614,8 +614,8 @@
     // Save the regs and make space for a C call
     __ save(SP, -96, SP);
     __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
+    BLOCK_COMMENT("call os::naked_short_sleep");
+    __ call(CAST_FROM_FN_PTR(address, os::naked_short_sleep));
     __ delayed()->nop();
     __ restore_globals_from_locals();
     __ restore();
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -98,6 +98,27 @@
   _supports_cx8 = has_v9();
   _supports_atomic_getset4 = true; // swap instruction
 
+  // There are Fujitsu Sparc64 CPUs which support blk_init as well so
+  // we have to take this check out of the 'is_niagara()' block below.
+  if (has_blk_init()) {
+    // When using CMS or G1, we cannot use memset() in BOT updates
+    // because the sun4v/CMT version in libc_psr uses BIS which
+    // exposes "phantom zeros" to concurrent readers. See 6948537.
+    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
+      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
+    }
+    // Issue a stern warning if the user has explicitly set
+    // UseMemSetInBOT (it is known to cause issues), but allow
+    // use for experimentation and debugging.
+    if (UseConcMarkSweepGC || UseG1GC) {
+      if (UseMemSetInBOT) {
+        assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
+        warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
+                " on sun4v; please understand that you are using at your own risk!");
+      }
+    }
+  }
+
   if (is_niagara()) {
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseInlineCaches)) {
@@ -107,12 +128,6 @@
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
-    // When using CMS or G1, we cannot use memset() in BOT updates
-    // because the sun4v/CMT version in libc_psr uses BIS which
-    // exposes "phantom zeros" to concurrent readers. See 6948537.
-    if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
-      FLAG_SET_DEFAULT(UseMemSetInBOT, false);
-    }
 #ifdef _LP64
     // 32-bit oops don't make sense for the 64-bit VM on sparc
     // since the 32-bit VM has the same registers and smaller objects.
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -96,7 +96,13 @@
   static bool is_M_family(int features) { return (features & M_family_m) != 0; }
   static bool is_T_family(int features) { return (features & T_family_m) != 0; }
   static bool is_niagara() { return is_T_family(_features); }
-  DEBUG_ONLY( static bool is_niagara(int features)  { return (features & sun4v_m) != 0; } )
+#ifdef ASSERT
+  static bool is_niagara(int features)  {
+    // 'sun4v_m' may be set on Sun/Oracle Sparc CPUs as well as on Fujitsu
+    // Sparc64 CPUs, but only Sun/Oracle Sparcs can be 'niagaras'.
+    return (features & sun4v_m) != 0 && (features & sparc64_family_m) == 0;
+  }
+#endif
 
   // Returns true if it is niagara1 (T1).
   static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }
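
The two SPARC hunks above make the same correction in two places: the sun4v feature bit is no longer treated as a synonym for Niagara, because Fujitsu Sparc64 chips also report sun4v (and blk_init). The BIS/UseMemSetInBOT decision now keys off has_blk_init(), and is_niagara(int) additionally requires that the Sparc64 family bit is clear. A minimal sketch of the corrected mask test, with hypothetical bit values (the real masks are members of VM_Version in vm_version_sparc.hpp):

    #include <cstdio>

    // Hypothetical feature bits; the real masks are filled in by the
    // platform feature probe in vm_version_sparc.cpp.
    const int sun4v_m          = 1 << 0;
    const int sparc64_family_m = 1 << 1;
    const int blk_init_m       = 1 << 2;

    // Mirrors the corrected check: the sun4v bit alone is not enough, the
    // CPU must also not belong to the Fujitsu Sparc64 family.
    static bool is_niagara(int features) {
      return (features & sun4v_m) != 0 && (features & sparc64_family_m) == 0;
    }

    int main() {
      int oracle_t_series = sun4v_m | blk_init_m;
      int fujitsu_sparc64 = sun4v_m | sparc64_family_m | blk_init_m;
      std::printf("T-series is_niagara=%d, Sparc64 is_niagara=%d\n",
                  is_niagara(oracle_t_series), is_niagara(fujitsu_sparc64));
      return 0;
    }
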
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -7641,10 +7641,12 @@
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
   // increase precision for intermediate steps of the computation
+  BLOCK_COMMENT("fast_pow {");
   increase_precision();
   fyl2x();                 // Stack: (Y*log2(X)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
   restore_precision();
+  BLOCK_COMMENT("} fast_pow");
 }
 
 void MacroAssembler::fast_exp() {
--- a/src/os/bsd/dtrace/libjvm_db.c	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/os/bsd/dtrace/libjvm_db.c	Thu Oct 16 19:18:18 2014 +0100
@@ -261,6 +261,9 @@
   uint64_t base;
   int err;
 
+  /* Clear *vmp now in case we jump to fail: */
+  memset(vmp, 0, sizeof(VMStructEntry));
+
   err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr);
   CHECK_FAIL(err);
   err = read_pointer(J, sym_addr, &gHotSpotVMStructs);
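
Both libjvm_db.c hunks (BSD here, Solaris below) add the same defensive line: zero the caller-supplied VMStructEntry before the first lookup, so a jump to the fail: label can never hand back uninitialized pointer fields. A stripped-down sketch of that pattern, with a stand-in for ps_pglobal_lookup():

    #include <cstdio>
    #include <cstring>

    struct VMStructEntry { const char* typeName; unsigned long address; };

    // Stand-in for ps_pglobal_lookup(); returns non-zero on error.
    static int lookup_symbol(const char* name, unsigned long* addr) {
      (void)name; (void)addr;
      return -1;                 // simulate a failed symbol lookup
    }

    // Same shape as the patched function: clear *vmp up front, so the error
    // path cannot hand back a struct full of stale stack garbage.
    static int fill_entry(VMStructEntry* vmp) {
      std::memset(vmp, 0, sizeof(VMStructEntry));
      unsigned long sym_addr;
      if (lookup_symbol("gHotSpotVMStructs", &sym_addr) != 0) goto fail;
      vmp->address = sym_addr;
      return 0;
     fail:
      return -1;                 // *vmp is still well defined (all zeroes)
    }

    int main() {
      VMStructEntry e;
      fill_entry(&e);
      std::printf("typeName=%p address=%lu\n", (void*)e.typeName, e.address);
    }
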
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -3711,9 +3711,21 @@
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptible sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
--- a/src/os/linux/vm/os_linux.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -3944,9 +3944,33 @@
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+//
+// Short sleep, direct OS call.
+//
+// Note: certain versions of the Linux CFS scheduler (since 2.6.23) do not
+// guarantee that sched_yield(2) will actually give up the CPU:
+//
+//   * A thread that is alone on its particular CPU keeps running.
+//   * On kernels before the introduction of "skip_buddy" (pre 2.6.39), when
+//     "compat_yield" is disabled.
+//
+// So calling this with ms = 0 is an alternative.
+//
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptible sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
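
The BSD and Linux hunks replace the old naked_sleep() with naked_short_sleep(jlong ms): a direct nanosleep(2) call that never goes through the interruptible os::sleep() machinery, with ms == 0 degenerating to a 1 ns request. A standalone sketch of the same call, assuming any POSIX system:

    #include <assert.h>
    #include <time.h>

    // Sleep for up to 'ms' milliseconds (ms < 1000) with a bare nanosleep(2),
    // bypassing any interruptible sleep machinery; ms == 0 degenerates to a
    // 1 ns request, i.e. "give others a chance to run".
    static void naked_short_sleep(long ms) {
      assert(ms < 1000 && "short, un-interruptible sleep only");
      struct timespec req;
      req.tv_sec  = 0;
      req.tv_nsec = (ms > 0) ? ms * 1000000L : 1;
      nanosleep(&req, NULL);    // EINTR is ignored: callers only need "roughly ms"
    }

    int main() {
      naked_short_sleep(1);     // e.g. one step of a spin-then-sleep backoff
      return 0;
    }
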
--- a/src/os/solaris/dtrace/libjvm_db.c	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/os/solaris/dtrace/libjvm_db.c	Thu Oct 16 19:18:18 2014 +0100
@@ -261,6 +261,9 @@
   uint64_t base;
   int err;
 
+  /* Clear *vmp now in case we jump to fail: */
+  memset(vmp, 0, sizeof(VMStructEntry));
+
   err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr);
   CHECK_FAIL(err);
   err = read_pointer(J, sym_addr, &gHotSpotVMStructs);
--- a/src/os/solaris/vm/os_solaris.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -3742,9 +3742,14 @@
   return os_sleep(millis, interruptible);
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os_sleep(1, false);
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptible sleep, short time use only");
+
+  // usleep is deprecated and removed from newer POSIX in favour of nanosleep,
+  // but using nanosleep on Solaris would require linking against -lrt.
+  usleep((ms * 1000));
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
--- a/src/os/windows/vm/os_windows.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -3421,6 +3421,16 @@
   return result;
 }
 
+//
+// Short sleep, direct OS call.
+//
+// ms = 0, means allow others (if any) to run.
+//
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptible sleep, short time use only");
+  Sleep(ms);
+}
+
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
--- a/src/share/vm/adlc/formssel.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/adlc/formssel.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -565,12 +565,6 @@
     attr = (Attribute *)attr->_next;
   }
 
-  // Ugly: until a better fix is implemented, disable rematerialization for
-  // negD nodes because they are proved to be problematic.
-  if (is_ideal_negD()) {
-    return false;
-  }
-
   // Constants
   if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
     rematerialize = true;
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -3663,6 +3663,7 @@
 
   // now perform tests that are based on flag settings
   if (callee->force_inline()) {
+    if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
     print_inlining(callee, "force inline by annotation");
   } else if (callee->should_inline()) {
     print_inlining(callee, "force inline by CompileOracle");
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -2721,6 +2721,11 @@
       "bootstrap_method_index %u has bad constant type in class file %s",
       bootstrap_method_index,
       CHECK);
+
+    guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(),
+      "Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s",
+      CHECK);
+
     operands->short_at_put(operand_fill_index++, bootstrap_method_index);
     operands->short_at_put(operand_fill_index++, argument_count);
 
@@ -2738,7 +2743,6 @@
   }
 
   assert(operand_fill_index == operands()->length(), "exact fill");
-  assert(constantPoolOopDesc::operand_array_length(operands()) == attribute_array_length, "correct decode");
 
   u1* current_end = cfs->current();
   guarantee_property(current_end == current_start + attribute_byte_length,
@@ -3743,7 +3747,7 @@
     }
 
     // Allocate mirror and initialize static fields
-    java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle));
+    java_lang_Class::create_mirror(this_klass, class_loader, CHECK_(nullHandle));
 
     ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()),
                                              false /* not shared class */);
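
The classFileParser.cpp hunk adds a bounds check before each BootstrapMethods entry is written into the pre-sized operands array, so a class file whose num_bootstrap_methods / num_bootstrap_arguments counts are inconsistent is rejected instead of being written past the end of the array (the now-unreliable operand_array_length assert is dropped). A simplified sketch of the same guard, using a plain vector in place of the operands typeArray:

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Append one BootstrapMethods record (method index, argument count, then
    // the argument indices) to a fixed-size operand table, refusing to write
    // past its end when the class file's declared counts are inconsistent.
    static void fill_bootstrap_entry(std::vector<uint16_t>& operands,
                                     size_t& fill_index,
                                     uint16_t bootstrap_method_index,
                                     const std::vector<uint16_t>& args) {
      // Same shape as the new guarantee_property(): the last slot written is
      // fill_index + 1 + args.size(), which must stay inside the array.
      if (!(fill_index + 1 + args.size() < operands.size())) {
        throw std::runtime_error("Invalid BootstrapMethods num_bootstrap_methods "
                                 "or num_bootstrap_arguments value");
      }
      operands[fill_index++] = bootstrap_method_index;
      operands[fill_index++] = static_cast<uint16_t>(args.size());
      for (uint16_t a : args) operands[fill_index++] = a;
    }

    int main() {
      std::vector<uint16_t> operands(4);   // deliberately too small
      size_t fill = 0;
      try {
        fill_bootstrap_entry(operands, fill, 7, {1, 2, 3});   // needs 5 slots
      } catch (const std::exception&) {
        return 1;                          // rejected, as the parser now does
      }
      return 0;
    }
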
--- a/src/share/vm/classfile/javaClasses.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -533,10 +533,10 @@
       }
     }
   }
-  create_mirror(k, CHECK);
+  create_mirror(k, Handle(NULL), CHECK);
 }
 
-oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
+oop java_lang_Class::create_mirror(KlassHandle k, Handle class_loader, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers().  Instance classes recalculate
@@ -577,6 +577,8 @@
       // Initialize static fields
       instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
     }
+    // set the classLoader field in the java_lang_Class instance
+    set_class_loader(mirror(), class_loader());
     return mirror();
   } else {
     return NULL;
@@ -602,6 +604,18 @@
   java_class->int_field_put(_static_oop_field_count_offset, size);
 }
 
+void java_lang_Class::set_class_loader(oop java_class, oop loader) {
+  // jdk7 runs Queens in bootstrapping and jdk8-9 has no coordinated pushes yet.
+  if (_class_loader_offset != 0) {
+    java_class->obj_field_put(_class_loader_offset, loader);
+  }
+}
+
+oop java_lang_Class::class_loader(oop java_class) {
+  assert(_class_loader_offset != 0, "must be set");
+  return java_class->obj_field(_class_loader_offset);
+}
+
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
@@ -765,6 +779,12 @@
   compute_optional_offset(classRedefinedCount_offset,
                           klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
 
+  // Needs to be optional because the old build runs Queens during bootstrapping
+  // and jdk8-9 doesn't have coordinated pushes yet.
+  compute_optional_offset(_class_loader_offset,
+                 klass_oop, vmSymbols::classClassLoader_name(),
+                 vmSymbols::classloader_signature());
+
   CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
@@ -2833,6 +2853,7 @@
 int java_lang_Class::_resolved_constructor_offset;
 int java_lang_Class::_oop_size_offset;
 int java_lang_Class::_static_oop_field_count_offset;
+int java_lang_Class::_class_loader_offset;
 int java_lang_Throwable::backtrace_offset;
 int java_lang_Throwable::detailMessage_offset;
 int java_lang_Throwable::cause_offset;
@@ -3280,3 +3301,4 @@
   JavaClasses::check_offsets();
   FilteredFieldsMap::initialize();  // must be done after computing offsets.
 }
+
--- a/src/share/vm/classfile/javaClasses.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -224,14 +224,21 @@
   static int _oop_size_offset;
   static int _static_oop_field_count_offset;
 
+  static int _class_loader_offset;
+
   static bool offsets_computed;
   static int classRedefinedCount_offset;
 
+  static void set_class_loader(oop java_class, oop class_loader);
  public:
   static void compute_offsets();
 
   // Instance creation
-  static oop  create_mirror(KlassHandle k, TRAPS);
+  static oop  create_mirror(KlassHandle k, Handle class_loader, TRAPS);
+  static oop  create_mirror(KlassHandle k, TRAPS) {
+    return create_mirror(k, Handle(), THREAD);
+  }
+
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
@@ -267,6 +274,8 @@
   static int classRedefinedCount(oop the_class_mirror);
   static void set_classRedefinedCount(oop the_class_mirror, int value);
 
+  static oop class_loader(oop java_class);
+
   static int oop_size(oop java_class);
   static void set_oop_size(oop java_class, int size);
   static int static_oop_field_count(oop java_class);
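
The javaClasses changes thread a class_loader handle into create_mirror() and store it in a new, optional classLoader field of java.lang.Class: compute_optional_offset() leaves _class_loader_offset at 0 when the booted class library does not declare the field, and set_class_loader() silently skips the write in that case. A minimal sketch of that guarded-optional-offset pattern, with invented slot numbers:

    #include <cstdio>

    // Slot 0 means "field absent in this class library"; a non-zero value is
    // the slot that compute_optional_offset() would have found.
    static int _class_loader_offset = 0;

    struct Mirror { void* fields[4]; };

    // Mirrors set_class_loader(): the write is silently skipped when the
    // optional offset was never computed.
    static void set_class_loader(Mirror* mirror, void* loader) {
      if (_class_loader_offset != 0) {
        mirror->fields[_class_loader_offset] = loader;
      }
    }

    int main() {
      Mirror m = {};
      static int loader_stand_in;               // placeholder for a ClassLoader oop
      set_class_loader(&m, &loader_stand_in);   // no-op: offset not computed
      _class_loader_offset = 2;                 // pretend the field was found
      set_class_loader(&m, &loader_stand_in);
      std::printf("slot 2 set: %d\n", m.fields[2] == &loader_stand_in);
    }
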
--- a/src/share/vm/classfile/stackMapFrame.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/stackMapFrame.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,15 +54,19 @@
   return frame;
 }
 
-bool StackMapFrame::has_new_object() const {
+// Return true if frame has an uninitialized (new) object that differs
+// from the target frame's object.
+bool StackMapFrame::has_nonmatching_new_object(const StackMapFrame *target_frame) const {
   int32_t i;
   for (i = 0; i < _max_locals; i++) {
-    if (_locals[i].is_uninitialized()) {
+    if (_locals[i].is_uninitialized() &&
+        !_locals[i].equals(target_frame->_locals[i])) {
       return true;
     }
   }
   for (i = 0; i < _stack_size; i++) {
-    if (_stack[i].is_uninitialized()) {
+    if (_stack[i].is_uninitialized() &&
+        !_stack[i].equals(target_frame->_stack[i])) {
       return true;
     }
   }
--- a/src/share/vm/classfile/stackMapFrame.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/stackMapFrame.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,8 +155,9 @@
     const methodHandle m, VerificationType thisKlass, TRAPS);
 
   // Search local variable type array and stack type array.
-  // Return true if an uninitialized object is found.
-  bool has_new_object() const;
+  // Return true if an uninitialized object is found that is
+  // not equal to the corresponding object on the target frame.
+  bool has_nonmatching_new_object(const StackMapFrame *target_frame) const;
 
   // Search local variable type array and stack type array.
   // Set every element with type of old_object to new_object.
--- a/src/share/vm/classfile/stackMapTable.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/stackMapTable.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -138,7 +138,10 @@
 
 void StackMapTable::check_new_object(
     const StackMapFrame* frame, int32_t target, TRAPS) const {
-  if (frame->offset() > target && frame->has_new_object()) {
+  int frame_index = get_index_from_offset(target);
+  assert(frame_index >= 0 && frame_index < _frame_count, "bad frame index");
+  if (frame->offset() > target &&
+      frame->has_nonmatching_new_object(_frame_array[frame_index])) {
     frame->verifier()->verify_error(
         ErrorContext::bad_code(frame->offset()),
         "Uninitialized object exists on backward branch %d", target);
--- a/src/share/vm/classfile/verifier.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/verifier.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -2222,6 +2222,181 @@
   }
 }
 
+// Look at the method's handlers.  If the bci is in the handler's try block
+// then check if the handler_pc is already on the stack.  If not, push it.
+void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
+                                  GrowableArray<u4>* handler_stack,
+                                  u4 bci) {
+  int exlength = exhandlers->length();
+  for(int x = 0; x < exlength; x++) {
+    if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
+      handler_stack->append_if_missing(exhandlers->handler_pc(x));
+    }
+  }
+}
+
+// Return TRUE if all code paths starting with start_bc_offset end in
+// bytecode athrow or loop.
+bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
+  ResourceMark rm;
+  // Create bytecode stream.
+  RawBytecodeStream bcs(method());
+  u4 code_length = method()->code_size();
+  bcs.set_start(start_bc_offset);
+  u4 target;
+  // Create stack for storing bytecode start offsets for if* and *switch.
+  GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
+  // Create stack for handlers for try blocks containing this handler.
+  GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
+  // Create list of visited branch opcodes (goto* and if*).
+  GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
+  ExceptionTable exhandlers(_method());
+
+  while (true) {
+    if (bcs.is_last_bytecode()) {
+      // if no more starting offsets to parse or if at the end of the
+      // method then return false.
+      if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
+        return false;
+      // Pop a bytecode starting offset and scan from there.
+      bcs.set_start(bci_stack->pop());
+    }
+    Bytecodes::Code opcode = bcs.raw_next();
+    u4 bci = bcs.bci();
+
+    // If the bytecode is in a TRY block, push its handlers so they
+    // will get parsed.
+    push_handlers(&exhandlers, handler_stack, bci);
+
+    switch (opcode) {
+      case Bytecodes::_if_icmpeq:
+      case Bytecodes::_if_icmpne:
+      case Bytecodes::_if_icmplt:
+      case Bytecodes::_if_icmpge:
+      case Bytecodes::_if_icmpgt:
+      case Bytecodes::_if_icmple:
+      case Bytecodes::_ifeq:
+      case Bytecodes::_ifne:
+      case Bytecodes::_iflt:
+      case Bytecodes::_ifge:
+      case Bytecodes::_ifgt:
+      case Bytecodes::_ifle:
+      case Bytecodes::_if_acmpeq:
+      case Bytecodes::_if_acmpne:
+      case Bytecodes::_ifnull:
+      case Bytecodes::_ifnonnull:
+        target = bcs.dest();
+        if (visited_branches->contains(bci)) {
+          if (bci_stack->is_empty()) return true;
+          // Pop a bytecode starting offset and scan from there.
+          bcs.set_start(bci_stack->pop());
+        } else {
+          if (target > bci) { // forward branch
+            if (target >= code_length) return false;
+            // Push the branch target onto the stack.
+            bci_stack->push(target);
+            // then, scan bytecodes starting with next.
+            bcs.set_start(bcs.next_bci());
+          } else { // backward branch
+            // Push bytecode offset following backward branch onto the stack.
+            bci_stack->push(bcs.next_bci());
+            // Check bytecodes starting with branch target.
+            bcs.set_start(target);
+          }
+          // Record target so we don't branch here again.
+          visited_branches->append(bci);
+        }
+        break;
+
+      case Bytecodes::_goto:
+      case Bytecodes::_goto_w:
+        target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
+        if (visited_branches->contains(bci)) {
+          if (bci_stack->is_empty()) return true;
+          // Been here before, pop new starting offset from stack.
+          bcs.set_start(bci_stack->pop());
+        } else {
+          if (target >= code_length) return false;
+          // Continue scanning from the target onward.
+          bcs.set_start(target);
+          // Record target so we don't branch here again.
+          visited_branches->append(bci);
+        }
+        break;
+
+      // Check that all switch alternatives end in 'athrow' bytecodes. Since it
+      // is  difficult to determine where each switch alternative ends, parse
+      // each switch alternative until either hit a 'return', 'athrow', or reach
+      // the end of the method's bytecodes.  This is gross but should be okay
+      // because:
+      // 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit
+      //    constructor invocations should be rare.
+      // 2. if each switch alternative ends in an athrow then the parsing should be
+      //    short.  If there is no athrow then it is bogus code, anyway.
+      case Bytecodes::_lookupswitch:
+      case Bytecodes::_tableswitch:
+        {
+          address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
+          u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
+          int keys, delta;
+          if (opcode == Bytecodes::_tableswitch) {
+            jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
+            jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
+            // This is invalid, but let the regular bytecode verifier
+            // report this because the user will get a better error message.
+            if (low > high) return true;
+            keys = high - low + 1;
+            delta = 1;
+          } else {
+            keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
+            delta = 2;
+          }
+          // Invalid, let the regular bytecode verifier deal with it.
+          if (keys < 0) return true;
+
+          // Push the offset of the next bytecode onto the stack.
+          bci_stack->push(bcs.next_bci());
+
+          // Push the switch alternatives onto the stack.
+          for (int i = 0; i < keys; i++) {
+            u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
+            if (target > code_length) return false;
+            bci_stack->push(target);
+          }
+
+          // Start bytecode parsing for the switch at the default alternative.
+          if (default_offset > code_length) return false;
+          bcs.set_start(default_offset);
+          break;
+        }
+
+      case Bytecodes::_return:
+        return false;
+
+      case Bytecodes::_athrow:
+        {
+          if (bci_stack->is_empty()) {
+            if (handler_stack->is_empty()) {
+              return true;
+            } else {
+              // Parse the catch handlers for try blocks containing athrow.
+              bcs.set_start(handler_stack->pop());
+            }
+          } else {
+            // Pop a bytecode starting offset and scan from there.
+            bcs.set_start(bci_stack->pop());
+          }
+        }
+        break;
+
+      default:
+        ;
+    } // end switch
+  } // end while loop
+
+  return false;
+}
+
 void ClassVerifier::verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
     StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
@@ -2241,18 +2416,26 @@
       return;
     }
 
-    // Make sure that this call is not done from within a TRY block because
-    // that can result in returning an incomplete object.  Simply checking
-    // (bci >= start_pc) also ensures that this call is not done after a TRY
-    // block.  That is also illegal because this call must be the first Java
-    // statement in the constructor.
+    // Check if this call is done from inside of a TRY block.  If so, make
+    // sure that all catch clause paths end in a throw.  Otherwise, this
+    // can result in returning an incomplete object.
     ExceptionTable exhandlers(_method());
     int exlength = exhandlers.length();
     for(int i = 0; i < exlength; i++) {
-      if (bci >= exhandlers.start_pc(i)) {
-        verify_error(ErrorContext::bad_code(bci),
-                     "Bad <init> method call from after the start of a try block");
-        return;
+      u2 start_pc = exhandlers.start_pc(i);
+      u2 end_pc = exhandlers.end_pc(i);
+
+      if (bci >= start_pc && bci < end_pc) {
+        if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+          verify_error(ErrorContext::bad_code(bci),
+            "Bad <init> method call from after the start of a try block");
+          return;
+        } else if (VerboseVerification) {
+          ResourceMark rm;
+          tty->print_cr(
+            "Survived call to ends_in_athrow(): %s",
+                        current_class()->name()->as_C_string());
+        }
       }
     }
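
The switch cases in ends_in_athrow() decode tableswitch/lookupswitch operands by hand: the operands begin at the next 4-byte boundary after the opcode, as big-endian u4 values (default offset, then low/high or npairs), and jump offsets appear in every u4 for tableswitch (delta 1) or every other u4 for lookupswitch (delta 2). A self-contained sketch of that decoding for the tableswitch case, padding relative to the start of the code array as the class-file format specifies:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Read a big-endian u4 from the code array (class-file byte order).
    static uint32_t get_u4(const std::vector<uint8_t>& code, size_t at) {
      return (uint32_t(code[at]) << 24) | (uint32_t(code[at + 1]) << 16) |
             (uint32_t(code[at + 2]) << 8) | uint32_t(code[at + 3]);
    }

    // Collect the branch targets of a tableswitch at 'bci', default target
    // first. Assumes low <= high; the real code hands malformed instructions
    // back to the bytecode verifier instead.
    static std::vector<int32_t> tableswitch_targets(const std::vector<uint8_t>& code,
                                                    int32_t bci) {
      size_t p = (size_t(bci) + 1 + 3) & ~size_t(3);  // skip opcode, pad to 4-byte boundary
      std::vector<int32_t> targets;
      targets.push_back(bci + int32_t(get_u4(code, p)));          // default offset
      int32_t low  = int32_t(get_u4(code, p + 4));
      int32_t high = int32_t(get_u4(code, p + 8));
      for (int32_t i = 0; i <= high - low; i++) {                 // keys = high - low + 1
        targets.push_back(bci + int32_t(get_u4(code, p + 12 + 4 * size_t(i))));
      }
      return targets;
    }

    int main() {
      // tableswitch at bci 0: pad to offset 4, default +20, low 0, high 1,
      // case offsets +16 and +18.
      std::vector<uint8_t> code = {
        0xaa, 0, 0, 0,              // opcode + 3 pad bytes
        0, 0, 0, 20,                // default
        0, 0, 0, 0,                 // low
        0, 0, 0, 1,                 // high
        0, 0, 0, 16,  0, 0, 0, 18,  // jump offsets for keys 0 and 1
      };
      for (int32_t t : tableswitch_targets(code, 0)) std::printf("%d ", t);
      std::printf("\n");            // prints: 20 16 18
    }
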
 
--- a/src/share/vm/classfile/verifier.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/verifier.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -30,6 +30,7 @@
 #include "oops/klass.hpp"
 #include "oops/methodOop.hpp"
 #include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
 #include "utilities/exceptions.hpp"
 
 // The verifier class
@@ -301,6 +302,16 @@
     StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
     constantPoolHandle cp, TRAPS);
 
+  // Used by ends_in_athrow() to push all handlers that contain bci onto
+  // the handler_stack, if the handler is not already on the stack.
+  void push_handlers(ExceptionTable* exhandlers,
+                     GrowableArray<u4>* handler_stack,
+                     u4 bci);
+
+  // Returns true if all paths starting with start_bc_offset end in athrow
+  // bytecode or loop.
+  bool ends_in_athrow(u4 start_bc_offset);
+
   void verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
     bool* this_uninit, VerificationType return_type,
--- a/src/share/vm/classfile/vmSymbols.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -546,6 +546,7 @@
   template(serializePropertiesToByteArray_signature,   "()[B")                                                    \
   template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
   template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
+  template(classClassLoader_name,                      "classLoader")                                             \
                                                                                                                   \
   /* trace signatures */                                                                                          \
   TRACE_TEMPLATES(template)                                                                                       \
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -177,7 +177,7 @@
 
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
-  HeapRegion* release();
+  virtual HeapRegion* release();
 
 #if G1_ALLOC_REGION_TRACING
   void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -6628,6 +6628,35 @@
   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
                                GCAllocForTenured);
 }
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next card,
+      // taking the end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -183,6 +183,13 @@
 public:
   OldGCAllocRegion()
   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has been
+  // allocated into has been completely filled by a dummy object.
+  // This avoids races when remembered set scanning wants to update the BOT of the
+  // last card in the retained old gc alloc region, and allocation threads
+  // allocating into that card at the same time.
+  virtual HeapRegion* release();
 };
 
 // The G1 STW is alive closure.
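
The new OldGCAllocRegion::release() pads the region's current top out to the next card boundary (or up to the minimum fill size) with a dummy object, so the last card of a retained old GC alloc region is never shared between concurrent BOT updates and later allocations. The arithmetic reduces to a small clamp, sketched here with illustrative constants (512-byte cards, 8-byte heap words, a 2-word minimum filler):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    const size_t card_words     = 512 / 8;  // illustrative BOT card size, in heap words
    const size_t min_fill_words = 2;        // illustrative smallest fillable object

    // How many words to fill at 'top' so the next allocation starts on a
    // fresh card; 0 means either already aligned or no room for a filler.
    static size_t filler_words(size_t top, size_t end) {
      size_t aligned_top = (top + card_words - 1) / card_words * card_words;
      size_t to_allocate = aligned_top - top;
      if (to_allocate == 0) return 0;                    // already on a card boundary
      to_allocate = std::min(end - top,                  // do not run past the region
                             std::max(to_allocate, min_fill_words));
      return (to_allocate >= min_fill_words) ? to_allocate : 0;
    }

    int main() {
      std::printf("%zu\n", filler_words(1000, 4096));  // 1024 - 1000 = 24 words of filler
      std::printf("%zu\n", filler_words(4095, 4096));  // only 1 word left: skip the filler
    }
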
--- a/src/share/vm/interpreter/linkResolver.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,14 @@
 
 void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   methodOop result_oop = klass->uncached_lookup_method(name, signature);
+
+  // JDK 7 does not support default methods, but this code is ported from JDK 8 to keep the lookup logic consistent across JDK releases.
+  if (klass->oop_is_array()) {
+    // Only consider klass and super klass for arrays
+    result = methodHandle(THREAD, result_oop);
+    return;
+  }
+
   if (EnableInvokeDynamic && result_oop != NULL) {
     vmIntrinsics::ID iid = result_oop->intrinsic_id();
     if (MethodHandles::is_signature_polymorphic(iid)) {
@@ -424,7 +432,7 @@
   // 2. lookup method in resolved klass and its super klasses
   lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
-  if (resolved_method.is_null()) { // not found in the class hierarchy
+  if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // not found in the class hierarchy
     // 3. lookup method in all the interfaces implemented by the resolved klass
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
@@ -437,16 +445,16 @@
         CLEAR_PENDING_EXCEPTION;
       }
     }
+  }
 
-    if (resolved_method.is_null()) {
-      // 4. method lookup failed
-      ResourceMark rm(THREAD);
-      THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
-                      methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
-                                                              method_name,
-                                                              method_signature),
-                      nested_exception);
-    }
+  if (resolved_method.is_null()) {
+    // 4. method lookup failed
+    ResourceMark rm(THREAD);
+    THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
+                    methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+                                                            method_name,
+                                                            method_signature),
+                    nested_exception);
   }
 
   // 5. check if method is concrete
@@ -517,17 +525,18 @@
   // lookup method in this interface or its super, java.lang.Object
   lookup_instance_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
 
-  if (resolved_method.is_null()) {
+  if (resolved_method.is_null() && !resolved_klass->oop_is_array()) {
     // lookup method in all the super-interfaces
     lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
-    if (resolved_method.is_null()) {
-      // no method found
-      ResourceMark rm(THREAD);
-      THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
-                methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
-                                                        method_name,
-                                                        method_signature));
-    }
+  }
+
+  if (resolved_method.is_null()) {
+    // no method found
+    ResourceMark rm(THREAD);
+    THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
+              methodOopDesc::name_and_sig_as_C_string(Klass::cast(resolved_klass()),
+                                                      method_name,
+                                                      method_signature));
   }
 
   if (check_access) {
@@ -617,7 +626,7 @@
 
   // Resolve instance field
   fieldDescriptor fd; // find_field initializes fd if found
-  KlassHandle sel_klass(THREAD, instanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
+  KlassHandle sel_klass(THREAD, resolved_klass->find_field(field, sig, &fd));
   // check if field exists; i.e., if a klass containing the field def has been selected
   if (sel_klass.is_null()){
     ResourceMark rm(THREAD);
--- a/src/share/vm/oops/arrayKlass.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/oops/arrayKlass.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,13 @@
   return NULL;
 }
 
+// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
+klassOop arrayKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
+  // There are no fields in an array klass but look to the super class (Object)
+  assert(super(), "super klass must be present");
+  return Klass::cast(super())->find_field(name, sig, fd);
+}
+
 methodOop arrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
   // There are no methods in an array klass but the super class (Object) has some
   assert(super(), "super klass must be present");
--- a/src/share/vm/oops/arrayKlass.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/oops/arrayKlass.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 #include "oops/klassOop.hpp"
 #include "oops/klassVtable.hpp"
 
+class fieldDescriptor;
+
 // arrayKlass is the abstract baseclass for all array classes
 
 class arrayKlass: public Klass {
@@ -83,6 +85,9 @@
   virtual oop multi_allocate(int rank, jint* sizes, TRAPS);
   objArrayOop allocate_arrayArray(int n, int length, TRAPS);
 
+  // find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
+  klassOop find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
+
   // Lookup operations
   methodOop uncached_lookup_method(Symbol* name, Symbol* signature) const;
 
--- a/src/share/vm/oops/klass.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/oops/klass.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -119,6 +119,15 @@
   return is_subclass_of(k);
 }
 
+klassOop Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
+#ifdef ASSERT
+  tty->print_cr("Error: find_field called on a klass oop."
+                " Likely error: reflection method does not correctly"
+                " wrap return value in a mirror object.");
+#endif
+  ShouldNotReachHere();
+  return NULL;
+}
 
 methodOop Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
 #ifdef ASSERT
--- a/src/share/vm/oops/klass.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/oops/klass.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,6 +88,7 @@
 class klassVtable;
 class KlassHandle;
 class OrderAccess;
+class fieldDescriptor;
 
 // Holder (or cage) for the C++ vtable of each kind of Klass.
 // We want to tightly constrain the location of the C++ vtable in the overall layout.
@@ -514,6 +515,7 @@
   virtual void initialize(TRAPS);
   // lookup operation for MethodLookupCache
   friend class MethodLookupCache;
+  virtual klassOop find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
   virtual methodOop uncached_lookup_method(Symbol* name, Symbol* signature) const;
  public:
   methodOop lookup_method(Symbol* name, Symbol* signature) const {
--- a/src/share/vm/oops/objArrayOop.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/oops/objArrayOop.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -45,9 +45,10 @@
 private:
   // Give size of objArrayOop in HeapWords minus the header
   static int array_size(int length) {
-    const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+    const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
     assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
            "Else the following (new) computation would be in error");
+    uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
 #ifdef ASSERT
     // The old code is left in for sanity-checking; it'll
     // go away pretty soon. XXX
@@ -55,16 +56,15 @@
     // oop->length() * HeapWordsPerOop;
     // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
     // The oop elements are aligned up to wordSize
-    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
-    int old_res;
+    const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
+    uint old_res;
     if (HeapWordsPerOop > 0) {
       old_res = length * HeapWordsPerOop;
     } else {
-      old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+      old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
     }
+    assert(res == old_res, "Inconsistency between old and new.");
 #endif  // ASSERT
-    int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
-    assert(res == old_res, "Inconsistency between old and new.");
     return res;
   }
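
The objArrayOop.hpp change moves array_size() to unsigned arithmetic. With compressed oops two oops share a heap word (OopsPerHeapWord == 2), and for a length near INT_MAX the signed rounding-up step would overflow, whereas the same expression in unsigned math is well defined. A tiny demonstration of the unsigned ceil-division:

    #include <climits>
    #include <cstdio>

    // Words needed for 'length' heap oops when two compressed oops share one
    // heap word (OopsPerHeapWord == 2), computed entirely in unsigned math.
    static unsigned array_size_words(int length) {
      const unsigned OopsPerHeapWord = 2;
      return ((unsigned)length + OopsPerHeapWord - 1) / OopsPerHeapWord;
    }

    int main() {
      // For a length near INT_MAX the rounding step "length + 1" would
      // overflow as a signed int; as unsigned it is well defined.
      std::printf("%u\n", array_size_words(INT_MAX));   // 1073741824
      std::printf("%u\n", array_size_words(5));         // 3, i.e. ceil(5 / 2)
    }
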
 
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -364,6 +364,13 @@
     set_msg("not an accessor");
     return false;
   }
+
+  // Limit inlining depth in case inlining is forced or
+  // _max_inline_level was increased to compensate for lambda forms.
+  if (inline_level() > MaxForceInlineLevel) {
+    set_msg("MaxForceInlineLevel");
+    return false;
+  }
   if (inline_level() > _max_inline_level) {
     if (!callee_method->force_inline() || !IncrementalInline) {
       set_msg("inlining too deep");
--- a/src/share/vm/opto/callGenerator.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/callGenerator.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -742,7 +742,7 @@
         guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
         const int vtable_index = methodOopDesc::invalid_vtable_index;
         CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true);
-        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
       }
@@ -808,7 +808,7 @@
         }
 
         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true);
-        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
         if (cg != NULL && cg->is_inline())
           return cg;
       }
--- a/src/share/vm/opto/graphKit.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -2750,17 +2750,23 @@
   }
 
   Node* cast_obj = NULL;
-  if (data != NULL &&
-      // Counter has never been decremented (due to cast failure).
-      // ...This is a reasonable thing to expect.  It is true of
-      // all casts inserted by javac to implement generic types.
-      data->as_CounterData()->count() >= 0) {
-    cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
-    if (cast_obj != NULL) {
-      if (failure_control != NULL) // failure is now impossible
-        (*failure_control) = top();
-      // adjust the type of the phi to the exact klass:
-      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+  if (tk->klass_is_exact()) {
+    // The following optimization tries to statically cast the speculative type of the object
+    // (for example obtained during profiling) to the type of the superklass and then do a
+    // dynamic check that the type of the object is what we expect. To work correctly
+    // for checkcast and aastore the type of superklass should be exact.
+    if (data != NULL &&
+        // Counter has never been decremented (due to cast failure).
+        // ...This is a reasonable thing to expect.  It is true of
+        // all casts inserted by javac to implement generic types.
+        data->as_CounterData()->count() >= 0) {
+      cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
+      if (cast_obj != NULL) {
+        if (failure_control != NULL) // failure is now impossible
+          (*failure_control) = top();
+        // adjust the type of the phi to the exact klass:
+        phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+      }
     }
   }
 
--- a/src/share/vm/opto/ifnode.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/ifnode.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -503,7 +503,7 @@
   jint  off = 0;
   if (l->is_top()) {
     return 0;
-  } else if (l->is_Add()) {
+  } else if (l->Opcode() == Op_AddI) {
     if ((off = l->in(1)->find_int_con(0)) != 0) {
       ind = l->in(2);
     } else if ((off = l->in(2)->find_int_con(0)) != 0) {
--- a/src/share/vm/opto/library_call.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/library_call.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -201,7 +201,7 @@
   bool inline_math(vmIntrinsics::ID id);
   bool inline_exp();
   bool inline_pow();
-  void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
+  Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_min_max(vmIntrinsics::ID id);
   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
   // This returns Type::AnyPtr, RawPtr, or OopPtr.
@@ -1580,7 +1580,7 @@
   return true;
 }
 
-void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
+Node* LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
   //-------------------
   //result=(result.isNaN())? funcAddr():result;
   // Check: If isNaN() by checking result!=result? then either trap
@@ -1596,7 +1596,7 @@
       uncommon_trap(Deoptimization::Reason_intrinsic,
                     Deoptimization::Action_make_not_entrant);
     }
-    set_result(result);
+    return result;
   } else {
     // If this inlining ever returned NaN in the past, we compile a call
     // to the runtime to properly handle corner cases
@@ -1626,9 +1626,10 @@
 
       result_region->init_req(2, control());
       result_val->init_req(2, value);
-      set_result(result_region, result_val);
+      set_control(_gvn.transform(result_region));
+      return _gvn.transform(result_val);
     } else {
-      set_result(result);
+      return result;
     }
   }
 }
@@ -1640,7 +1641,8 @@
   Node* arg = round_double_node(argument(0));
   Node* n   = _gvn.transform(new (C) ExpDNode(C, control(), arg));
 
-  finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+  n = finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+  set_result(n);
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
   return true;
@@ -1650,27 +1652,48 @@
 // Inline power instructions, if possible.
 bool LibraryCallKit::inline_pow() {
   // Pseudocode for pow
-  // if (x <= 0.0) {
-  //   long longy = (long)y;
-  //   if ((double)longy == y) { // if y is long
-  //     if (y + 1 == y) longy = 0; // huge number: even
-  //     result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y);
+  // if (y == 2) {
+  //   return x * x;
+  // } else {
+  //   if (x <= 0.0) {
+  //     long longy = (long)y;
+  //     if ((double)longy == y) { // if y is long
+  //       if (y + 1 == y) longy = 0; // huge number: even
+  //       result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y);
+  //     } else {
+  //       result = NaN;
+  //     }
   //   } else {
-  //     result = NaN;
+  //     result = DPow(x,y);
   //   }
-  // } else {
-  //   result = DPow(x,y);
+  //   if (result != result)?  {
+  //     result = uncommon_trap() or runtime_call();
+  //   }
+  //   return result;
   // }
-  // if (result != result)?  {
-  //   result = uncommon_trap() or runtime_call();
-  // }
-  // return result;
 
   Node* x = round_double_node(argument(0));
   Node* y = round_double_node(argument(2));
 
   Node* result = NULL;
 
+  Node*   const_two_node = makecon(TypeD::make(2.0));
+  Node*   cmp_node       = _gvn.transform(new (C) CmpDNode(y, const_two_node));
+  Node*   bool_node      = _gvn.transform(new (C) BoolNode(cmp_node, BoolTest::eq));
+  IfNode* if_node        = create_and_xform_if(control(), bool_node, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
+  Node*   if_true        = _gvn.transform(new (C) IfTrueNode(if_node));
+  Node*   if_false       = _gvn.transform(new (C) IfFalseNode(if_node));
+
+  RegionNode* region_node = new (C) RegionNode(3);
+  region_node->init_req(1, if_true);
+
+  Node* phi_node = new (C) PhiNode(region_node, Type::DOUBLE);
+  // Special case for x^y where y == 2: we can convert it to x * x.
+  phi_node->init_req(1, _gvn.transform(new (C) MulDNode(x, x)));
+
+  // set control to if_false since we will now process the false branch
+  set_control(if_false);
+
   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
     // Short form: skip the fancy tests and just check for NaN result.
     result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
@@ -1794,7 +1817,15 @@
     result = _gvn.transform(phi);
   }
 
-  finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+  result = finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+
+  // control from finish_pow_exp is now input to the region node
+  region_node->set_req(2, control());
+  // the result from finish_pow_exp is now input to the phi node
+  phi_node->init_req(2, result);
+  set_control(_gvn.transform(region_node));
+  record_for_igvn(region_node);
+  set_result(_gvn.transform(phi_node));
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
   return true;
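
Before the existing NaN-check path, the new region/phi above short-circuits the case y == 2 into a single multiply. A small, hypothetical Java check of that equivalence (hedged with Math.ulp, since the Math.pow contract only guarantees results within 1 ulp; the class name is invented):

    // Illustrative only: compares x * x against Math.pow(x, 2.0), allowing the
    // 1-ulp slack that the Math.pow specification permits.
    public class PowSquareSketch {
        public static void main(String[] args) {
            double[] samples = { 0.0, -0.0, 1.5, -3.25, 1e-300, 1e300, Double.NaN };
            for (double x : samples) {
                double viaPow = Math.pow(x, 2.0);
                double viaMul = x * x;
                boolean ok = Double.compare(viaPow, viaMul) == 0
                        || Math.abs(viaPow - viaMul) <= Math.ulp(viaPow);
                System.out.println(x + ": pow=" + viaPow + " mul=" + viaMul + " ok=" + ok);
            }
        }
    }
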
--- a/src/share/vm/opto/loopopts.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/loopopts.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -2754,11 +2754,11 @@
       // Hit!  Refactor use to use the post-incremented tripcounter.
       // Compute a post-increment tripcounter.
       Node *opaq = new (C) Opaque2Node( C, cle->incr() );
-      register_new_node( opaq, u_ctrl );
+      register_new_node(opaq, exit);
       Node *neg_stride = _igvn.intcon(-cle->stride_con());
       set_ctrl(neg_stride, C->root());
       Node *post = new (C) AddINode( opaq, neg_stride);
-      register_new_node( post, u_ctrl );
+      register_new_node(post, exit);
       _igvn.rehash_node_delayed(use);
       for (uint j = 1; j < use->req(); j++) {
         if (use->in(j) == phi)
--- a/src/share/vm/opto/reg_split.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/opto/reg_split.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -51,15 +51,6 @@
 
 static const char out_of_nodes[] = "out of nodes during split";
 
-static bool contains_no_live_range_input(const Node* def) {
-  for (uint i = 1; i < def->req(); ++i) {
-    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
-      return false;
-    }
-  }
-  return true;
-}
-
 //------------------------------get_spillcopy_wide-----------------------------
 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
@@ -326,7 +317,6 @@
   if( def->req() > 1 ) {
     for( uint i = 1; i < def->req(); i++ ) {
       Node *in = def->in(i);
-      // Check for single-def (LRG cannot redefined)
       uint lidx = n2lidx(in);
 
       // On PPC we see rematerialized nodes that have a live-range
@@ -337,10 +327,13 @@
       // count.
 #if defined(PPC64) 
       if( lidx >= _maxlrg ) return def;
-#else
-      if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy
 #endif
-      if (lrgs(lidx).is_singledef()) continue;
+      // We do not need this for live ranges that are only defined once.
+      // However, this is not true for spill copies that are added in this
+      // Split() pass, since they might get coalesced later on in this pass.
+      if (lidx < _maxlrg && lrgs(lidx).is_singledef()) {
+        continue;
+      }
 
       Block *b_def = _cfg._bbs[def->_idx];
       int idx_def = b_def->find_node(def);
@@ -1314,7 +1307,7 @@
       Node *def = Reaches[pidx][slidx];
       assert( def, "must have reaching def" );
       // If input up/down sense and reg-pressure DISagree
-      if (def->rematerialize() && contains_no_live_range_input(def)) {
+      if (def->rematerialize()) {
         // Place the rematerialized node above any MSCs created during
         // phi node splitting.  end_idx points at the insertion point
         // so look at the node before it.
--- a/src/share/vm/prims/jvm.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/prims/jvm.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -755,6 +755,7 @@
   return (jclass) JNIHandles::make_local(env, Klass::cast(k)->java_mirror());
 JVM_END
 
+// Not used; JVM_FindClassFromCaller replaces this.
 JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
                                                jboolean init, jobject loader,
                                                jboolean throwError))
@@ -781,6 +782,42 @@
   return result;
 JVM_END
 
+// Find a class with this name in this loader, using the caller's protection domain.
+JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
+                                          jboolean init, jobject loader,
+                                          jclass caller))
+  JVMWrapper2("JVM_FindClassFromCaller %s throws ClassNotFoundException", name);
+  // Java libraries should ensure that name is never null...
+  if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
+    // It's impossible to create this class;  the name cannot fit
+    // into the constant pool.
+    THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
+  }
+
+  TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL);
+
+  oop loader_oop = JNIHandles::resolve(loader);
+  oop from_class = JNIHandles::resolve(caller);
+  oop protection_domain = NULL;
+  // If the loader is null, we must not call ClassLoader.checkPackageAccess;
+  // otherwise we get an NPE. Put another way, the bootstrap class loader has all
+  // permissions, so there is no checkPackageAccess equivalent for it in the VM.
+  // The caller is also passed as NULL by the Java code if there is no security
+  // manager, to avoid the performance cost of getting the calling class.
+  if (from_class != NULL && loader_oop != NULL) {
+    protection_domain = instanceKlass::cast(java_lang_Class::as_klassOop(from_class))->protection_domain();
+  }
+
+  Handle h_loader(THREAD, loader_oop);
+  Handle h_prot(THREAD, protection_domain);
+  jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
+                                               h_prot, false, THREAD);
+
+  if (TraceClassResolution && result != NULL) {
+    trace_class_resolution(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(result)));
+  }
+  return result;
+JVM_END
 
 JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name,
                                          jboolean init, jclass from))
@@ -4107,10 +4144,15 @@
 
 // Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
 
-jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) {
+jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init,
+                                    Handle loader, Handle protection_domain,
+                                    jboolean throwError, TRAPS) {
   // Security Note:
   //   The Java level wrapper will perform the necessary security check allowing
-  //   us to pass the NULL as the initiating class loader.
+  //   us to pass the NULL as the initiating class loader.  The VM is responsible for
+  //   the checkPackageAccess check relative to the initiating class loader, via the
+  //   protection_domain. The protection_domain is passed as NULL by the Java code
+  //   if there is no security manager in the 3-arg Class.forName().
   klassOop klass = SystemDictionary::resolve_or_fail(name, loader, protection_domain, throwError != 0, CHECK_NULL);
 
   KlassHandle klass_handle(THREAD, klass);
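
For reference, the Java-side entry point that now reaches JVM_FindClassFromCaller is the three-argument Class.forName; a minimal, hypothetical usage sketch (the class name and loader below are arbitrary examples). With a security manager installed, the caller's protection domain drives the checkPackageAccess check; without one, the caller is passed as null, as the comments above note:

    // Illustrative only: a 3-arg Class.forName lookup through an explicit loader.
    public class FindFromCallerSketch {
        public static void main(String[] args) throws Exception {
            ClassLoader loader = FindFromCallerSketch.class.getClassLoader();
            Class<?> c = Class.forName("java.util.ArrayList", /* initialize */ false, loader);
            System.out.println("resolved: " + c.getName());
        }
    }
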
--- a/src/share/vm/prims/jvm.h	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/prims/jvm.h	Thu Oct 16 19:18:18 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -418,6 +418,19 @@
 JVM_FindClassFromBootLoader(JNIEnv *env, const char *name);
 
 /*
+ * Find a class from a given class loader.  Throws ClassNotFoundException.
+ *  name:   name of class
+ *  init:   whether the class should be initialized
+ *  loader: class loader used to look up the class. This may not be the same as the caller's
+ *          class loader.
+ *  caller: initiating class. The initiating class may be null when a security
+ *          manager is not installed.
+ */
+JNIEXPORT jclass JNICALL
+JVM_FindClassFromCaller(JNIEnv *env, const char *name, jboolean init,
+                        jobject loader, jclass caller);
+
+/*
  * Find a class from a given class.
  */
 JNIEXPORT jclass JNICALL
--- a/src/share/vm/runtime/arguments.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -2082,19 +2082,9 @@
                                      1, 100, "TLABWasteTargetPercent");
 
   status = status && verify_object_alignment();
-
-#ifdef SPARC
-  if (UseConcMarkSweepGC || UseG1GC) {
-    // Issue a stern warning if the user has explicitly set
-    // UseMemSetInBOT (it is known to cause issues), but allow
-    // use for experimentation and debugging.
-    if (VM_Version::is_sun4v() && UseMemSetInBOT) {
-      assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
-      warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
-          " on sun4v; please understand that you are using at your own risk!");
-    }
-  }
-#endif // SPARC
+#ifdef COMPILER1
+  status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize");
+#endif
 
   // check native memory tracking flags
   if (PrintNMTStatistics && MemTracker::tracking_level() == MemTracker::NMT_off) {
@@ -2102,6 +2092,10 @@
     PrintNMTStatistics = false;
   }
 
+#ifdef COMPILER1
+  status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset");
+#endif
+
   return status;
 }
 
--- a/src/share/vm/runtime/globals.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/globals.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -1108,9 +1108,11 @@
           "Prevent spurious or premature wakeups from object.wait "         \
           "(Solaris only)")                                                 \
                                                                             \
-  product(intx, NativeMonitorTimeout, -1, "(Unstable)" )                    \
-  product(intx, NativeMonitorFlags, 0, "(Unstable)" )                       \
-  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" )                  \
+  experimental(intx, NativeMonitorTimeout, -1, "(Unstable)")                \
+                                                                            \
+  experimental(intx, NativeMonitorFlags, 0, "(Unstable)")                   \
+                                                                            \
+  experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)")              \
                                                                             \
   develop(bool, UsePthreads, false,                                         \
           "Use pthread-based instead of libthread-based synchronization "   \
@@ -2883,6 +2885,9 @@
   product(intx, MaxRecursiveInlineLevel, 1,                                 \
           "maximum number of nested recursive calls that are inlined")      \
                                                                             \
+  develop(intx, MaxForceInlineLevel, 100,                                   \
+          "maximum number of nested @ForceInline calls that are inlined")   \
+                                                                            \
   product_pd(intx, InlineSmallCode,                                         \
           "Only inline already compiled methods if their code size is "     \
           "less than this")                                                 \
--- a/src/share/vm/runtime/objectMonitor.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/objectMonitor.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -417,6 +417,15 @@
       jt->java_suspend_self();
     }
     Self->set_current_pending_monitor(NULL);
+
+    // We cleared the pending monitor info since we've just gotten past
+    // the enter-check-for-suspend dance and we now own the monitor free
+    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
+    // destructor can go to a safepoint at the end of this block. If we
+    // do a thread dump during that safepoint, then this thread will show
+    // as having "-locked" the monitor, but the OS and java.lang.Thread
+    // states will still report that the thread is blocked trying to
+    // acquire it.
   }
 
   Atomic::dec_ptr(&_count);
@@ -1599,33 +1608,25 @@
      // post monitor waited event. Note that this is past-tense, we are done waiting.
      if (JvmtiExport::should_post_monitor_waited()) {
        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
-     }
 
-     // Without the fix for 8028280, it is possible for the above call:
-     //
-     //   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
-     //
-     // to consume the unpark() that was done when the successor was set.
-     // The solution for this very rare possibility is to redo the unpark()
-     // outside of the JvmtiExport::should_post_monitor_waited() check.
-     //
-     if (node._notified != 0 && _succ == Self) {
-       // In this part of the monitor wait-notify-reenter protocol it
-       // is possible (and normal) for another thread to do a fastpath
-       // monitor enter-exit while this thread is still trying to get
-       // to the reenter portion of the protocol.
-       //
-       // The ObjectMonitor was notified and the current thread is
-       // the successor which also means that an unpark() has already
-       // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
-       // consume the unpark() that was done when the successor was
-       // set because the same ParkEvent is shared between Java
-       // monitors and JVM/TI RawMonitors (for now).
-       //
-       // We redo the unpark() to ensure forward progress, i.e., we
-       // don't want all pending threads hanging (parked) with none
-       // entering the unlocked monitor.
-       node._event->unpark();
+       if (node._notified != 0 && _succ == Self) {
+         // In this part of the monitor wait-notify-reenter protocol it
+         // is possible (and normal) for another thread to do a fastpath
+         // monitor enter-exit while this thread is still trying to get
+         // to the reenter portion of the protocol.
+         //
+         // The ObjectMonitor was notified and the current thread is
+         // the successor which also means that an unpark() has already
+         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+         // consume the unpark() that was done when the successor was
+         // set because the same ParkEvent is shared between Java
+         // monitors and JVM/TI RawMonitors (for now).
+         //
+         // We redo the unpark() to ensure forward progress, i.e., we
+         // don't want all pending threads hanging (parked) with none
+         // entering the unlocked monitor.
+         node._event->unpark();
+       }
      }
 
      if (event.should_commit()) {
--- a/src/share/vm/runtime/os.hpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/os.hpp	Thu Oct 16 19:18:18 2014 +0100
@@ -424,7 +424,10 @@
   static intx current_thread_id();
   static int current_process_id();
   static int sleep(Thread* thread, jlong ms, bool interruptable);
-  static int naked_sleep();
+  // Short standalone OS sleep suitable for slow-path spin loops.
+  // Ignores Thread.interrupt() (so keep it short).
+  // ms = 0 will sleep for the least amount of time allowed by the OS.
+  static void naked_short_sleep(jlong ms);
   static void infinite_sleep(); // never returns, use with CAUTION
   static void yield();        // Yields to all threads with same priority
   enum YieldResult {
--- a/src/share/vm/runtime/park.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/park.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -59,58 +59,22 @@
 
   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
+  // Using a spin lock since we are part of the mutex impl.
+  // 8028280: using a concurrent free list without memory management can leak
+  // pretty badly, it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
+  {
+    ev = FreeList;
+    if (ev != NULL) {
+      FreeList = ev->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        ParkEvent * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system.  In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible.  At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
@@ -124,13 +88,14 @@
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL      , "invariant") ;
   ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  // Note that if we didn't have the TSM/immortal constraint, then
+  // when reattaching we could trim the list.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
+  {
+    ev->FreeNext = FreeList;
+    FreeList = ev;
   }
+  Thread::SpinRelease(&ListLock);
 }
 
 // Override operator new and delete so we can ensure that the
@@ -164,56 +129,21 @@
 
   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p  == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
+  // 8028280: using a concurrent free list without memory management can leak
+  // pretty badly, it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
+  {
+    p = FreeList;
+    if (p != NULL) {
+      FreeList = p->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        Parker * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList --and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ;          // Associate p with t
@@ -227,11 +157,12 @@
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL      , "invariant") ;
   p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
+  {
+    p->FreeNext = FreeList;
+    FreeList = p;
   }
+  Thread::SpinRelease(&ListLock);
 }
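
The allocator changes above replace the lock-free detach/reattach free list with a plain LIFO list guarded by a spin lock (see 8028280). A rough Java sketch of the same shape, purely illustrative (names invented; the real code is the C++ above and uses Thread::SpinAcquire/SpinRelease rather than a Java monitor):

    // Illustrative only: a lock-protected LIFO free list mirroring the new
    // ParkEvent::Allocate/Release structure.
    final class FreeListSketch {
        static final class Node { Node next; }

        private final Object lock = new Object();
        private Node head;                       // head of the free list

        Node allocate() {
            synchronized (lock) {
                Node n = head;
                if (n != null) {
                    head = n.next;               // pop the first element
                    n.next = null;
                    return n;
                }
            }
            return new Node();                   // do it the hard way: materialize a new one
        }

        void release(Node n) {
            synchronized (lock) {
                n.next = head;                   // push back onto the list
                head = n;
            }
        }
    }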
 
--- a/src/share/vm/runtime/thread.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/thread.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -4419,9 +4419,7 @@
         ++ctr ;
         if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
            if (Yields > 5) {
-             // Consider using a simple NakedSleep() instead.
-             // Then SpinAcquire could be called by non-JVM threads
-             Thread::current()->_ParkEvent->park(1) ;
+             os::naked_short_sleep(1);
            } else {
              os::NakedYield() ;
              ++Yields ;
--- a/src/share/vm/runtime/vframe.cpp	Wed Oct 01 14:51:36 2014 +0100
+++ b/src/share/vm/runtime/vframe.cpp	Thu Oct 16 19:18:18 2014 +0100
@@ -197,6 +197,7 @@
         continue;
       }
       if (monitor->owner() != NULL) {
+        // the monitor is associated with an object, i.e., it is locked
 
         // First, assume we have the monitor locked. If we haven't found an
         // owned monitor before and this is the first frame, then we need to
@@ -207,7 +208,11 @@
         if (!found_first_monitor && frame_count == 0) {
           markOop mark = monitor->owner()->mark();
           if (mark->has_monitor() &&
-              mark->monitor() == thread()->current_pending_monitor()) {
+              ( // we have marked ourselves as pending on this monitor
+                mark->monitor() == thread()->current_pending_monitor() ||
+                // we are not the owner of this monitor
+                !mark->monitor()->is_entered(thread())
+              )) {
             lock_state = "waiting to lock";
           }
         }
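
The extra is_entered() test above keeps a thread that has not actually entered the monitor from being reported as holding it. A minimal, hand-written contention sketch of the scenario the new test below samples with jstack (not the test itself; names are invented):

    // Illustrative only: two threads repeatedly entering the same monitor; at any
    // instant a thread dump should show at most one of them as "- locked" on it.
    public class ContentionSketch {
        public static void main(String[] args) throws Exception {
            final Object monitor = new Object();
            Runnable r = new Runnable() {
                public void run() {
                    while (!Thread.currentThread().isInterrupted()) {
                        synchronized (monitor) { }   // enter and immediately exit
                    }
                }
            };
            Thread t1 = new Thread(r, "ContendingThread-1");
            Thread t2 = new Thread(r, "ContendingThread-2");
            t1.start();
            t2.start();
            Thread.sleep(5000);                      // run jstack against this PID now
            t1.interrupt();
            t2.interrupt();
        }
    }
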
--- a/test/Makefile	Wed Oct 01 14:51:36 2014 +0100
+++ b/test/Makefile	Thu Oct 16 19:18:18 2014 +0100
@@ -173,8 +173,8 @@
   JTREG_TESTDIRS = $(TESTDIRS)
 endif
 
-# Default JTREG to run (win32 script works for everybody)
-JTREG = $(JT_HOME)/win32/bin/jtreg
+# Default JTREG to run
+JTREG = $(JT_HOME)/bin/jtreg
 
 # Option to tell jtreg to not run tests marked with "ignore"
 ifeq ($(PLATFORM), windows)
--- a/test/compiler/7141637/SpreadNullArg.java	Wed Oct 01 14:51:36 2014 +0100
+++ b/test/compiler/7141637/SpreadNullArg.java	Thu Oct 16 19:18:18 2014 +0100
@@ -46,13 +46,17 @@
       mh_spread_target =
         MethodHandles.lookup().findStatic(SpreadNullArg.class, "target_spread_arg", mt_ref_arg);
       result = (int) mh_spreadInvoker.invokeExact(mh_spread_target, (Object[]) null);
-    } catch(NullPointerException e) {
-      // Expected exception - do nothing!
-    } catch(Throwable e) {
+      throw new Error("Expected IllegalArgumentException was not thrown");
+    } catch (IllegalArgumentException e) {
+      System.out.println("Expected exception : " + e);
+    } catch (Throwable e) {
       throw new Error(e);
     }
 
-    if (result != 42) throw new Error("Expected NullPointerException was not thrown");
+    if (result != 42) {
+      throw new Error("result [" + result
+        + "] != 42 : Expected IllegalArgumentException was not thrown?");
+    }
   }
 
   public static int target_spread_arg(Integer i1) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/loopopts/TestLogSum.java	Thu Oct 16 19:18:18 2014 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8046516
+ * @summary Segmentation fault in JVM (easily reproducible)
+ * @run main/othervm -XX:-TieredCompilation -Xbatch TestLogSum
+ * @author jackkamm@gmail.com
+ */
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+public class TestLogSum {
+  public static void main(String[] args) {
+    double sum;
+
+    for (int i = 0; i < 6; i++) {
+        for (int n = 2; n < 30; n++) {
+           for (int j = 1; j <= n; j++) {
+              for (int k = 1; k <= j; k++) {
+                // System.out.println(computeSum(k, j));
+                sum = computeSum(k, j);
+              }
+           }
+        }
+      }
+   }
+
+   private static Map<List<Integer>, Double> cache = new HashMap<List<Integer>, Double>();
+   public static double computeSum(int x, int y) {
+      List<Integer> key = Arrays.asList(new Integer[] {x, y});
+
+      if (!cache.containsKey(key)) {
+
+        // explicitly creating/updating a double[] array, instead of using the LogSumArray wrapper object, will prevent the error
+        LogSumArray toReturn = new LogSumArray(x);
+
+        // changing loop indices will prevent the error
+        // in particular, for(z=0; z<x-1; z++), and then using z+1 in place of z, will not produce error
+        for (int z = 1; z < x+1; z++) {
+           double logSummand = Math.log(z + x + y);
+           toReturn.addLogSummand(logSummand);
+        }
+
+        // returning the value here without caching it will prevent the segfault
+        cache.put(key, toReturn.retrieveLogSum());
+      }
+      return cache.get(key);
+   }
+
+   /*
+    * Given a bunch of logarithms log(X),log(Y),log(Z),...
+    * This class is used to compute the log of the sum, log(X+Y+Z+...)
+    */
+   private static class LogSumArray {
+      private double[] logSummandArray;
+      private int currSize;
+
+      private double maxLogSummand;
+
+      public LogSumArray(int maxEntries) {
+        this.logSummandArray = new double[maxEntries];
+
+        this.currSize = 0;
+        this.maxLogSummand = Double.NEGATIVE_INFINITY;
+      }
+
+      public void addLogSummand(double logSummand) {
+        logSummandArray[currSize] = logSummand;
+        currSize++;
+        // removing this line will prevent the error
+        maxLogSummand = Math.max(maxLogSummand, logSummand);
+      }
+
+      public double retrieveLogSum() {
+        if (maxLogSummand == Double.NEGATIVE_INFINITY) return Double.NEGATIVE_INFINITY;
+
+        assert currSize <= logSummandArray.length;
+
+        double factorSum = 0;
+        for (int i = 0; i < currSize; i++) {
+           factorSum += Math.exp(logSummandArray[i] - maxLogSummand);
+        }
+
+        return Math.log(factorSum) + maxLogSummand;
+      }
+   }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/Thread/TestThreadDumpMonitorContention.java	Thu Oct 16 19:18:18 2014 +0100
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug     8036823
+ * @bug     8046287
+ * @summary Creates two threads contending for the same lock and checks
+ *      whether jstack reports "locked" by more than one thread.
+ *
+ * @library /testlibrary
+ * @run main/othervm TestThreadDumpMonitorContention
+ */
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.oracle.java.testlibrary.*;
+
+public class TestThreadDumpMonitorContention {
+    // jstack tends to be closely bound to the VM that we are running
+    // so use getTestJDKTool() instead of getCompileJDKTool() or even
+    // getJDKTool() which can fall back to "compile.jdk".
+    final static String JSTACK = JDKToolFinder.getTestJDKTool("jstack");
+    final static String PID = getPid();
+
+    // looking for header lines with these patterns:
+    // "ContendingThread-1" #19 prio=5 os_prio=64 tid=0x000000000079c000 nid=0x23 runnable [0xffff80ffb8b87000]
+    // "ContendingThread-2" #21 prio=5 os_prio=64 tid=0x0000000000780000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
+    // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
+    final static Pattern HEADER_PREFIX_PATTERN = Pattern.compile(
+        "^\"ContendingThread-.*");
+    final static Pattern HEADER_WAITING_PATTERN1 = Pattern.compile(
+        "^\"ContendingThread-.* waiting for monitor entry .*");
+    final static Pattern HEADER_WAITING_PATTERN2 = Pattern.compile(
+        "^\"ContendingThread-.* waiting on condition .*");
+    final static Pattern HEADER_RUNNABLE_PATTERN = Pattern.compile(
+        "^\"ContendingThread-.* runnable .*");
+
+    // looking for thread state lines with these patterns:
+    // java.lang.Thread.State: RUNNABLE
+    // java.lang.Thread.State: BLOCKED (on object monitor)
+    final static Pattern THREAD_STATE_PREFIX_PATTERN = Pattern.compile(
+        " *java\\.lang\\.Thread\\.State: .*");
+    final static Pattern THREAD_STATE_BLOCKED_PATTERN = Pattern.compile(
+        " *java\\.lang\\.Thread\\.State: BLOCKED \\(on object monitor\\)");
+    final static Pattern THREAD_STATE_RUNNABLE_PATTERN = Pattern.compile(
+        " *java\\.lang\\.Thread\\.State: RUNNABLE");
+
+    // looking for duplicates of this pattern:
+    // - locked <0x000000076ac59e20> (a TestThreadDumpMonitorContention$1)
+    final static Pattern LOCK_PATTERN = Pattern.compile(
+        ".* locked \\<.*\\(a TestThreadDumpMonitorContention.*");
+
+    // sanity checking header and thread state lines associated
+    // with this pattern:
+    // - waiting to lock <0x000000076ac59e20> (a TestThreadDumpMonitorContention$1)
+    final static Pattern WAITING_PATTERN = Pattern.compile(
+        ".* waiting to lock \\<.*\\(a TestThreadDumpMonitorContention.*");
+
+    final static Object barrier = new Object();
+    volatile static boolean done = false;
+
+    static int barrier_cnt = 0;
+    static int blank_line_match_cnt = 0;
+    static int error_cnt = 0;
+    static boolean have_header_line = false;
+    static boolean have_thread_state_line = false;
+    static String header_line = null;
+    static int header_prefix_match_cnt = 0;
+    static int locked_line_match_cnt = 0;
+    static String[] locked_match_list = new String[2];
+    static int n_samples = 15;
+    static int sum_both_running_cnt = 0;
+    static int sum_both_waiting_cnt = 0;
+    static int sum_contended_cnt = 0;
+    static int sum_locked_hdr_runnable_cnt = 0;
+    static int sum_locked_hdr_waiting1_cnt = 0;
+    static int sum_locked_hdr_waiting2_cnt = 0;
+    static int sum_locked_thr_state_blocked_cnt = 0;
+    static int sum_locked_thr_state_runnable_cnt = 0;
+    static int sum_one_waiting_cnt = 0;
+    static int sum_uncontended_cnt = 0;
+    static int sum_waiting_hdr_waiting1_cnt = 0;
+    static int sum_waiting_thr_state_blocked_cnt = 0;
+    static String thread_state_line = null;
+    static boolean verbose = false;
+    static int waiting_line_match_cnt = 0;
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 0) {
+            int arg_i = 0;
+            if (args[arg_i].equals("-v")) {
+                verbose = true;
+                arg_i++;
+            }
+
+            try {
+                n_samples = Integer.parseInt(args[arg_i]);
+            } catch (NumberFormatException nfe) {
+                System.err.println(nfe);
+                usage();
+            }
+        }
+
+        Runnable runnable = new Runnable() {
+            public void run() {
+                synchronized (barrier) {
+                    // let the main thread know we're running
+                    barrier_cnt++;
+                    barrier.notify();
+                }
+                while (!done) {
+                    synchronized (this) { }
+                }
+            }
+        };
+        Thread[] thread_list = new Thread[2];
+        thread_list[0] = new Thread(runnable, "ContendingThread-1");
+        thread_list[1] = new Thread(runnable, "ContendingThread-2");
+        synchronized (barrier) {
+            thread_list[0].start();
+            thread_list[1].start();
+
+            // Wait until the contending threads are running so that
+            // we don't sample any thread init states.
+            while (barrier_cnt < 2) {
+                barrier.wait();
+            }
+        }
+
+        doSamples();
+
+        done = true;
+
+        thread_list[0].join();
+        thread_list[1].join();
+
+        if (error_cnt == 0) {
+            System.out.println("Test PASSED.");
+        } else {
+            System.out.println("Test FAILED.");
+            throw new AssertionError("error_cnt=" + error_cnt);
+        }
+    }
+
+    // Reached a blank line which is the end of the
+    // stack trace without matching either LOCK_PATTERN
+    // or WAITING_PATTERN. Rare, but it's not an error.
+    //
+    // Example:
+    // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
+    //    java.lang.Thread.State: RUNNABLE
+    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
+    //         at java.lang.Thread.run(Thread.java:745)
+    //
+    static boolean checkBlankLine(String line) {
+        if (line.length() == 0) {
+            blank_line_match_cnt++;
+            have_header_line = false;
+            have_thread_state_line = false;
+            return true;
+        }
+
+        return false;
+    }
+
+    // Process the locked line here if we found one.
+    //
+    // Example 1:
+    // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f runnable [0xfffffd7fc1111000]
+    //    java.lang.Thread.State: RUNNABLE
+    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
+    //         - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
+    //         at java.lang.Thread.run(Thread.java:745)
+    //
+    // Example 2:
+    // "ContendingThread-1" #21 prio=5 os_prio=64 tid=0x00000000007b9000 nid=0x2f waiting for monitor entry [0xfffffd7fc1111000]
+    //    java.lang.Thread.State: BLOCKED (on object monitor)
+    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
+    //         - locked <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
+    //         at java.lang.Thread.run(Thread.java:745)
+    //
+    // Example 3:
+    // "ContendingThread-2" #24 prio=5 os_prio=64 tid=0x0000000000ec8800 nid=0x31 waiting on condition [0xfffffd7bbfffe000]
+    //    java.lang.Thread.State: RUNNABLE
+    //    JavaThread state: _thread_blocked
+    // Thread: 0x0000000000ec8800  [0x31] State: _at_safepoint _has_called_back 0 _at_poll_safepoint 0
+    //    JavaThread state: _thread_blocked
+    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
+    //         - locked <0xfffffd7e6d03eb28> (a TestThreadDumpMonitorContention$1)
+    //         at java.lang.Thread.run(Thread.java:745)
+    //
+    static boolean checkLockedLine(String line) {
+        Matcher matcher = LOCK_PATTERN.matcher(line);
+        if (matcher.matches()) {
+            if (verbose) {
+                System.out.println("locked_line='" + line + "'");
+            }
+            locked_match_list[locked_line_match_cnt] = new String(line);
+            locked_line_match_cnt++;
+
+            matcher = HEADER_RUNNABLE_PATTERN.matcher(header_line);
+            if (matcher.matches()) {
+                sum_locked_hdr_runnable_cnt++;
+            } else {
+                // It's strange, but a locked line can also
+                // match the HEADER_WAITING_PATTERN{1,2}.
+                matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
+                if (matcher.matches()) {
+                    sum_locked_hdr_waiting1_cnt++;
+                } else {
+                    matcher = HEADER_WAITING_PATTERN2.matcher(header_line);
+                    if (matcher.matches()) {
+                        sum_locked_hdr_waiting2_cnt++;
+                    } else {
+                        System.err.println();
+                        System.err.println("ERROR: header line does " +
+                            "not match runnable or waiting patterns.");
+                        System.err.println("ERROR: header_line='" +
+                            header_line + "'");
+                        System.err.println("ERROR: locked_line='" + line +
+                            "'");
+                        error_cnt++;
+                    }
+                }
+            }
+
+            matcher = THREAD_STATE_RUNNABLE_PATTERN.matcher(thread_state_line);
+            if (matcher.matches()) {
+                sum_locked_thr_state_runnable_cnt++;
+            } else {
+                // It's strange, but a locked line can also
+                // match the THREAD_STATE_BLOCKED_PATTERN.
+                matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(
+                              thread_state_line);
+                if (matcher.matches()) {
+                    sum_locked_thr_state_blocked_cnt++;
+                } else {
+                    System.err.println();
+                    System.err.println("ERROR: thread state line does not " +
+                        "match runnable or waiting patterns.");
+                    System.err.println("ERROR: " + "thread_state_line='" +
+                        thread_state_line + "'");
+                    System.err.println("ERROR: locked_line='" + line + "'");
+                    error_cnt++;
+                }
+            }
+
+            // Have everything we need from this thread stack
+            // that matches the LOCK_PATTERN.
+            have_header_line = false;
+            have_thread_state_line = false;
+            return true;
+        }
+
+        return false;
+    }
+
+    // Process the waiting line here if we found one.
+    //
+    // Example:
+    // "ContendingThread-2" #22 prio=5 os_prio=64 tid=0x00000000007b9800 nid=0x30 waiting for monitor entry [0xfffffd7fc1010000]
+    //    java.lang.Thread.State: BLOCKED (on object monitor)
+    //         at TestThreadDumpMonitorContention$1.run(TestThreadDumpMonitorContention.java:140)
+    //         - waiting to lock <0xfffffd7e6a2912f8> (a TestThreadDumpMonitorContention$1)
+    //         at java.lang.Thread.run(Thread.java:745)
+    //
+    static boolean checkWaitingLine(String line) {
+        Matcher matcher = WAITING_PATTERN.matcher(line);
+        if (matcher.matches()) {
+            waiting_line_match_cnt++;
+            if (verbose) {
+                System.out.println("waiting_line='" + line + "'");
+            }
+
+            matcher = HEADER_WAITING_PATTERN1.matcher(header_line);
+            if (matcher.matches()) {
+                sum_waiting_hdr_waiting1_cnt++;
+            } else {
+                System.err.println();
+                System.err.println("ERROR: header line does " +
+                    "not match a waiting pattern.");
+                System.err.println("ERROR: header_line='" + header_line + "'");
+                System.err.println("ERROR: waiting_line='" + line + "'");
+                error_cnt++;
+            }
+
+            matcher = THREAD_STATE_BLOCKED_PATTERN.matcher(thread_state_line);
+            if (matcher.matches()) {
+                sum_waiting_thr_state_blocked_cnt++;
+            } else {
+                System.err.println();
+                System.err.println("ERROR: thread state line " +
+                    "does not match a waiting pattern.");
+                System.err.println("ERROR: thread_state_line='" +
+                    thread_state_line + "'");
+                System.err.println("ERROR: waiting_line='" + line + "'");
+                error_cnt++;
+            }
+
+            // Have everything we need from this thread stack
+            // that matches the WAITING_PATTERN.
+            have_header_line = false;
+            have_thread_state_line = false;
+            return true;
+        }
+
+        return false;
+    }
+
+    static void doSamples() throws Exception {
+        for (int count = 0; count < n_samples; count++) {
+            blank_line_match_cnt = 0;
+            header_prefix_match_cnt = 0;
+            locked_line_match_cnt = 0;
+            waiting_line_match_cnt = 0;
+            // verbose mode or an error produces a lot of output, so add more space
+            if (verbose || error_cnt > 0) System.out.println();
+            System.out.println("Sample #" + count);
+
+            // We don't use the ProcessTools, OutputBuffer or
+            // OutputAnalyzer classes from the testlibrary because
+            // we have a complicated multi-line parse to perform
+            // on a narrow subset of the JSTACK output.
+            //
+            // - we only care about stack traces that match
+            //   HEADER_PREFIX_PATTERN; only two should match
+            // - we care about at most three lines from each stack trace
+            // - if both stack traces match LOCKED_PATTERN, then that's
+            //   a failure and we report it
+            // - for a stack trace that matches LOCKED_PATTERN, we verify:
+            //   - the header line matches HEADER_RUNNABLE_PATTERN
+            //     or HEADER_WAITING_PATTERN{1,2}
+            //   - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
+            //     or THREAD_STATE_RUNNABLE_PATTERN
+            //   - we report any mismatches as failures
+            // - for a stack trace that matches WAITING_PATTERN, we verify:
+            //   - the header line matches HEADER_WAITING_PATTERN1
+            //   - the thread state line matches THREAD_STATE_BLOCKED_PATTERN
+            //   - we report any mismatches as failures
+            // - the stack traces that match HEADER_PREFIX_PATTERN may
+            //   not match either LOCKED_PATTERN or WAITING_PATTERN
+            //   because we might observe the thread outside of
+            //   monitor operations; this is not considered a failure
+            //
+            // When we do observe LOCKED_PATTERN or WAITING_PATTERN,
+            // then we are checking the header and thread state patterns
+            // that occurred earlier in the current stack trace that
+            // matched HEADER_PREFIX_PATTERN. We don't use data from
+            // stack traces that don't match HEADER_PREFIX_PATTERN and
+            // we don't mix data between the two stack traces that do
+            // match HEADER_PREFIX_PATTERN.
+            //
+            Process process = new ProcessBuilder(JSTACK, PID)
+                .redirectErrorStream(true).start();
+
+            BufferedReader reader = new BufferedReader(new InputStreamReader(
+                                        process.getInputStream()));
+            String line;
+            while ((line = reader.readLine()) != null) {
+                Matcher matcher = null;
+
+                // process the header line here
+                if (!have_header_line) {
+                    matcher = HEADER_PREFIX_PATTERN.matcher(line);
+                    if (matcher.matches()) {
+                        header_prefix_match_cnt++;
+                        if (verbose) {
+                            System.out.println();
+                            System.out.println("header='" + line + "'");
+                        }
+                        header_line = new String(line);
+                        have_header_line = true;
+                        continue;
+                    }
+                    continue;  // skip until have a header line
+                }
+
+                // process the thread state line here
+                if (!have_thread_state_line) {
+                    matcher = THREAD_STATE_PREFIX_PATTERN.matcher(line);
+                    if (matcher.matches()) {
+                        if (verbose) {
+                            System.out.println("thread_state='" + line + "'");
+                        }
+                        thread_state_line = new String(line);
+                        have_thread_state_line = true;
+                        continue;
+                    }
+                    continue;  // skip until we have a thread state line
+                }
+
+                // process the locked line here if we find one
+                if (checkLockedLine(line)) {
+                    continue;
+                }
+
+                // process the waiting line here if we find one
+                if (checkWaitingLine(line)) {
+                    continue;
+                }
+
+                // process the blank line here if we find one
+                if (checkBlankLine(line)) {
+                    continue;
+                }
+            }
+            process.waitFor();
+
+            if (header_prefix_match_cnt != 2) {
+                System.err.println();
+                System.err.println("ERROR: should match exactly two headers.");
+                System.err.println("ERROR: header_prefix_match_cnt=" +
+                    header_prefix_match_cnt);
+                error_cnt++;
+            }
+
+            if (locked_line_match_cnt == 2) {
+                if (locked_match_list[0].equals(locked_match_list[1])) {
+                    System.err.println();
+                    System.err.println("ERROR: matching lock lines:");
+                    System.err.println("ERROR: line[0]'" +
+                        locked_match_list[0] + "'");
+                    System.err.println("ERROR: line[1]'" +
+                        locked_match_list[1] + "'");
+                    error_cnt++;
+                }
+            }
+
+            if (locked_line_match_cnt == 1) {
+                // one thread has the lock
+                if (waiting_line_match_cnt == 1) {
+                    // and the other contended for it
+                    sum_contended_cnt++;
+                } else {
+                    // and the other is just running
+                    sum_uncontended_cnt++;
+                }
+            } else if (waiting_line_match_cnt == 1) {
+                // one thread is waiting
+                sum_one_waiting_cnt++;
+            } else if (waiting_line_match_cnt == 2) {
+                // both threads are waiting
+                sum_both_waiting_cnt++;
+            } else {
+                // both threads are running
+                sum_both_running_cnt++;
+            }
+
+            // slight delay between jstack launches
+            Thread.sleep(500);
+        }
+
+        if (error_cnt != 0) {
+            // skip summary info since there were errors
+            return;
+        }
+
+        System.out.println("INFO: Summary for all samples:");
+        System.out.println("INFO: both_running_cnt=" + sum_both_running_cnt);
+        System.out.println("INFO: both_waiting_cnt=" + sum_both_waiting_cnt);
+        System.out.println("INFO: contended_cnt=" + sum_contended_cnt);
+        System.out.println("INFO: one_waiting_cnt=" + sum_one_waiting_cnt);
+        System.out.println("INFO: uncontended_cnt=" + sum_uncontended_cnt);
+        System.out.println("INFO: locked_hdr_runnable_cnt=" +
+            sum_locked_hdr_runnable_cnt);
+        System.out.println("INFO: locked_hdr_waiting1_cnt=" +
+            sum_locked_hdr_waiting1_cnt);
+        System.out.println("INFO: locked_hdr_waiting2_cnt=" +
+            sum_locked_hdr_waiting2_cnt);
+        System.out.println("INFO: locked_thr_state_blocked_cnt=" +
+            sum_locked_thr_state_blocked_cnt);
+        System.out.println("INFO: locked_thr_state_runnable_cnt=" +
+            sum_locked_thr_state_runnable_cnt);
+        System.out.println("INFO: waiting_hdr_waiting1_cnt=" +
+            sum_waiting_hdr_waiting1_cnt);
+        System.out.println("INFO: waiting_thr_state_blocked_cnt=" +
+            sum_waiting_thr_state_blocked_cnt);
+
+        if (sum_contended_cnt == 0) {
+            System.err.println("WARNING: the primary scenario for 8036823" +
+                " has not been exercised by this test run.");
+        }
+    }
+
+    // This helper relies on RuntimeMXBean.getName() returning a string
+    // that looks like this: 5436@mt-haku
+    //
+    // The testlibrary has tryFindJvmPid(), but that uses a separate
+    // process which is much more expensive for finding out your own PID.
+    //
+    static String getPid() {
+        RuntimeMXBean runtimebean = ManagementFactory.getRuntimeMXBean();
+        String vmname = runtimebean.getName();
+        int i = vmname.indexOf('@');
+        if (i != -1) {
+            vmname = vmname.substring(0, i);
+        }
+        return vmname;
+    }
+
+    static void usage() {
+        System.err.println("Usage: " +
+            "java TestThreadDumpMonitorContention [-v] [n_samples]");
+        System.exit(1);
+    }
+}