changeset 8978:69dc2b5ad5d3

Merge
author asaha
date Thu, 29 Oct 2015 17:03:53 -0700
parents b15be4d7f813 bb30bb619d3d
children 8a6c4458bcfd
files .hgtags
diffstat 28 files changed, 223 insertions(+), 99 deletions(-)
--- a/.hgtags	Fri Oct 16 12:15:09 2015 -0700
+++ b/.hgtags	Thu Oct 29 17:03:53 2015 -0700
@@ -766,4 +766,14 @@
 67df26e363fb7e722032fd286673642fc999957c jdk8u71-b01
 1a799d49de23d84f658ade1d3805a1924e7e1e84 jdk8u71-b02
 e06f49d82ef8128b3637937d383b6f0862650deb jdk8u71-b03
+7466029bf3cd3d5eea3055c4f790728263be4a2e jdk8u71-b04
+8a402d51763c083151d0cb434647bd6e1ba4353f jdk8u71-b05
+7dd34cca3538c9bef74a8a1976e14ca51e9857f9 jdk8u71-b06
+b8f426369187c32551f0a3d571d933908988c81c jdk8u72-b00
+c0205eddb31766ece562483595ec28a7506971e9 jdk8u72-b01
+15ef554f2f2e0a8d7c330191432fcd2126d19dab jdk8u72-b02
+bb98a4ba1556d0505461de98aa3dddf75278c09b jdk8u72-b03
+6c8ceb05ccf78f2f8f72f0870e3f6f3bd4895bb1 jdk8u72-b04
+a2969911663ab29c71a61aa3403e53243ad89923 jdk8u72-b05
+acf0d80cb84f14d787c34360abf2bc38b186999a jdk8u72-b06
 c0242ea4bde19d72be5149feda112a39e8c89b0a jdk8u75-b00
--- a/src/os/linux/vm/os_linux.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -5922,9 +5922,11 @@
         status = pthread_mutex_unlock(_mutex);
         assert (status == 0, "invariant");
       } else {
+        // must capture correct index before unlocking
+        int index = _cur_index;
         status = pthread_mutex_unlock(_mutex);
         assert (status == 0, "invariant");
-        status = pthread_cond_signal (&_cond[_cur_index]);
+        status = pthread_cond_signal (&_cond[index]);
         assert (status == 0, "invariant");
       }
     } else {
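
The os_linux.cpp change above closes a small race: _cur_index is shared state, so it must be read while _mutex is still held. After pthread_mutex_unlock() another thread may update it, and the signal could then wake a waiter on the wrong condition variable. A minimal standalone sketch of the pattern (hypothetical Parker type, not the HotSpot code; initialization omitted for brevity):

#include <pthread.h>

// Sketch of the capture-before-unlock idiom fixed above. 'Parker' and its
// fields are illustrative stand-ins for the HotSpot implementation.
struct Parker {
  pthread_mutex_t _mutex;
  pthread_cond_t  _cond[2];
  int             _cur_index;  // shared: which condvar the waiter parked on

  void unpark() {
    pthread_mutex_lock(&_mutex);
    if (_cur_index != -1) {
      // Capture the index while the lock is held; once the mutex is
      // released, the waiter may reset _cur_index or repark on the
      // other condition variable.
      int index = _cur_index;
      pthread_mutex_unlock(&_mutex);
      pthread_cond_signal(&_cond[index]);  // safe: uses the captured value
    } else {
      pthread_mutex_unlock(&_mutex);
    }
  }
};
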
--- a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@
   return cpuinfo_field_contains("cpu", "Niagara");
 }
 
+static bool detect_M_family() {
+  return cpuinfo_field_contains("cpu", "SPARC-M");
+}
+
 static bool detect_blkinit() {
   return cpuinfo_field_contains("cpucaps", "blkinit");
 }
@@ -66,6 +70,11 @@
     features = niagara1_m | T_family_m;
   }
 
+  if (detect_M_family()) {
+    NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+    features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
+  }
+
   if (detect_blkinit()) {
     features |= blk_init_instructions_m;
   }
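
cpuinfo_field_contains() is defined earlier in this file; it scans /proc/cpuinfo for a line whose key matches the given field and reports whether that line contains the given value. A rough standalone equivalent (an approximation, not the exact HotSpot implementation):

#include <stdio.h>
#include <string.h>

// Rough sketch: does the /proc/cpuinfo line for 'field' contain 'value'?
static bool cpuinfo_field_contains(const char* field, const char* value) {
  char line[1024];
  bool found = false;
  FILE* fp = fopen("/proc/cpuinfo", "r");
  if (fp == NULL) return false;
  while (!found && fgets(line, sizeof(line), fp) != NULL) {
    // Match the key at the start of the line, then search its value.
    if (strncmp(line, field, strlen(field)) == 0 &&
        strstr(line, value) != NULL) {
      found = true;
    }
  }
  fclose(fp);
  return found;
}
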
--- a/src/share/vm/classfile/stackMapTable.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/classfile/stackMapTable.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -186,7 +186,6 @@
     u2 offset = _stream->get_u2(THREAD);
     if (offset >= _code_length ||
         _code_data[offset] != ClassVerifier::NEW_OFFSET) {
-      ResourceMark rm(THREAD);
       _verifier->class_format_error(
         "StackMapTable format error: bad offset for Uninitialized");
       return VerificationType::bogus_type();
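
The ResourceMark removal matters because class_format_error() formats its message in the current thread's resource area; a mark scoped this tightly would reclaim that storage on scope exit while the message is still in use. A toy model of the lifetime rule (hypothetical Arena/Mark types, for illustration only):

#include <cstddef>

// A mark records the arena's high-water level; its destructor frees
// everything allocated after it, which is exactly what made the removed
// ResourceMark dangerous here.
struct Arena {
  char buf[4096];
  size_t top = 0;
  void* alloc(size_t n) { void* p = buf + top; top += n; return p; }
};

struct Mark {
  Arena& a;
  size_t saved;
  explicit Mark(Arena& a) : a(a), saved(a.top) {}
  ~Mark() { a.top = saved; }  // releases all later allocations
};

const char* format_error(Arena& a) {
  // With a Mark here, the returned message would be freed on return:
  // Mark m(a);  // BUG: the message dies with the mark
  char* msg = static_cast<char*>(a.alloc(64));  // must outlive this scope
  return msg;
}
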
--- a/src/share/vm/code/codeCache.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/codeCache.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -521,15 +521,17 @@
 
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      if (needs_cache_clean()) {
-        nm->cleanup_inline_caches();
+  NOT_DEBUG(if (needs_cache_clean())) {
+    FOR_ALL_ALIVE_BLOBS(cb) {
+      if (cb->is_nmethod()) {
+        nmethod *nm = (nmethod*)cb;
+        assert(!nm->is_unloaded(), "Tautology");
+        DEBUG_ONLY(if (needs_cache_clean())) {
+          nm->cleanup_inline_caches();
+        }
+        DEBUG_ONLY(nm->verify());
+        DEBUG_ONLY(nm->verify_oop_relocations());
       }
-      DEBUG_ONLY(nm->verify());
-      DEBUG_ONLY(nm->verify_oop_relocations());
     }
   }
   set_needs_cache_clean(false);
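
The gc_epilogue() rewrite leans on HotSpot's build-mode macros: in debug builds DEBUG_ONLY(x) expands to x and NOT_DEBUG(x) to nothing, and the reverse holds in product builds. The effect is that a product build skips the whole code-cache walk unless a clean is actually pending, while a debug build still visits every nmethod so the verify calls run. Roughly (simplified from utilities/macros.hpp):

#ifdef ASSERT                 // debug build
#define DEBUG_ONLY(code) code
#define NOT_DEBUG(code)
#else                         // product build
#define DEBUG_ONLY(code)
#define NOT_DEBUG(code) code
#endif

// Resulting shape of gc_epilogue():
//   product: if (needs_cache_clean()) { walk blobs and clean ICs }
//   debug:   walk blobs always; clean ICs only when needed; then verify.
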
@@ -734,27 +736,6 @@
   return number_of_marked_CodeBlobs;
 }
 
-void CodeCache::make_marked_nmethods_zombies() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    if (nm->is_marked_for_deoptimization()) {
-
-      // If the nmethod has already been made non-entrant and it can be converted
-      // then zombie it now. Otherwise make it non-entrant and it will eventually
-      // be zombied when it is no longer seen on the stack. Note that the nmethod
-      // might be "entrant" and not on the stack and so could be zombied immediately
-      // but we can't tell because we don't track it on stack until it becomes
-      // non-entrant.
-
-      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
-        nm->make_zombie();
-      } else {
-        nm->make_not_entrant();
-      }
-    }
-  }
-}
-
 void CodeCache::make_marked_nmethods_not_entrant() {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_NMETHODS(nm) {
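
make_marked_nmethods_zombies() is removed because zombifying at the deoptimization safepoint was premature: a marked nmethod may still have activations on the stack. Deoptimization now only makes methods not entrant (see vm_operations.cpp later in this changeset), and the sweeper performs the final transition once can_convert_to_zombie() confirms no activations remain. A simplified sketch of the states involved (the real values live in nmethod.hpp; this enum is illustrative):

// Illustrative only; simplified from nmethod.hpp.
enum NMethodState {
  in_use,       // normal entry points are active
  not_entrant,  // entry barred, but frames may still be on thread stacks
  zombie,       // no activations left; safe to reclaim ICs and flush
  unloaded      // the defining class loader has gone away
};

// After this change: VM_Deoptimize stops at not_entrant; the sweeper
// (sweeper.cpp below) does not_entrant -> zombie when it is safe.
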
--- a/src/share/vm/code/codeCache.hpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/codeCache.hpp	Thu Oct 29 17:03:53 2015 -0700
@@ -179,7 +179,6 @@
 
   static void mark_all_nmethods_for_deoptimization();
   static int  mark_for_deoptimization(Method* dependee);
-  static void make_marked_nmethods_zombies();
   static void make_marked_nmethods_not_entrant();
 
     // tells how many nmethods have dependencies
--- a/src/share/vm/code/compiledIC.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/compiledIC.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -155,6 +155,14 @@
   return _ic_call->destination();
 }
 
+// Clears the IC stub if the compiled IC is in transition state
+void CompiledIC::clear_ic_stub() {
+  if (is_in_transition_state()) {
+    ICStub* stub = ICStub_from_destination_address(stub_address());
+    stub->clear();
+  }
+}
+
 
 //-----------------------------------------------------------------------------
 // High-level access to an inline cache. Guaranteed to be MT-safe.
@@ -279,6 +287,7 @@
   assert( is_c1_method ||
          !is_monomorphic ||
          is_optimized() ||
+         !caller->is_alive() ||
          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
 #endif // ASSERT
   return is_monomorphic;
@@ -313,7 +322,7 @@
 }
 
 
-void CompiledIC::set_to_clean() {
+void CompiledIC::set_to_clean(bool in_use) {
   assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
   if (TraceInlineCacheClearing || TraceICs) {
     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
@@ -329,17 +338,14 @@
 
   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
   // we only need to patch the destination
-  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
+  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
 
   if (safe_transition) {
     // Kill any leftover stub we might have too
-    if (is_in_transition_state()) {
-      ICStub* old_stub = ICStub_from_destination_address(stub_address());
-      old_stub->clear();
-    }
+    clear_ic_stub();
     if (is_optimized()) {
-    set_ic_destination(entry);
-  } else {
+      set_ic_destination(entry);
+    } else {
       set_ic_destination_and_value(entry, (void*)NULL);
     }
   } else {
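
The new in_use parameter (default true, per compiledIC.hpp below) lets the sweeper clean inline caches of nmethods that are no longer alive without requiring a safepoint: if the holder is dead, no thread can be racing through the IC being patched. A condensed restatement of the transition predicate, for reading convenience (a sketch, not the HotSpot signature):

// Sketch: when is it safe to patch the IC directly, without an IC stub?
bool safe_ic_transition(bool in_use, bool is_optimized, bool at_safepoint) {
  // A dead holder, an optimized (metadata-free) IC, or a safepoint each
  // guarantee that no other thread is concurrently reading the IC.
  return !in_use || is_optimized || at_safepoint;
}
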
--- a/src/share/vm/code/compiledIC.hpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/compiledIC.hpp	Thu Oct 29 17:03:53 2015 -0700
@@ -228,8 +228,9 @@
   //
  // They all take a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
   //
-  void set_to_clean();  // Can only be called during a safepoint operation
+  void set_to_clean(bool in_use = true);
   void set_to_monomorphic(CompiledICInfo& info);
+  void clear_ic_stub();
 
   // Returns true if successful and false otherwise. The call can fail if memory
   // allocation in the code cache fails.
--- a/src/share/vm/code/nmethod.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/nmethod.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -1148,9 +1148,20 @@
   }
 }
 
+// Clear ICStubs of all compiled ICs
+void nmethod::clear_ic_stubs() {
+  assert_locked_or_safepoint(CompiledIC_lock);
+  RelocIterator iter(this);
+  while(iter.next()) {
+    if (iter.type() == relocInfo::virtual_call_type) {
+      CompiledIC* ic = CompiledIC_at(&iter);
+      ic->clear_ic_stub();
+    }
+  }
+}
+
 
 void nmethod::cleanup_inline_caches() {
-
   assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
@@ -1166,7 +1177,8 @@
     // In fact, why are we bothering to look at oops in a non-entrant method??
   }
 
-  // Find all calls in an nmethod, and clear the ones that points to zombie methods
+  // Find all calls in an nmethod and clear the ones that point to non-entrant,
+  // zombie and unloaded nmethods.
   ResourceMark rm;
   RelocIterator iter(this, low_boundary);
   while(iter.next()) {
@@ -1178,8 +1190,8 @@
         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
+          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
         }
         break;
       }
@@ -1188,7 +1200,7 @@
         CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
-          // Clean inline caches pointing to both zombie and not_entrant methods
+          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
           if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
         }
         break;
@@ -1279,7 +1291,7 @@
 // Tell if a non-entrant method can be converted to a zombie (i.e.,
 // there are no activations on the stack, not in use by the VM,
 // and not in use by the ServiceThread)
-bool nmethod::can_not_entrant_be_converted() {
+bool nmethod::can_convert_to_zombie() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
@@ -2695,7 +2707,7 @@
   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
   // seems odd.
 
-  if( is_zombie() || is_not_entrant() )
+  if (is_zombie() || is_not_entrant() || is_unloaded())
     return;
 
   // Make sure all the entry points are correctly aligned for patching.
--- a/src/share/vm/code/nmethod.hpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/nmethod.hpp	Thu Oct 29 17:03:53 2015 -0700
@@ -577,6 +577,7 @@
 
   // Inline cache support
   void clear_inline_caches();
+  void clear_ic_stubs();
   void cleanup_inline_caches();
   bool inlinecache_check_contains(address addr) const {
     return (addr >= code_begin() && addr < verified_entry_point());
@@ -604,7 +605,7 @@
 
   // See comment at definition of _last_seen_on_stack
   void mark_as_seen_on_stack();
-  bool can_not_entrant_be_converted();
+  bool can_convert_to_zombie();
 
   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
   void set_method(Method* method) { _method = method; }
--- a/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -117,7 +117,7 @@
 
 G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
     _g1h(g1h),
-    _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
+    _process_strong_tasks(G1RP_PS_NumElements),
     _srs(g1h),
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false),
     _n_workers_discovered_strong_classes(0) {}
@@ -160,7 +160,7 @@
   {
     // Now the CM ref_processor roots.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
       // We need to treat the discovered reference lists of the
       // concurrent mark ref processor as roots and keep entries
       // (which are added by the marking threads) on them live
@@ -203,12 +203,12 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
       JavaThread::satb_mark_queue_set().filter_thread_buffers();
     }
   }
 
-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks.all_tasks_completed();
 }
 
 void G1RootProcessor::process_strong_roots(OopClosure* oops,
@@ -218,7 +218,7 @@
   process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
   process_vm_roots(oops, NULL, NULL, 0);
 
-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks.all_tasks_completed();
 }
 
 void G1RootProcessor::process_all_roots(OopClosure* oops,
@@ -228,11 +228,11 @@
   process_java_roots(oops, NULL, clds, clds, NULL, NULL, 0);
   process_vm_roots(oops, oops, NULL, 0);
 
-  if (!_process_strong_tasks->is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
+  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(blobs);
   }
 
-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks.all_tasks_completed();
 }
 
 void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
@@ -248,7 +248,7 @@
   // let the thread process the weak CLDs and nmethods.
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
       ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
     }
   }
@@ -265,49 +265,49 @@
                                        uint worker_i) {
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_Universe_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
       Universe::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
       JNIHandles::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
-    if (!_process_strong_tasks-> is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
       ObjectSynchronizer::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::FlatProfilerRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
       FlatProfiler::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_Management_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_jvmti_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
       JvmtiExport::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
       SystemDictionary::roots_oops_do(strong_roots, weak_roots);
     }
   }
@@ -335,5 +335,5 @@
 }
 
 void G1RootProcessor::set_num_workers(int active_workers) {
-  _process_strong_tasks->set_n_threads(active_workers);
+  _process_strong_tasks.set_n_threads(active_workers);
 }
--- a/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp	Thu Oct 29 17:03:53 2015 -0700
@@ -45,7 +45,7 @@
 // worker thread call the process_roots methods.
 class G1RootProcessor : public StackObj {
   G1CollectedHeap* _g1h;
-  SubTasksDone* _process_strong_tasks;
+  SubTasksDone _process_strong_tasks;
   SharedHeap::StrongRootsScope _srs;
 
   // Used to implement the Thread work barrier.
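
Embedding _process_strong_tasks by value instead of holding a SubTasksDone* removes a heap allocation that was never freed: G1RootProcessor is a StackObj with no destructor, so the old 'new SubTasksDone(...)' was leaked on every use. The embedded member now lives and dies with the processor. The general shape of the fix (hypothetical Widget/Holder names):

// Hypothetical sketch of the pointer-member vs. value-member trade-off.
struct Widget {
  explicit Widget(int n) : n(n) {}
  int n;
};

struct HolderLeaky {
  Widget* w;
  explicit HolderLeaky(int n) : w(new Widget(n)) {}
  // no destructor: the Widget leaks, as in the old G1RootProcessor
};

struct HolderEmbedded {
  Widget w;  // constructed in the initializer list, destroyed with Holder
  explicit HolderEmbedded(int n) : w(n) {}
};
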
--- a/src/share/vm/opto/block.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/block.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -393,7 +393,7 @@
   VectorSet visited(a);
 
   // Allocate stack with enough space to avoid frequent realloc
-  Node_Stack nstack(a, C->unique() >> 1);
+  Node_Stack nstack(a, C->live_nodes() >> 1);
   nstack.push(_root, 0);
   uint sum = 0;                 // Counter for blocks
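
This and the remaining opto changes all replace C->unique() with C->live_nodes() when pre-sizing traversal stacks. unique() counts every node index ever handed out, dead nodes included, so on long compilations it can badly overestimate the needed capacity; live_nodes() subtracts the dead ones and is the right bound for worklists that only ever hold live nodes. Roughly (a simplified sketch of the Compile counters):

// Simplified sketch; the real accessors are on HotSpot's Compile class.
class Compile {
  uint _unique;           // node indices handed out so far (never reused)
  uint _dead_node_count;  // nodes later removed from the graph
 public:
  uint unique() const     { return _unique; }
  uint live_nodes() const { return _unique - _dead_node_count; }
};
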
 
--- a/src/share/vm/opto/cfgnode.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -791,7 +791,7 @@
   Compile *C = igvn->C;
   Arena *a = Thread::current()->resource_area();
   Node_Array node_map = new Node_Array(a);
-  Node_Stack stack(a, C->unique() >> 4);
+  Node_Stack stack(a, C->live_nodes() >> 4);
   PhiNode *nphi = slice_memory(at);
   igvn->register_new_node_with_optimizer( nphi );
   node_map.map(_idx, nphi);
--- a/src/share/vm/opto/compile.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/compile.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -327,7 +327,7 @@
 // Use breadth-first pass that records state in a Unique_Node_List,
 // recursive traversal is slower.
 void Compile::identify_useful_nodes(Unique_Node_List &useful) {
-  int estimated_worklist_size = unique();
+  int estimated_worklist_size = live_nodes();
   useful.map( estimated_worklist_size, NULL );  // preallocate space
 
   // Initialize worklist
@@ -3212,8 +3212,8 @@
   Final_Reshape_Counts frc;
 
   // Visit everybody reachable!
-  // Allocate stack of size C->unique()/2 to avoid frequent realloc
-  Node_Stack nstack(unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  Node_Stack nstack(live_nodes() >> 1);
   final_graph_reshaping_walk(nstack, root(), frc);
 
   // Check for unreachable (from below) code (i.e., infinite loops).
--- a/src/share/vm/opto/domgraph.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/domgraph.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -505,8 +505,8 @@
 // Perform DFS search.  Setup 'vertex' as DFS to vertex mapping.  Setup
 // 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
 int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
-  // Allocate stack of size C->unique()/8 to avoid frequent realloc
-  GrowableArray <Node *> dfstack(pil->C->unique() >> 3);
+  // Allocate stack of size C->live_nodes()/8 to avoid frequent realloc
+  GrowableArray <Node *> dfstack(pil->C->live_nodes() >> 3);
   Node *b = pil->C->root();
   int dfsnum = 1;
   dfsorder[b->_idx] = dfsnum; // Cache parent's dfsnum for a later use
--- a/src/share/vm/opto/escape.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/escape.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -3183,7 +3183,7 @@
     // Note 2: MergeMem may already contains instance memory slices added
     // during find_inst_mem() call when memory nodes were processed above.
     igvn->hash_delete(nmm);
-    uint nslices = nmm->req();
+    uint nslices = MIN2(nmm->req(), new_index_start);
     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
       Node* mem = nmm->in(i);
       Node* cur = NULL;
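
The MIN2 clamp keeps the loop from visiting memory slices that find_inst_mem() appends to nmm while this very loop is running (see "Note 2" above): only the slices that existed before this pass, those below new_index_start, need reprocessing. MIN2 is HotSpot's plain minimum helper, essentially:

// Essentially HotSpot's MIN2 from globalDefinitions.hpp.
template <class T> inline T MIN2(T a, T b) { return (a < b) ? a : b; }
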
--- a/src/share/vm/opto/gcm.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/gcm.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -118,8 +118,8 @@
 //------------------------------schedule_pinned_nodes--------------------------
 // Set the basic block for Nodes pinned into blocks
 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
-  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
-  GrowableArray <Node *> spstack(C->unique() + 8);
+  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
+  GrowableArray <Node *> spstack(C->live_nodes() + 8);
   spstack.push(_root);
   while (spstack.is_nonempty()) {
     Node* node = spstack.pop();
@@ -1285,7 +1285,7 @@
   visited.Clear();
   Node_List stack(arena);
   // Pre-grow the list
-  stack.map((C->unique() >> 1) + 16, NULL);
+  stack.map((C->live_nodes() >> 1) + 16, NULL);
   if (!schedule_early(visited, stack)) {
     // Bailout without retry
     C->record_method_not_compilable("early schedule failed");
--- a/src/share/vm/opto/loopnode.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/loopnode.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -2230,7 +2230,7 @@
   // _nodes array holds the earliest legal controlling CFG node.
 
   // Allocate stack with enough space to avoid frequent realloc
-  int stack_size = (C->unique() >> 1) + 16; // (unique>>1)+16 from Java2D stats
+  int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
   Node_Stack nstack( a, stack_size );
 
   visited.Clear();
@@ -2686,7 +2686,7 @@
     }
   }
   if (_dom_stk == NULL) {
-    uint init_size = C->unique() / 100; // Guess that 1/100 is a reasonable initial size.
+    uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
     if (init_size < 10) init_size = 10;
     _dom_stk = new GrowableArray<uint>(init_size);
   }
@@ -2776,8 +2776,8 @@
 // The sort is of size number-of-control-children, which generally limits
 // it to size 2 (i.e., I just choose between my 2 target loops).
 void PhaseIdealLoop::build_loop_tree() {
-  // Allocate stack of size C->unique()/2 to avoid frequent realloc
-  GrowableArray <Node *> bltstack(C->unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
   Node *n = C->root();
   bltstack.push(n);
   int pre_order = 1;
@@ -3666,7 +3666,7 @@
 void PhaseIdealLoop::dump( ) const {
   ResourceMark rm;
   Arena* arena = Thread::current()->resource_area();
-  Node_Stack stack(arena, C->unique() >> 2);
+  Node_Stack stack(arena, C->live_nodes() >> 2);
   Node_List rpo_list;
   VectorSet visited(arena);
   visited.set(C->top()->_idx);
--- a/src/share/vm/opto/matcher.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/matcher.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -335,14 +335,14 @@
   grow_new_node_array(C->unique());
 
   // Reset node counter so MachNodes start with _idx at 0
-  int nodes = C->unique(); // save value
+  int live_nodes = C->live_nodes();
   C->set_unique(0);
   C->reset_dead_node_list();
 
   // Recursively match trees from old space into new space.
   // Correct leaves of new-space Nodes; they point to old-space.
   _visited.Clear();             // Clear visit bits for xform call
-  C->set_cached_top_node(xform( C->top(), nodes ));
+  C->set_cached_top_node(xform( C->top(), live_nodes));
   if (!C->failing()) {
     Node* xroot =        xform( C->root(), 1 );
     if (xroot == NULL) {
@@ -995,7 +995,7 @@
 Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
 Node *Matcher::xform( Node *n, int max_stack ) {
   // Use one stack to keep both: child's node/state and parent's node/index
-  MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
+  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
   mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
 
   while (mstack.is_nonempty()) {
@@ -2021,8 +2021,8 @@
 //------------------------------find_shared------------------------------------
 // Set bits if Node is shared or otherwise a root
 void Matcher::find_shared( Node *n ) {
-  // Allocate stack of size C->unique() * 2 to avoid frequent realloc
-  MStack mstack(C->unique() * 2);
+  // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
+  MStack mstack(C->live_nodes() * 2);
   // Mark nodes as address_visited if they are inputs to an address expression
   VectorSet address_visited(Thread::current()->resource_area());
   mstack.push(n, Visit);     // Don't need to pre-visit root node
--- a/src/share/vm/opto/node.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/node.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -1749,7 +1749,7 @@
   uint depth = (uint)ABS(d);
   int direction = d;
   Compile* C = Compile::current();
-  GrowableArray <Node *> nstack(C->unique());
+  GrowableArray <Node *> nstack(C->live_nodes());
 
   nstack.append(s);
   int begin = 0;
--- a/src/share/vm/opto/phaseX.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/phaseX.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -783,7 +783,7 @@
 //------------------------------PhaseIterGVN-----------------------------------
 // Initialize hash table to fresh and clean for +VerifyOpto
 PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
-                                                                      _stack(C->unique() >> 1),
+                                                                      _stack(C->live_nodes() >> 1),
                                                                       _delay_transform(false) {
 }
 
@@ -800,7 +800,11 @@
 // Initialize with previous PhaseGVN info from Parser
 PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
                                               _worklist(*C->for_igvn()),
-                                              _stack(C->unique() >> 1),
+// TODO: Before incremental inlining it was allocated only once and it was fine. Now that
+//       the constructor is used in incremental inlining, this consumes too much memory:
+//                                            _stack(C->live_nodes() >> 1),
+//       So, as a band-aid, we replace this by:
+                                              _stack(C->comp_arena(), 32),
                                               _delay_transform(false)
 {
   uint max;
@@ -1586,7 +1590,7 @@
   _nodes.map( n->_idx, new_node );  // Flag as having been cloned
 
   // Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
-  GrowableArray <Node *> trstack(C->unique() >> 1);
+  GrowableArray <Node *> trstack(C->live_nodes() >> 1);
 
   trstack.push(new_node);           // Process children of cloned node
   while ( trstack.is_nonempty() ) {
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -3751,7 +3751,7 @@
     // Deoptimize all activations depending on marked nmethods
     Deoptimization::deoptimize_dependents();
 
-    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+    // Make the dependent methods not entrant
     CodeCache::make_marked_nmethods_not_entrant();
 
     // From now on we know that the dependency information is complete
--- a/src/share/vm/runtime/objectMonitor.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/runtime/objectMonitor.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -226,7 +226,8 @@
 //
 // * The monitor entry list operations avoid locks, but strictly speaking
 //   they're not lock-free.  Enter is lock-free, exit is not.
-//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+//   For a description of 'Methods and apparatus providing non-blocking access
+//   to a resource,' see U.S. Pat. No. 7844973.
 //
 // * The cxq can have multiple concurrent "pushers" but only one concurrent
 //   detaching thread.  This mechanism is immune from the ABA corruption.
@@ -1955,7 +1956,8 @@
 // (duration) or we can fix the count at approximately the duration of
 // a context switch and vary the frequency.   Of course we could also
 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
-// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+// For a description of 'Adaptive spin-then-block mutual exclusion in
+// multi-threaded processing,' see U.S. Pat. No. 8046758.
 //
 // This implementation varies the duration "D", where D varies with
 // the success rate of recent spin attempts. (D is capped at approximately
--- a/src/share/vm/runtime/sweeper.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -538,10 +538,14 @@
   } else if (nm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
-    if (nm->can_not_entrant_be_converted()) {
+    if (nm->can_convert_to_zombie()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
+      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
+      // nmethods during the next safepoint (see ICStub::finalize).
+      MutexLocker cl(CompiledIC_lock);
+      nm->clear_ic_stubs();
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       _zombified_count++;
@@ -567,6 +571,12 @@
       release_nmethod(nm);
       _flushed_count++;
     } else {
+      {
+        // Clean ICs of unloaded nmethods as well because they may reference other
+        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+        MutexLocker cl(CompiledIC_lock);
+        nm->cleanup_inline_caches();
+      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       _zombified_count++;
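
Both sweeper.cpp insertions rely on MutexLocker being a scoped (RAII) lock: in the second hunk the extra braces release CompiledIC_lock before make_zombie() runs, while in the first hunk the lock stays held to the end of the enclosing block. A minimal sketch of the idiom (hypothetical Mutex type; HotSpot's MutexLocker behaves analogously):

struct Mutex { void lock() {} void unlock() {} };

// Acquire on construction, release on scope exit.
struct MutexLocker {
  Mutex& m;
  explicit MutexLocker(Mutex& m) : m(m) { m.lock(); }
  ~MutexLocker() { m.unlock(); }
};

void example(Mutex& compiled_ic_lock) {
  {
    MutexLocker cl(compiled_ic_lock);  // held only inside these braces
    // clean inline caches ...
  }                                    // lock released here
  // make_zombie() can run without holding the lock
}
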
--- a/src/share/vm/runtime/vm_operations.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/runtime/vm_operations.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -106,8 +106,8 @@
   // Deoptimize all activations depending on marked nmethods
   Deoptimization::deoptimize_dependents();
 
-  // Make the dependent methods zombies
-  CodeCache::make_marked_nmethods_zombies();
+  // Make the dependent methods not entrant
+  CodeCache::make_marked_nmethods_not_entrant();
 }
 
 
--- a/src/share/vm/services/management.cpp	Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/services/management.cpp	Thu Oct 29 17:03:53 2015 -0700
@@ -1107,6 +1107,8 @@
                            bool with_locked_monitors,
                            bool with_locked_synchronizers,
                            TRAPS) {
+  // no need to actually perform thread dump if no TIDs are specified
+  if (num_threads == 0) return;
 
   // First get an array of threadObj handles.
   // A JavaThread may terminate before we get the stack trace.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/escapeAnalysis/TestEABadMergeMem.java	Thu Oct 29 17:03:53 2015 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8134031
+ * @summary Bad rewiring of memory edges when we split unique types during EA
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestEABadMergeMem::m_notinlined TestEABadMergeMem
+ *
+ */
+
+public class TestEABadMergeMem {
+
+    static class Box {
+        int i;
+    }
+
+    static void m_notinlined() {
+    }
+
+    static float dummy1;
+    static float dummy2;
+
+    static int test(Box a, Box c, int i, int j, int k, boolean flag1, boolean flag2) {
+        Box b = new Box(); // non escaping
+        a.i = i;
+        b.i = j;
+        c.i = k;
+
+        m_notinlined();
+
+        boolean flag3 = false;
+        if (flag1) {
+            for (int ii = 0; ii < 100; ii++) {
+                if (flag2) {
+                    dummy1 = (float)ii;
+                } else {
+                    dummy2 = (float)ii;
+                }
+            }
+            flag3 = true;
+        }
+        // Memory Phi here with projection of not inlined call as one edge, MergeMem as other
+
+        if (flag3) { // will split through Phi during loopopts
+            int res = c.i + b.i;
+            m_notinlined(); // prevents split through phi during igvn
+            return res;
+        } else {
+            return 44 + 43;
+        }
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            // m(2);
+            Box a = new Box();
+            Box c = new Box();
+            int res = test(a, c, 42, 43, 44, (i%2) == 0, (i%3) == 0);
+            if (res != 44 + 43) {
+                throw new RuntimeException("Bad result " + res);
+            }
+        }
+    }
+
+}