changeset 4479:795212ad5b1b hs24-b38

Merge
author amurillo
date Thu, 28 Mar 2013 10:46:36 -0700
parents 52e13c42fab4 (current diff) 539375f92462 (diff)
children 5e622bdc713e
files src/share/vm/gc_implementation/shared/promotionFailedInfo.hpp
diffstat 91 files changed, 1969 insertions(+), 668 deletions(-)
--- a/make/hotspot_version	Wed Mar 27 16:18:19 2013 -0700
+++ b/make/hotspot_version	Thu Mar 28 10:46:36 2013 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=37
+HS_BUILD_NUMBER=38
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1761,13 +1761,13 @@
   }
 #endif
 
-  int len = strlen(file) + strlen(msg) + 1 + 4;
-  sprintf(buffer, "%d", line);
-  len += strlen(buffer);
-  sprintf(buffer, " at offset %d ", offset());
-  len += strlen(buffer);
-  char * real_msg = new char[len];
-  sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
+  const char* real_msg = NULL;
+  {
+    ResourceMark rm;
+    stringStream ss;
+    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
+    real_msg = code_string(ss.as_string());
+  }
 
   // Call indirectly to solve generation ordering problem
   AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
@@ -1799,13 +1799,13 @@
   // plausibility check for oops
   if (!VerifyOops) return;
 
-  char buffer[64];
-  sprintf(buffer, "%d", line);
-  int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
-  sprintf(buffer, " at SP+%d ", addr.disp());
-  len += strlen(buffer);
-  char * real_msg = new char[len];
-  sprintf(real_msg, "%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
+  const char* real_msg = NULL;
+  {
+    ResourceMark rm;
+    stringStream ss;
+    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
+    real_msg = code_string(ss.as_string());
+  }
 
   // Call indirectly to solve generation ordering problem
   AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());
@@ -2006,9 +2006,13 @@
   // in order to run automated test scripts on the VM
   // Use the flag ShowMessageBoxOnError
 
-  char* b = new char[1024];
-  sprintf(b, "untested: %s", what);
-
+  const char* b = NULL;
+  {
+    ResourceMark rm;
+    stringStream ss;
+    ss.print("untested: %s", what);
+    b = code_string(ss.as_string());
+  }
   if (ShowMessageBoxOnError) { STOP(b); }
   else                       { warn(b); }
 }
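
Note: the verify_oop and untested() hunks above all drop hand-counted new char[]/sprintf buffers (whose length arithmetic was easy to get wrong and whose allocations were never freed) in favor of a ResourceMark-scoped stringStream whose result is copied into CodeBuffer-owned storage via code_string(). A minimal standalone sketch of the same idea, in plain C++ with a hypothetical StringPool standing in for the HotSpot stringStream/ResourceMark/code_string API:

#include <cstdio>
#include <deque>
#include <sstream>
#include <string>

// Hypothetical pool standing in for the CodeBuffer-owned storage that
// code_string() copies into; std::deque keeps element references stable
// across later insertions, so returned pointers remain valid.
class StringPool {
  std::deque<std::string> pool_;
 public:
  const char* intern(const std::string& s) {
    pool_.push_back(s);
    return pool_.back().c_str();   // valid for the pool's lifetime
  }
};

const char* make_verify_msg(StringPool& pool, const char* msg,
                            int offset, const char* file, int line) {
  std::ostringstream ss;           // no manual strlen/sprintf length math
  ss << msg << " at offset " << offset << " (" << file << ":" << line << ")";
  return pool.intern(ss.str());    // pointer stays valid after return
}

int main() {
  StringPool pool;
  const char* m = make_verify_msg(pool, "broken oop", 16, "assembler_sparc.cpp", 1761);
  std::printf("%s\n", m);
  return 0;
}
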
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -2184,7 +2184,8 @@
                                            int callee_locals_size,
                                            frame* caller,
                                            frame* interpreter_frame,
-                                           bool is_top_frame) {
+                                           bool is_top_frame,
+                                           bool is_bottom_frame) {
 
   assert(popframe_extra_args == 0, "NEED TO FIX");
   // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -252,6 +252,11 @@
       return false;
     }
 
+    // Could be a zombie method
+    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
+      return false;
+    }
+
     // It should be safe to construct the sender though it might not be valid
 
     frame sender(_SENDER_SP, younger_sp, adjusted_stack);
@@ -294,10 +299,10 @@
       return jcw_safe;
     }
 
-    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
+    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
     // because you must allocate window space
 
-    if (sender_blob->frame_size() == 0) {
+    if (sender_blob->frame_size() <= 0) {
       assert(!sender_blob->is_nmethod(), "should count return address at least");
       return false;
     }
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1578,7 +1578,8 @@
                                            int callee_local_count,
                                            frame* caller,
                                            frame* interpreter_frame,
-                                           bool is_top_frame) {
+                                           bool is_top_frame,
+                                           bool is_bottom_frame) {
   // Note: This calculation must exactly parallel the frame setup
   // in InterpreterGenerator::generate_fixed_frame.
   // If f!=NULL, set up the following variables:
@@ -1661,6 +1662,15 @@
       int delta = local_words - parm_words;
       int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
       *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
+      if (!is_bottom_frame) {
+        // Llast_SP is set below for the current frame to SP (with the
+        // extra space for the callee's locals). Here we adjust
+        // Llast_SP for the caller's frame, removing the extra space
+        // for the current method's locals.
+        *caller->register_addr(Llast_SP) = *interpreter_frame->register_addr(I5_savedSP);
+      } else {
+        assert(*caller->register_addr(Llast_SP) >= *interpreter_frame->register_addr(I5_savedSP), "strange Llast_SP");
+      }
     } else {
       assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
       // Don't have Lesp available; lay out locals block in the caller
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -9651,8 +9651,13 @@
   if (!VerifyOops) return;
 
   // Pass register number to verify_oop_subroutine
-  char* b = new char[strlen(s) + 50];
-  sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+  const char* b = NULL;
+  {
+    ResourceMark rm;
+    stringStream ss;
+    ss.print("verify_oop: %s: %s", reg->name(), s);
+    b = code_string(ss.as_string());
+  }
   BLOCK_COMMENT("verify_oop {");
 #ifdef _LP64
   push(rscratch1);                    // save r10, trashed by movptr()
@@ -9686,9 +9691,14 @@
   { Label L;
     testptr(tmp, tmp);
     if (WizardMode) {
+      const char* buf = NULL;
+      {
+        ResourceMark rm;
+        stringStream ss;
+        ss.print("DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
+        buf = code_string(ss.as_string());
+      }
       jcc(Assembler::notZero, L);
-      char* buf = new char[40];
-      sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
       STOP(buf);
     } else {
       jccb(Assembler::notZero, L);
@@ -9732,9 +9742,13 @@
 
   // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
   // Pass register number to verify_oop_subroutine
-  char* b = new char[strlen(s) + 50];
-  sprintf(b, "verify_oop_addr: %s", s);
-
+  const char* b = NULL;
+  {
+    ResourceMark rm;
+    stringStream ss;
+    ss.print("verify_oop_addr: %s", s);
+    b = code_string(ss.as_string());
+  }
 #ifdef _LP64
   push(rscratch1);                    // save r10, trashed by movptr()
 #endif
@@ -10984,7 +10998,7 @@
   Address::ScaleFactor scale = Address::times_2;
   int stride = 8;
 
-  if (UseAVX >= 2) {
+  if (UseAVX >= 2 && UseSSE42Intrinsics) {
     Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
     Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
     Label COMPARE_TAIL_LONG;
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -2351,7 +2351,8 @@
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
-                                           bool is_top_frame) {
+                                           bool is_top_frame,
+                                           bool is_bottom_frame) {
 
   assert(popframe_extra_args == 0, "FIX ME");
   // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
--- a/src/cpu/x86/vm/frame_x86.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/x86/vm/frame_x86.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -188,17 +188,7 @@
       return false;
     }
 
-    // Exception stubs don't make calls
-    if (sender_blob->is_exception_stub()) {
-      return false;
-    }
-
-    if (sender_blob->is_deoptimization_stub()) {
-        return false;
-    }
-
     // Could be the call_stub
-
     if (StubRoutines::returns_to_call_stub(sender_pc)) {
       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
       bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
@@ -228,10 +218,10 @@
       }
     }
 
-    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
+    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
     // because the return address counts against the callee's frame.
 
-    if (sender_blob->frame_size() == 0) {
+    if (sender_blob->frame_size() <= 0) {
       assert(!sender_blob->is_nmethod(), "should count return address at least");
       return false;
     }
@@ -241,7 +231,9 @@
     // should not be anything but the call stub (already covered), the interpreter (already covered)
     // or an nmethod.
 
-    assert(sender_blob->is_runtime_stub() || sender_blob->is_nmethod(), "Impossible call chain");
+    if (!sender_blob->is_nmethod()) {
+        return false;
+    }
 
     // Could put some more validation for the potential non-interpreted sender
     // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1585,7 +1585,8 @@
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
-                                           bool is_top_frame) {
+                                           bool is_top_frame,
+                                           bool is_bottom_frame) {
   // Note: This calculation must exactly parallel the frame setup
   // in AbstractInterpreterGenerator::generate_method_entry.
   // If interpreter_frame!=NULL, set up the method, locals, and monitors.
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1599,7 +1599,8 @@
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
-                                           bool is_top_frame) {
+                                           bool is_top_frame,
+                                           bool is_bottom_frame) {
   // Note: This calculation must exactly parallel the frame setup
   // in AbstractInterpreterGenerator::generate_method_entry.
   // If interpreter_frame!=NULL, set up the method, locals, and monitors.
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -918,7 +918,8 @@
                                            int       callee_locals,
                                            frame*    caller,
                                            frame*    interpreter_frame,
-                                           bool      is_top_frame) {
+                                           bool      is_top_frame,
+                                           bool      is_bottom_frame) {
   assert(popframe_extra_args == 0, "what to do?");
   assert(!is_top_frame || (!callee_locals && !callee_param_count),
          "top frame should have no caller");
--- a/src/share/vm/adlc/adlparse.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/adlc/adlparse.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -168,7 +168,7 @@
   // Check for block delimiter
   if ( (_curchar != '%')
        || ( next_char(),  (_curchar != '{')) ) {
-    parse_err(SYNERR, "missing '%{' in instruction definition\n");
+    parse_err(SYNERR, "missing '%%{' in instruction definition\n");
     return;
   }
   next_char();                     // Maintain the invariant
@@ -253,7 +253,7 @@
   } while(_curchar != '%');
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing '%}' in instruction definition\n");
+    parse_err(SYNERR, "missing '%%}' in instruction definition\n");
     return;
   }
   // Check for "Set" form of chain rule
@@ -423,7 +423,7 @@
   skipws();
   // Check for block delimiter
   if ((_curchar != '%') || (*(_ptr+1) != '{')) { // If not open block
-    parse_err(SYNERR, "missing '%c{' in operand definition\n","%");
+    parse_err(SYNERR, "missing '%%{' in operand definition\n");
     return;
   }
   next_char(); next_char();        // Skip over "%{" symbol
@@ -483,7 +483,7 @@
   } while(_curchar != '%');
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing '%}' in operand definition\n");
+    parse_err(SYNERR, "missing '%%}' in operand definition\n");
     return;
   }
   // Add operand to tail of operand list
@@ -1324,7 +1324,7 @@
   // Check for block delimiter
   if ( (_curchar != '%')
        || ( next_char(),  (_curchar != '{')) ) {
-    parse_err(SYNERR, "missing '%{' in pipeline definition\n");
+    parse_err(SYNERR, "missing '%%{' in pipeline definition\n");
     return;
   }
   next_char();                     // Maintain the invariant
@@ -1341,7 +1341,7 @@
       skipws();
       if ( (_curchar != '%')
            || ( next_char(),  (_curchar != '{')) ) {
-        parse_err(SYNERR, "expected '%{'\n");
+        parse_err(SYNERR, "expected '%%{'\n");
         return;
       }
       next_char(); skipws();
@@ -1397,7 +1397,7 @@
       skipws();
       if ( (_curchar != '%')
            || ( next_char(),  (_curchar != '{')) ) {
-        parse_err(SYNERR, "expected '%{'\n");
+        parse_err(SYNERR, "expected '%%{'\n");
         return;
       }
       next_char(); skipws();
@@ -1586,7 +1586,7 @@
 
       if ( (_curchar != '%')
            || ( next_char(),  (_curchar != '}')) ) {
-        parse_err(SYNERR, "expected '%}', found \"%c\"\n", _curchar);
+        parse_err(SYNERR, "expected '%%}', found \"%c\"\n", _curchar);
       }
       next_char(); skipws();
 
@@ -1612,7 +1612,7 @@
 
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing \"%}\" in pipeline definition\n");
+    parse_err(SYNERR, "missing \"%%}\" in pipeline definition\n");
     return;
   }
 
@@ -1775,7 +1775,7 @@
   // Check for block delimiter
   if ( (_curchar != '%')
        || ( next_char(),  (_curchar != '{')) ) {
-    parse_err(SYNERR, "missing \"%{\" in pipe_class definition\n");
+    parse_err(SYNERR, "missing \"%%{\" in pipe_class definition\n");
     return;
   }
   next_char();
@@ -2062,7 +2062,7 @@
 
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing \"%}\" in pipe_class definition\n");
+    parse_err(SYNERR, "missing \"%%}\" in pipe_class definition\n");
     return;
   }
 
@@ -3341,12 +3341,12 @@
   char *disp        = NULL;
 
   if (_curchar != '%') {
-    parse_err(SYNERR, "Missing '%{' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '%'
   if (_curchar != '{') {
-    parse_err(SYNERR, "Missing '%{' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '{'
@@ -3354,7 +3354,7 @@
   do {
     char *field = get_ident();
     if (field == NULL) {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
     if ( strcmp(field,"base") == 0 ) {
@@ -3370,13 +3370,13 @@
       disp  = interface_field_parse();
     }
     else {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
   } while( _curchar != '%' );
   next_char();                  // Skip '%'
   if ( _curchar != '}' ) {
-    parse_err(SYNERR, "Missing '%}' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%}' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '}'
@@ -3403,12 +3403,12 @@
   const char *greater_format = "gt";
 
   if (_curchar != '%') {
-    parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '%'
   if (_curchar != '{') {
-    parse_err(SYNERR, "Missing '%{' for 'cond_interface' block.\n");
+    parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '{'
@@ -3416,7 +3416,7 @@
   do {
     char *field = get_ident();
     if (field == NULL) {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
     if ( strcmp(field,"equal") == 0 ) {
@@ -3438,13 +3438,13 @@
       greater = interface_field_parse(&greater_format);
     }
     else {
-      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%}' ending interface.\n");
+      parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
     }
   } while( _curchar != '%' );
   next_char();                  // Skip '%'
   if ( _curchar != '}' ) {
-    parse_err(SYNERR, "Missing '%}' for 'interface' block.\n");
+    parse_err(SYNERR, "Missing '%%}' for 'interface' block.\n");
     return NULL;
   }
   next_char();                  // Skip '}'
@@ -3543,7 +3543,7 @@
   }
   else if ((cnstr = find_cpp_block("match constructor")) == NULL ) {
     parse_err(SYNERR, "invalid construction of match rule\n"
-              "Missing ';' or invalid '%{' and '%}' constructor\n");
+              "Missing ';' or invalid '%%{' and '%%}' constructor\n");
     return NULL;                  // No MatchRule to return
   }
   if (_AD._adl_debug > 1)
@@ -3646,7 +3646,7 @@
       // Check for closing '"' and '%}' in format description
       skipws();                   // Move to closing '%}'
       if ( _curchar != '%' ) {
-        parse_err(SYNERR, "non-blank characters between closing '\"' and '%' in format");
+        parse_err(SYNERR, "non-blank characters between closing '\"' and '%%' in format");
         return NULL;
       }
     } // Done with format description inside
@@ -3654,7 +3654,7 @@
     skipws();
     // Past format description, at '%'
     if ( _curchar != '%' || *(_ptr+1) != '}' ) {
-      parse_err(SYNERR, "missing '%}' at end of format block");
+      parse_err(SYNERR, "missing '%%}' at end of format block");
       return NULL;
     }
     next_char();                  // Move past the '%'
@@ -3785,7 +3785,7 @@
   skipws();
   // Past format description, at '%'
   if ( _curchar != '%' || *(_ptr+1) != '}' ) {
-    parse_err(SYNERR, "missing '%}' at end of format block");
+    parse_err(SYNERR, "missing '%%}' at end of format block");
     return NULL;
   }
   next_char();                  // Move past the '%'
@@ -3834,7 +3834,7 @@
   skipws();                        // Skip leading whitespace
   if ((_curchar != '%')
       || (next_char(), (_curchar != '{')) ) { // If not open block
-    parse_err(SYNERR, "missing '%{' in expand definition\n");
+    parse_err(SYNERR, "missing '%%{' in expand definition\n");
     return(NULL);
   }
   next_char();                     // Maintain the invariant
@@ -3933,7 +3933,7 @@
   } while(_curchar != '%');
   next_char();
   if (_curchar != '}') {
-    parse_err(SYNERR, "missing '%}' in expand rule definition\n");
+    parse_err(SYNERR, "missing '%%}' in expand rule definition\n");
     return(NULL);
   }
   next_char();
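
Note: the adlparse.cpp hunks above all fix the same class of bug: parse_err() forwards its message to a printf-style formatter, so a literal '%' in the text must be escaped as "%%". A tiny standalone illustration (plain C++, not adlc code):

#include <cstdio>

int main() {
  // Wrong: "%{" would be parsed as the start of a conversion specification,
  // and printf behavior is undefined for an unknown specifier.
  // std::printf("missing '%{' in instruction definition\n");

  // Right: "%%" prints a single '%' character.
  std::printf("missing '%%{' in instruction definition\n");
  return 0;
}
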
--- a/src/share/vm/adlc/formssel.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/adlc/formssel.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -751,10 +751,11 @@
         !strcmp(_matrule->_rChild->_opType,"DecodeN")    ||
         !strcmp(_matrule->_rChild->_opType,"EncodeP")    ||
         !strcmp(_matrule->_rChild->_opType,"LoadN")      ||
-        !strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
         !strcmp(_matrule->_rChild->_opType,"LoadNKlass") ||
         !strcmp(_matrule->_rChild->_opType,"CreateEx")   ||  // type of exception
-        !strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
+        !strcmp(_matrule->_rChild->_opType,"CheckCastPP")||
+        !strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
+        !strcmp(_matrule->_rChild->_opType,"GetAndSetN")) )  return true;
   else if ( is_ideal_load() == Form::idealP )                return true;
   else if ( is_ideal_store() != Form::none  )                return true;
 
--- a/src/share/vm/asm/assembler.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/asm/assembler.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -340,15 +340,19 @@
   DelayedConstant::update_all();
 }
 
-
-
-
 void AbstractAssembler::block_comment(const char* comment) {
   if (sect() == CodeBuffer::SECT_INSTS) {
     code_section()->outer()->block_comment(offset(), comment);
   }
 }
 
+const char* AbstractAssembler::code_string(const char* str) {
+  if (sect() == CodeBuffer::SECT_INSTS || sect() == CodeBuffer::SECT_STUBS) {
+    return code_section()->outer()->code_string(str);
+  }
+  return NULL;
+}
+
 bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
   // Exception handler checks the nmethod's implicit null checks table
   // only when this method returns false.
--- a/src/share/vm/asm/assembler.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/asm/assembler.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -336,6 +336,8 @@
   // along with the disassembly when printing nmethods.  Currently
   // only supported in the instruction section of the code buffer.
   void block_comment(const char* comment);
+  // Copy str to a buffer that has the same lifetime as the CodeBuffer
+  const char* code_string(const char* str);
 
   // Label functions
   void bind(Label& L); // binds an unbound label L to the current code position
--- a/src/share/vm/asm/codeBuffer.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/asm/codeBuffer.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -618,8 +618,8 @@
   this->compute_final_layout(&dest);
   relocate_code_to(&dest);
 
-  // transfer comments from buffer to blob
-  dest_blob->set_comments(_comments);
+  // transfer strings and comments from buffer to blob
+  dest_blob->set_strings(_strings);
 
   // Done moving code bytes; were they the right size?
   assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");
@@ -907,58 +907,78 @@
 
 
 void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
-  _comments.add_comment(offset, comment);
+  _strings.add_comment(offset, comment);
+}
+
+const char* CodeBuffer::code_string(const char* str) {
+  return _strings.add_string(str);
 }
 
-class CodeComment: public CHeapObj<mtCode> {
+class CodeString: public CHeapObj<mtCode> {
  private:
-  friend class CodeComments;
+  friend class CodeStrings;
+  const char * _string;
+  CodeString*  _next;
   intptr_t     _offset;
-  const char * _comment;
-  CodeComment* _next;
 
-  ~CodeComment() {
+  ~CodeString() {
     assert(_next == NULL, "wrong interface for freeing list");
-    os::free((void*)_comment, mtCode);
-  }
-
- public:
-  CodeComment(intptr_t offset, const char * comment) {
-    _offset = offset;
-    _comment = os::strdup(comment, mtCode);
-    _next = NULL;
+    os::free((void*)_string, mtCode);
   }
 
-  intptr_t     offset()  const { return _offset;  }
-  const char * comment() const { return _comment; }
-  CodeComment* next()          { return _next; }
-
-  void set_next(CodeComment* next) { _next = next; }
+  bool is_comment() const { return _offset >= 0; }
 
-  CodeComment* find(intptr_t offset) {
-    CodeComment* a = this;
-    while (a != NULL && a->_offset != offset) {
-      a = a->_next;
-    }
-    return a;
+ public:
+  CodeString(const char * string, intptr_t offset = -1)
+    : _next(NULL), _offset(offset) {
+    _string = os::strdup(string, mtCode);
   }
 
-  // Convenience for add_comment.
-  CodeComment* find_last(intptr_t offset) {
-    CodeComment* a = find(offset);
-    if (a != NULL) {
-      while ((a->_next != NULL) && (a->_next->_offset == offset)) {
-        a = a->_next;
-      }
+  const char * string() const { return _string; }
+  intptr_t     offset() const { assert(_offset >= 0, "offset for non comment?"); return _offset;  }
+  CodeString* next()    const { return _next; }
+
+  void set_next(CodeString* next) { _next = next; }
+
+  CodeString* first_comment() {
+    if (is_comment()) {
+      return this;
+    } else {
+      return next_comment();
     }
-    return a;
+  }
+  CodeString* next_comment() const {
+    CodeString* s = _next;
+    while (s != NULL && !s->is_comment()) {
+      s = s->_next;
+    }
+    return s;
   }
 };
 
+CodeString* CodeStrings::find(intptr_t offset) const {
+  CodeString* a = _strings->first_comment();
+  while (a != NULL && a->offset() != offset) {
+    a = a->next_comment();
+  }
+  return a;
+}
 
-void CodeComments::add_comment(intptr_t offset, const char * comment) {
-  CodeComment* c      = new CodeComment(offset, comment);
-  CodeComment* inspos = (_comments == NULL) ? NULL : _comments->find_last(offset);
+// Convenience for add_comment.
+CodeString* CodeStrings::find_last(intptr_t offset) const {
+  CodeString* a = find(offset);
+  if (a != NULL) {
+    CodeString* c = NULL;
+    while (((c = a->next_comment()) != NULL) && (c->offset() == offset)) {
+      a = c;
+    }
+  }
+  return a;
+}
+
+void CodeStrings::add_comment(intptr_t offset, const char * comment) {
+  CodeString* c      = new CodeString(comment, offset);
+  CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);
 
   if (inspos) {
     // insert after already existing comments with same offset
@@ -966,43 +986,47 @@
     inspos->set_next(c);
   } else {
     // no comments with such offset, yet. Insert before anything else.
-    c->set_next(_comments);
-    _comments = c;
+    c->set_next(_strings);
+    _strings = c;
   }
 }
 
-
-void CodeComments::assign(CodeComments& other) {
-  _comments = other._comments;
+void CodeStrings::assign(CodeStrings& other) {
+  _strings = other._strings;
 }
 
-
-void CodeComments::print_block_comment(outputStream* stream, intptr_t offset) const {
-  if (_comments != NULL) {
-    CodeComment* c = _comments->find(offset);
+void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
+  if (_strings != NULL) {
+    CodeString* c = find(offset);
     while (c && c->offset() == offset) {
       stream->bol();
       stream->print("  ;; ");
-      stream->print_cr(c->comment());
-      c = c->next();
+      stream->print_cr(c->string());
+      c = c->next_comment();
     }
   }
 }
 
 
-void CodeComments::free() {
-  CodeComment* n = _comments;
+void CodeStrings::free() {
+  CodeString* n = _strings;
   while (n) {
     // unlink the node from the list saving a pointer to the next
-    CodeComment* p = n->_next;
-    n->_next = NULL;
+    CodeString* p = n->next();
+    n->set_next(NULL);
     delete n;
     n = p;
   }
-  _comments = NULL;
+  _strings = NULL;
 }
 
-
+const char* CodeStrings::add_string(const char * string) {
+  CodeString* s = new CodeString(string);
+  s->set_next(_strings);
+  _strings = s;
+  assert(s->string() != NULL, "should have a string");
+  return s->string();
+}
 
 void CodeBuffer::decode() {
   ttyLocker ttyl;
--- a/src/share/vm/asm/codeBuffer.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/asm/codeBuffer.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -29,7 +29,7 @@
 #include "code/oopRecorder.hpp"
 #include "code/relocInfo.hpp"
 
-class  CodeComments;
+class  CodeStrings;
 class  AbstractAssembler;
 class  MacroAssembler;
 class  PhaseCFG;
@@ -238,27 +238,31 @@
 #endif //PRODUCT
 };
 
-class CodeComment;
-class CodeComments VALUE_OBJ_CLASS_SPEC {
+class CodeString;
+class CodeStrings VALUE_OBJ_CLASS_SPEC {
 private:
 #ifndef PRODUCT
-  CodeComment* _comments;
+  CodeString* _strings;
 #endif
 
+  CodeString* find(intptr_t offset) const;
+  CodeString* find_last(intptr_t offset) const;
+
 public:
-  CodeComments() {
+  CodeStrings() {
 #ifndef PRODUCT
-    _comments = NULL;
+    _strings = NULL;
 #endif
   }
 
+  const char* add_string(const char * string) PRODUCT_RETURN_(return NULL;);
+
   void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
   void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
-  void assign(CodeComments& other)  PRODUCT_RETURN;
+  void assign(CodeStrings& other)  PRODUCT_RETURN;
   void free() PRODUCT_RETURN;
 };
 
-
 // A CodeBuffer describes a memory space into which assembly
 // code is generated.  This memory space usually occupies the
 // interior of a single BufferBlob, but in some cases it may be
@@ -324,7 +328,7 @@
   csize_t      _total_size;     // size in bytes of combined memory buffer
 
   OopRecorder* _oop_recorder;
-  CodeComments _comments;
+  CodeStrings  _strings;
   OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
   Arena*       _overflow_arena;
 
@@ -513,7 +517,7 @@
   void initialize_oop_recorder(OopRecorder* r);
 
   OopRecorder* oop_recorder() const   { return _oop_recorder; }
-  CodeComments& comments()            { return _comments; }
+  CodeStrings& strings()              { return _strings; }
 
   // Code generation
   void relocate(address at, RelocationHolder const& rspec, int format = 0) {
@@ -542,6 +546,7 @@
   address transform_address(const CodeBuffer &cb, address addr) const;
 
   void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
+  const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);
 
   // Log a little info about section usage in the CodeBuffer
   void log_section_sizes(const char* name);
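
Note: the codeBuffer changes above generalize the old per-offset CodeComments list into CodeStrings: one linked list whose entries are either block comments (offset >= 0) or plain interned strings (offset -1, added via code_string()/add_string()). A standalone sketch of that assumed shape, not the HotSpot classes themselves:

#include <cstdio>
#include <string>

struct Entry {
  std::string text;
  long        offset;   // >= 0: block comment at that code offset; -1: plain string
  Entry*      next;
};

struct Strings {
  Entry* head = nullptr;

  const char* add_string(const std::string& s) {        // rough analogue of code_string()
    head = new Entry{s, -1, head};
    return head->text.c_str();                          // node is never moved, pointer stays valid
  }
  void add_comment(long offset, const std::string& s) { // rough analogue of block_comment()
    head = new Entry{s, offset, head};
  }
  void print_comments_at(long offset) const {
    for (Entry* e = head; e != nullptr; e = e->next)
      if (e->offset == offset) std::printf("  ;; %s\n", e->text.c_str());
  }
  ~Strings() {                                          // rough analogue of CodeStrings::free()
    while (head != nullptr) { Entry* n = head->next; delete head; head = n; }
  }
};

int main() {
  Strings strings;
  const char* kept = strings.add_string("verify_oop: rax: oops");
  strings.add_comment(16, "verify_oop {");
  strings.print_comments_at(16);
  std::printf("%s\n", kept);
  return 0;
}
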
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -3662,11 +3662,12 @@
   }
 
   // now perform tests that are based on flag settings
-  if (callee->force_inline() || callee->should_inline()) {
-    // ignore heuristic controls on inlining
-    if (callee->force_inline())
-      print_inlining(callee, "force inline by annotation");
+  if (callee->force_inline()) {
+    print_inlining(callee, "force inline by annotation");
+  } else if (callee->should_inline()) {
+    print_inlining(callee, "force inline by CompileOracle");
   } else {
+    // use heuristic controls on inlining
     if (inline_level() > MaxInlineLevel                         ) INLINE_BAILOUT("inlining too deep");
     if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
     if (callee->code_size_for_inlining() > max_inline_size()    ) INLINE_BAILOUT("callee is too large");
--- a/src/share/vm/ci/ciMethod.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/ci/ciMethod.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -970,7 +970,7 @@
 // ciMethod::set_not_compilable
 //
 // Tell the VM that this method cannot be compiled at all.
-void ciMethod::set_not_compilable() {
+void ciMethod::set_not_compilable(const char* reason) {
   check_is_loaded();
   VM_ENTRY_MARK;
   ciEnv* env = CURRENT_ENV;
@@ -979,7 +979,7 @@
   } else {
     _is_c2_compilable = false;
   }
-  get_methodOop()->set_not_compilable(env->comp_level());
+  get_methodOop()->set_not_compilable(env->comp_level(), true, reason);
 }
 
 // ------------------------------------------------------------------
--- a/src/share/vm/ci/ciMethod.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/ci/ciMethod.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -250,7 +250,7 @@
   bool has_option(const char *option);
   bool can_be_compiled();
   bool can_be_osr_compiled(int entry_bci);
-  void set_not_compilable();
+  void set_not_compilable(const char* reason = NULL);
   bool has_compiled_code();
   int  instructions_size(int comp_level = CompLevel_any);
   void log_nmethod_identity(xmlStream* log);
--- a/src/share/vm/code/codeBlob.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/code/codeBlob.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -185,7 +185,7 @@
     FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode);
     _oop_maps = NULL;
   }
-  _comments.free();
+  _strings.free();
 }
 
 
--- a/src/share/vm/code/codeBlob.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/code/codeBlob.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -66,7 +66,7 @@
   int        _data_offset;                       // offset to where data region begins
   int        _frame_size;                        // size of stack frame
   OopMapSet* _oop_maps;                          // OopMap for this CodeBlob
-  CodeComments _comments;
+  CodeStrings _strings;
 
   friend class OopRecorder;
 
@@ -188,12 +188,12 @@
   // Print the comment associated with offset on stream, if there is one
   virtual void print_block_comment(outputStream* stream, address block_begin) const {
     intptr_t offset = (intptr_t)(block_begin - code_begin());
-    _comments.print_block_comment(stream, offset);
+    _strings.print_block_comment(stream, offset);
   }
 
   // Transfer ownership of comments to this CodeBlob
-  void set_comments(CodeComments& comments) {
-    _comments.assign(comments);
+  void set_strings(CodeStrings& strings) {
+    _strings.assign(strings);
   }
 };
 
--- a/src/share/vm/code/icBuffer.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/code/icBuffer.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -50,7 +50,7 @@
   friend class ICStubInterface;
   // This will be called only by ICStubInterface
   void    initialize(int size,
-                     CodeComments comments)      { _size = size; _ic_site = NULL; }
+                     CodeStrings strings)        { _size = size; _ic_site = NULL; }
   void    finalize(); // called when a method is removed
 
   // General info
--- a/src/share/vm/code/stubs.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/code/stubs.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -101,8 +101,8 @@
 
 Stub* StubQueue::request_committed(int code_size) {
   Stub* s = request(code_size);
-  CodeComments comments;
-  if (s != NULL) commit(code_size, comments);
+  CodeStrings strings;
+  if (s != NULL) commit(code_size, strings);
   return s;
 }
 
@@ -119,8 +119,8 @@
       assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
       if (_queue_end + requested_size <= _buffer_size) {
         // code fits in at the end => nothing to do
-        CodeComments comments;
-        stub_initialize(s, requested_size, comments);
+        CodeStrings strings;
+        stub_initialize(s, requested_size, strings);
         return s;
       } else {
         // stub doesn't fit in at the queue end
@@ -137,8 +137,8 @@
     // Queue: |XXX|.......|XXXXXXX|.......|
     //        ^0  ^end    ^begin  ^limit  ^size
     s = current_stub();
-    CodeComments comments;
-    stub_initialize(s, requested_size, comments);
+    CodeStrings strings;
+    stub_initialize(s, requested_size, strings);
     return s;
   }
   // Not enough space left
@@ -147,12 +147,12 @@
 }
 
 
-void StubQueue::commit(int committed_code_size, CodeComments& comments) {
+void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
   assert(committed_code_size > 0, "committed_code_size must be > 0");
   int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
   Stub* s = current_stub();
   assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
-  stub_initialize(s, committed_size, comments);
+  stub_initialize(s, committed_size, strings);
   _queue_end += committed_size;
   _number_of_stubs++;
   if (_mutex != NULL) _mutex->unlock();
--- a/src/share/vm/code/stubs.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/code/stubs.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -73,7 +73,7 @@
  public:
   // Initialization/finalization
   void    initialize(int size,
-                     CodeComments& comments)     { ShouldNotCallThis(); }                // called to initialize/specify the stub's size
+                     CodeStrings& strings)       { ShouldNotCallThis(); }                // called to initialize/specify the stub's size
   void    finalize()                             { ShouldNotCallThis(); }                // called before the stub is deallocated
 
   // General info/converters
@@ -107,7 +107,7 @@
  public:
   // Initialization/finalization
   virtual void    initialize(Stub* self, int size,
-                             CodeComments& comments)       = 0; // called after creation (called twice if allocated via (request, commit))
+                             CodeStrings& strings)         = 0; // called after creation (called twice if allocated via (request, commit))
   virtual void    finalize(Stub* self)                     = 0; // called before deallocation
 
   // General info/converters
@@ -136,7 +136,7 @@
    public:                                                 \
     /* Initialization/finalization */                      \
     virtual void    initialize(Stub* self, int size,       \
-                               CodeComments& comments)     { cast(self)->initialize(size, comments); } \
+                               CodeStrings& strings)       { cast(self)->initialize(size, strings); } \
     virtual void    finalize(Stub* self)                   { cast(self)->finalize(); }             \
                                                            \
     /* General info */                                     \
@@ -176,7 +176,7 @@
 
   // Stub functionality accessed via interface
   void  stub_initialize(Stub* s, int size,
-                        CodeComments& comments)  { assert(size % CodeEntryAlignment == 0, "size not aligned"); _stub_interface->initialize(s, size, comments); }
+                        CodeStrings& strings)    { assert(size % CodeEntryAlignment == 0, "size not aligned"); _stub_interface->initialize(s, size, strings); }
   void  stub_finalize(Stub* s)                   { _stub_interface->finalize(s); }
   int   stub_size(Stub* s) const                 { return _stub_interface->size(s); }
   bool  stub_contains(Stub* s, address pc) const { return _stub_interface->code_begin(s) <= pc && pc < _stub_interface->code_end(s); }
@@ -206,7 +206,7 @@
   Stub* request_committed(int code_size);        // request a stub that provides exactly code_size space for code
   Stub* request(int requested_code_size);        // request a stub with a (maximum) code space - locks the queue
   void  commit (int committed_code_size,
-                CodeComments& comments);         // commit the previously requested stub - unlocks the queue
+                CodeStrings& strings);           // commit the previously requested stub - unlocks the queue
 
   // Stub deallocation
   void  remove_first();                          // remove the first stub in the queue
--- a/src/share/vm/compiler/compileBroker.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1376,7 +1376,7 @@
       method->print_short_name(tty);
       tty->cr();
     }
-    method->set_not_compilable_quietly();
+    method->set_not_compilable(CompLevel_all, !quietly, "excluded by CompilerOracle");
   }
 
   return false;
--- a/src/share/vm/compiler/disassembler.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/compiler/disassembler.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -148,7 +148,7 @@
  private:
   nmethod*      _nm;
   CodeBlob*     _code;
-  CodeComments  _comments;
+  CodeStrings   _strings;
   outputStream* _output;
   address       _start, _end;
 
@@ -188,7 +188,7 @@
   void print_address(address value);
 
  public:
-  decode_env(CodeBlob* code, outputStream* output, CodeComments c = CodeComments());
+  decode_env(CodeBlob* code, outputStream* output, CodeStrings c = CodeStrings());
 
   address decode_instructions(address start, address end);
 
@@ -230,13 +230,13 @@
   const char* options() { return _option_buf; }
 };
 
-decode_env::decode_env(CodeBlob* code, outputStream* output, CodeComments c) {
+decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) {
   memset(this, 0, sizeof(*this));
   _output = output ? output : tty;
   _code = code;
   if (code != NULL && code->is_nmethod())
     _nm = (nmethod*) code;
-  _comments.assign(c);
+  _strings.assign(c);
 
   // by default, output pc but not bytes:
   _print_pc       = true;
@@ -358,7 +358,7 @@
   if (cb != NULL) {
     cb->print_block_comment(st, p);
   }
-  _comments.print_block_comment(st, (intptr_t)(p - _start));
+  _strings.print_block_comment(st, (intptr_t)(p - _start));
   if (_print_pc) {
     st->print("  " PTR_FORMAT ": ", p);
   }
@@ -470,7 +470,7 @@
   env.decode_instructions(cb->code_begin(), cb->code_end());
 }
 
-void Disassembler::decode(address start, address end, outputStream* st, CodeComments c) {
+void Disassembler::decode(address start, address end, outputStream* st, CodeStrings c) {
   if (!load_library())  return;
   decode_env env(CodeCache::find_blob_unsafe(start), st, c);
   env.decode_instructions(start, end);
--- a/src/share/vm/compiler/disassembler.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/compiler/disassembler.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -88,7 +88,7 @@
   }
   static void decode(CodeBlob *cb,               outputStream* st = NULL);
   static void decode(nmethod* nm,                outputStream* st = NULL);
-  static void decode(address begin, address end, outputStream* st = NULL, CodeComments c = CodeComments());
+  static void decode(address begin, address end, outputStream* st = NULL, CodeStrings c = CodeStrings());
 };
 
 #endif // SHARE_VM_COMPILER_DISASSEMBLER_HPP
--- a/src/share/vm/compiler/oopMap.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/compiler/oopMap.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -542,17 +542,17 @@
     st->print("Oop");
     break;
   case OopMapValue::value_value:
-    st->print("Value" );
+    st->print("Value");
     break;
   case OopMapValue::narrowoop_value:
-    tty->print("NarrowOop" );
+    st->print("NarrowOop");
     break;
   case OopMapValue::callee_saved_value:
-    st->print("Callers_" );
+    st->print("Callers_");
     optional->print_on(st);
     break;
   case OopMapValue::derived_oop_value:
-    st->print("Derived_oop_" );
+    st->print("Derived_oop_");
     optional->print_on(st);
     break;
   default:
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -33,7 +33,7 @@
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -284,7 +284,7 @@
 }
 
 void ParScanThreadState::print_promotion_failure_size() {
-  if (_promotion_failed_info.promotion_failed() && PrintPromotionFailure) {
+  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
     gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                         _thread_num, _promotion_failed_info.first_size());
   }
@@ -908,7 +908,7 @@
   // Trace promotion failure in the parallel GC threads
   thread_state_set.trace_promotion_failed(gc_tracer);
   // Single threaded code may have reported promotion failure to the global state
-  if (_promotion_failed_info.promotion_failed()) {
+  if (_promotion_failed_info.has_failed()) {
     gc_tracer.report_promotion_failed(_promotion_failed_info);
   }
   // Reset the PromotionFailureALot counters.
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -27,7 +27,7 @@
 
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "utilities/taskqueue.hpp"
 
@@ -183,13 +183,13 @@
 
   // Promotion failure stats
   void register_promotion_failure(size_t sz) {
-    _promotion_failed_info.register_promotion_failed(sz);
+    _promotion_failed_info.register_copy_failure(sz);
   }
   PromotionFailedInfo& promotion_failed_info() {
     return _promotion_failed_info;
   }
   bool promotion_failed() {
-    return _promotion_failed_info.promotion_failed();
+    return _promotion_failed_info.has_failed();
   }
   void print_promotion_failure_size();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -94,7 +94,7 @@
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
     assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
-    if (manager->_promotion_failed_info.promotion_failed()) {
+    if (manager->_promotion_failed_info.has_failed()) {
       gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
       promotion_failure_occurred = true;
     }
@@ -316,7 +316,7 @@
     // We won any races, we "own" this object.
     assert(obj == obj->forwardee(), "Sanity");
 
-    _promotion_failed_info.register_promotion_failed(obj->size());
+    _promotion_failed_info.register_copy_failure(obj->size());
 
     obj->push_contents(this);
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -27,7 +27,7 @@
 
 #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/taskqueue.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP
+
+#include "runtime/thread.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class CopyFailedInfo VALUE_OBJ_CLASS_SPEC {
+  size_t    _first_size;
+  size_t    _smallest_size;
+  size_t    _total_size;
+  uint      _count;
+  OSThread* _thread;
+
+ public:
+  CopyFailedInfo() : _first_size(0), _smallest_size(0), _total_size(0), _count(0), _thread(NULL) {}
+
+  void register_copy_failure(size_t size) {
+    if (_first_size == 0) {
+      _first_size = size;
+      _smallest_size = size;
+      _thread = Thread::current()->osthread();
+    } else if (size < _smallest_size) {
+      _smallest_size = size;
+    }
+    _total_size += size;
+    _count++;
+    assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local.");
+  }
+
+  void reset() {
+    _first_size = 0;
+    _smallest_size = 0;
+    _total_size = 0;
+    _count = 0;
+    _thread = NULL;
+  }
+
+  bool has_failed() const { return _count != 0; }
+  size_t first_size() const { return _first_size; }
+  size_t smallest_size() const { return _smallest_size; }
+  size_t total_size() const { return _total_size; }
+  uint failed_count() const { return _count; }
+  OSThread* thread() const { return _thread; }
+};
+
+class PromotionFailedInfo : public CopyFailedInfo {};
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP */
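
Note: for reference, a standalone sketch (assumed usage, not the actual GC call sites) of how a thread-local record like CopyFailedInfo accumulates the first, smallest, and total failed-copy sizes before being reported once per collection:

#include <cstdio>
#include <cstddef>

struct CopyFailedSketch {
  size_t first_size = 0, smallest_size = 0, total_size = 0;
  unsigned count = 0;

  void register_copy_failure(size_t size) {
    if (first_size == 0) {             // first failure seen by this thread
      first_size = smallest_size = size;
    } else if (size < smallest_size) {
      smallest_size = size;
    }
    total_size += size;
    ++count;
  }
  bool has_failed() const { return count != 0; }
};

int main() {
  CopyFailedSketch info;
  for (size_t sz : {64u, 16u, 128u}) info.register_copy_failure(sz);
  if (info.has_failed()) {
    std::printf("failures=%u first=%zu smallest=%zu total=%zu\n",
                info.count, info.first_size, info.smallest_size, info.total_size);
  }
  return 0;
}
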
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -26,7 +26,7 @@
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/referenceProcessorStats.hpp"
 #include "utilities/globalDefinitions.hpp"
 
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -28,7 +28,7 @@
 #include "gc_interface/gcCause.hpp"
 #include "gc_interface/gcName.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
 #include "memory/referenceType.hpp"
 #ifndef SERIALGC
@@ -138,7 +138,7 @@
   YoungGCTracer(GCName name) : GCTracer(name) {}
 
  public:
-  virtual void report_promotion_failed(const PromotionFailedInfo& pf_info);
+  void report_promotion_failed(const PromotionFailedInfo& pf_info);
 
  protected:
   virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -27,7 +27,7 @@
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "trace/tracing.hpp"
 #ifndef SERIALGC
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -92,15 +92,21 @@
   }
 }
 
+static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
+  TraceStructCopyFailed failed_info;
+  failed_info.set_objectCount(cf_info.failed_count());
+  failed_info.set_firstSize(cf_info.first_size());
+  failed_info.set_smallestSize(cf_info.smallest_size());
+  failed_info.set_totalSize(cf_info.total_size());
+  failed_info.set_thread(cf_info.thread()->thread_id());
+  return failed_info;
+}
+
 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
   EventPromotionFailed e;
   if (e.should_commit()) {
     e.set_gcId(_shared_gc_info.id());
-    e.set_objectCount(pf_info.promotion_failed_count());
-    e.set_firstSize(pf_info.first_size());
-    e.set_smallestSize(pf_info.smallest_size());
-    e.set_totalSize(pf_info.total_size());
-    e.set_thread(pf_info.thread()->thread_id());
+    e.set_data(to_trace_struct(pf_info));
     e.commit();
   }
 }
--- a/src/share/vm/gc_implementation/shared/promotionFailedInfo.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PROMOTIONFAILEDINFO_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PROMOTIONFAILEDINFO_HPP
-
-#include "runtime/thread.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class PromotionFailedInfo VALUE_OBJ_CLASS_SPEC {
-  size_t    _first_size;
-  size_t    _smallest_size;
-  size_t    _total_size;
-  uint      _count;
-  OSThread* _thread;
-
- public:
-  PromotionFailedInfo() : _first_size(0), _smallest_size(0), _total_size(0), _count(0), _thread(NULL) {}
-
-  void register_promotion_failed(size_t size) {
-    if (_first_size == 0) {
-      _first_size = size;
-      _smallest_size = size;
-      _thread = Thread::current()->osthread();
-    } else if (size < _smallest_size) {
-      _smallest_size = size;
-    }
-    _total_size += size;
-    _count++;
-    assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local.");
-  }
-
-  void reset() {
-    _first_size = 0;
-    _smallest_size = 0;
-    _total_size = 0;
-    _count = 0;
-    _thread = NULL;
-  }
-
-  bool promotion_failed() const { return _count != 0; }
-  size_t first_size() const { return _first_size; }
-  size_t smallest_size() const { return _smallest_size; }
-  size_t total_size() const { return _total_size; }
-  uint promotion_failed_count() const { return _count; }
-  OSThread* thread() const { return _thread; }
-};
-
-#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_PROMOTIONFAILEDINFO_HPP */
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -197,7 +197,8 @@
                                     int caller_actual_parameters,
                                     int callee_params,
                                     int callee_locals,
-                                    bool is_top_frame) {
+                                    bool is_top_frame,
+                                    bool is_bottom_frame) {
     return layout_activation(method,
                              temps,
                              popframe_args,
@@ -207,7 +208,8 @@
                              callee_locals,
                              (frame*)NULL,
                              (frame*)NULL,
-                             is_top_frame);
+                             is_top_frame,
+                             is_bottom_frame);
   }
 
   static int       layout_activation(methodOop method,
@@ -219,7 +221,8 @@
                                      int callee_locals,
                                      frame* caller,
                                      frame* interpreter_frame,
-                                     bool is_top_frame);
+                                     bool is_top_frame,
+                                     bool is_bottom_frame);
 
   // Runtime support
   static bool       is_not_reached(                       methodHandle method, int bci);
--- a/src/share/vm/interpreter/interpreter.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/interpreter/interpreter.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -74,7 +74,7 @@
 
   if (PrintInterpreter) {
     st->cr();
-    Disassembler::decode(code_begin(), code_end(), st, DEBUG_ONLY(_comments) NOT_DEBUG(CodeComments()));
+    Disassembler::decode(code_begin(), code_end(), st, DEBUG_ONLY(_strings) NOT_DEBUG(CodeStrings()));
   }
 }
 
--- a/src/share/vm/interpreter/interpreter.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/interpreter/interpreter.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -48,12 +48,12 @@
   int         _size;                             // the size in bytes
   const char* _description;                      // a description of the codelet, for debugging & printing
   Bytecodes::Code _bytecode;                     // associated bytecode if any
-  DEBUG_ONLY(CodeComments _comments;)            // Comments for annotating assembler output.
+  DEBUG_ONLY(CodeStrings _strings;)              // Comments for annotating assembler output.
 
  public:
   // Initialization/finalization
   void    initialize(int size,
-                     CodeComments& comments)     { _size = size; DEBUG_ONLY(_comments.assign(comments);) }
+                     CodeStrings& strings)       { _size = size; DEBUG_ONLY(_strings.assign(strings);) }
   void    finalize()                             { ShouldNotCallThis(); }
 
   // General info/converters
@@ -131,7 +131,7 @@
 
 
     // commit Codelet
-    AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->comments());
+    AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->strings());
     // make sure nobody can use _masm outside a CodeletMark lifespan
     *_masm = NULL;
   }
--- a/src/share/vm/memory/defNewGeneration.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -735,7 +735,7 @@
                         old->size());
   }
   _promotion_failed = true;
-  _promotion_failed_info.register_promotion_failed(old->size());
+  _promotion_failed_info.register_copy_failure(old->size());
   preserve_mark_if_necessary(old, old->mark());
   // forward to self
   old->forward_to(old);
--- a/src/share/vm/memory/defNewGeneration.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/memory/defNewGeneration.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -28,7 +28,7 @@
 #include "gc_implementation/shared/ageTable.hpp"
 #include "gc_implementation/shared/cSpaceCounters.hpp"
 #include "gc_implementation/shared/generationCounters.hpp"
-#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/generation.inline.hpp"
 #include "utilities/stack.hpp"
 
--- a/src/share/vm/oops/methodDataOop.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/oops/methodDataOop.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1502,7 +1502,7 @@
   void inc_decompile_count() {
     _nof_decompiles += 1;
     if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
-      method()->set_not_compilable(CompLevel_full_optimization);
+      method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff");
     }
   }
 
--- a/src/share/vm/oops/methodOop.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -613,7 +613,7 @@
 }
 
 
-void methodOopDesc::print_made_not_compilable(int comp_level, bool is_osr, bool report) {
+void methodOopDesc::print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason) {
   if (PrintCompilation && report) {
     ttyLocker ttyl;
     tty->print("made not %scompilable on ", is_osr ? "OSR " : "");
@@ -627,14 +627,21 @@
     }
     this->print_short_name(tty);
     int size = this->code_size();
-    if (size > 0)
+    if (size > 0) {
       tty->print(" (%d bytes)", size);
+    }
+    if (reason != NULL) {
+      tty->print("   %s", reason);
+    }
     tty->cr();
   }
   if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
     ttyLocker ttyl;
     xtty->begin_elem("make_not_%scompilable thread='" UINTX_FORMAT "'",
                      is_osr ? "osr_" : "", os::current_thread_id());
+    if (reason != NULL) {
+      xtty->print(" reason=\'%s\'", reason);
+    }
     xtty->method(methodOop(this));
     xtty->stamp();
     xtty->end_elem();
@@ -656,8 +663,8 @@
 }
 
 // call this when compiler finds that this method is not compilable
-void methodOopDesc::set_not_compilable(int comp_level, bool report) {
-  print_made_not_compilable(comp_level, /*is_osr*/ false, report);
+void methodOopDesc::set_not_compilable(int comp_level, bool report, const char* reason) {
+  print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_compilable();
     set_not_c2_compilable();
@@ -682,8 +689,8 @@
   return false;
 }
 
-void methodOopDesc::set_not_osr_compilable(int comp_level, bool report) {
-  print_made_not_compilable(comp_level, /*is_osr*/ true, report);
+void methodOopDesc::set_not_osr_compilable(int comp_level, bool report, const char* reason) {
+  print_made_not_compilable(comp_level, /*is_osr*/ true, report, reason);
   if (comp_level == CompLevel_all) {
     set_not_c1_osr_compilable();
     set_not_c2_osr_compilable();
--- a/src/share/vm/oops/methodOop.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -678,18 +678,18 @@
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
   bool  is_not_compilable(int comp_level = CompLevel_any) const;
-  void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
+  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
   void set_not_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_compilable(comp_level, false);
   }
   bool  is_not_osr_compilable(int comp_level = CompLevel_any) const;
-  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true);
+  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
   void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
     set_not_osr_compilable(comp_level, false);
   }
 
  private:
-  void print_made_not_compilable(int comp_level, bool is_osr, bool report);
+  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
 
  public:
   bool  is_not_c1_compilable() const          { return access_flags().is_not_c1_compilable(); }
--- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -46,7 +46,8 @@
   _site_invoke_ratio(site_invoke_ratio),
   _max_inline_level(max_inline_level),
   _count_inline_bcs(method()->code_size_for_inlining()),
-  _subtrees(c->comp_arena(), 2, 0, NULL)
+  _subtrees(c->comp_arena(), 2, 0, NULL),
+  _msg(NULL)
 {
   NOT_PRODUCT(_count_inlines = 0;)
   if (_caller_jvms != NULL) {
@@ -76,7 +77,8 @@
   _method(callee_method),
   _site_invoke_ratio(site_invoke_ratio),
   _max_inline_level(max_inline_level),
-  _count_inline_bcs(method()->code_size())
+  _count_inline_bcs(method()->code_size()),
+  _msg(NULL)
 {
   NOT_PRODUCT(_count_inlines = 0;)
   assert(!UseOldInlining, "do not use for old stuff");
@@ -94,8 +96,10 @@
          );
 }
 
-// positive filter: should callee be inlined?  returns NULL, if yes, or rejection msg
-const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
+// positive filter: should callee be inlined?
+bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
+                               int caller_bci, ciCallProfile& profile,
+                               WarmCallInfo* wci_result) {
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
@@ -103,11 +107,10 @@
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
-    return NULL;
+    set_msg("force inline by CompilerOracle");
+    return true;
   }
 
-  // positive filter: should send be inlined?  returns NULL (--> yes)
-  // or rejection msg
   int size = callee_method->code_size_for_inlining();
 
   // Check for too many throws (and not too huge)
@@ -118,11 +121,13 @@
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
-    return NULL;
+    set_msg("many throws");
+    return true;
   }
 
   if (!UseOldInlining) {
-    return NULL;  // size and frequency are represented in a new way
+    set_msg("!UseOldInlining");
+    return true;  // size and frequency are represented in a new way
   }
 
   int default_max_inline_size = C->max_inline_size();
@@ -151,32 +156,46 @@
   } else {
     // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
     if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size(CompLevel_full_optimization) > inline_small_code_size)
-      return "already compiled into a medium method";
+        callee_method->instructions_size(CompLevel_full_optimization) > inline_small_code_size) {
+      set_msg("already compiled into a medium method");
+      return false;
+    }
   }
   if (size > max_inline_size) {
-    if (max_inline_size > default_max_inline_size)
-      return "hot method too big";
-    return "too big";
+    if (max_inline_size > default_max_inline_size) {
+      set_msg("hot method too big");
+    } else {
+      set_msg("too big");
+    }
+    return false;
   }
-  return NULL;
+  return true;
 }
 
 
-// negative filter: should callee NOT be inlined?  returns NULL, ok to inline, or rejection msg
-const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
-  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
+// negative filter: should callee NOT be inlined?
+bool InlineTree::should_not_inline(ciMethod *callee_method,
+                                   ciMethod* caller_method,
+                                   WarmCallInfo* wci_result) {
+
+  const char* fail_msg = NULL;
+
+  // First check all inlining restrictions which are required for correctness
+  if ( callee_method->is_abstract()) {
+    fail_msg = "abstract method"; // // note: we allow ik->is_abstract()
+  } else if (!callee_method->holder()->is_initialized()) {
+    fail_msg = "method holder not initialized";
+  } else if ( callee_method->is_native()) {
+    fail_msg = "native method";
+  } else if ( callee_method->dont_inline()) {
+    fail_msg = "don't inline by annotation";
+  }
+
   if (!UseOldInlining) {
-    const char* fail = NULL;
-    if ( callee_method->is_abstract())               fail = "abstract method";
-    // note: we allow ik->is_abstract()
-    if (!callee_method->holder()->is_initialized())  fail = "method holder not initialized";
-    if ( callee_method->is_native())                 fail = "native method";
-    if ( callee_method->dont_inline())               fail = "don't inline by annotation";
-
-    if (fail) {
+    if (fail_msg != NULL) {
       *wci_result = *(WarmCallInfo::always_cold());
-      return fail;
+      set_msg(fail_msg);
+      return true;
     }
 
     if (callee_method->has_unloaded_classes_in_signature()) {
@@ -198,20 +217,23 @@
       // %%% adjust wci_result->size()?
     }
 
-    return NULL;
+    return false;
+  }
+
+  // one more inlining restriction
+  if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
+    fail_msg = "unloaded signature classes";
   }
 
-  // First check all inlining restrictions which are required for correctness
-  if ( callee_method->is_abstract())                        return "abstract method";
-  // note: we allow ik->is_abstract()
-  if (!callee_method->holder()->is_initialized())           return "method holder not initialized";
-  if ( callee_method->is_native())                          return "native method";
-  if ( callee_method->dont_inline())                        return "don't inline by annotation";
-  if ( callee_method->has_unloaded_classes_in_signature())  return "unloaded signature classes";
+  if (fail_msg != NULL) {
+    set_msg(fail_msg);
+    return true;
+  }
 
+  // ignore heuristic controls on inlining
   if (callee_method->should_inline()) {
-    // ignore heuristic controls on inlining
-    return NULL;
+    set_msg("force inline by CompilerOracle");
+    return false;
   }
 
   // Now perform checks which are heuristic
@@ -219,7 +241,8 @@
   if (!callee_method->force_inline()) {
     if (callee_method->has_compiled_code() &&
         callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
-    return "already compiled into a big method";
+      set_msg("already compiled into a big method");
+      return true;
     }
   }
 
@@ -230,31 +253,38 @@
     const InlineTree *top = this;
     while (top->caller_tree() != NULL) top = top->caller_tree();
     ciInstanceKlass* k = top->method()->holder();
-    if (!k->is_subclass_of(C->env()->Throwable_klass()))
-      return "exception method";
+    if (!k->is_subclass_of(C->env()->Throwable_klass())) {
+      set_msg("exception method");
+      return true;
+    }
   }
 
   if (callee_method->should_not_inline()) {
-    return "disallowed by CompilerOracle";
+    set_msg("disallowed by CompilerOracle");
+    return true;
   }
 
   if (UseStringCache) {
     // Do not inline StringCache::profile() method used only at the beginning.
     if (callee_method->name() == ciSymbol::profile_name() &&
         callee_method->holder()->name() == ciSymbol::java_lang_StringCache()) {
-      return "profiling method";
+      set_msg("profiling method");
+      return true;
     }
   }
 
   // use frequency-based objections only for non-trivial methods
-  if (callee_method->code_size() <= MaxTrivialSize) return NULL;
+  if (callee_method->code_size() <= MaxTrivialSize) {
+    return false;
+  }
 
   // don't use counts with -Xcomp or CTW
   if (UseInterpreter && !CompileTheWorld) {
 
     if (!callee_method->has_compiled_code() &&
         !callee_method->was_executed_more_than(0)) {
-      return "never executed";
+      set_msg("never executed");
+      return true;
     }
 
     if (is_init_with_ea(callee_method, caller_method, C)) {
@@ -263,39 +293,44 @@
 
     } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold,
                                                            CompileThreshold >> 1))) {
-      return "executed < MinInliningThreshold times";
+      set_msg("executed < MinInliningThreshold times");
+      return true;
     }
   }
 
-  return NULL;
+  return false;
 }
 
 //-----------------------------try_to_inline-----------------------------------
-// return NULL if ok, reason for not inlining otherwise
+// return true if ok
 // Relocated from "InliningClosure::try_to_inline"
-const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay) {
-  // Old algorithm had funny accumulating BC-size counters
+bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
+                               int caller_bci, ciCallProfile& profile,
+                               WarmCallInfo* wci_result, bool& should_delay) {
+
+   // Old algorithm had funny accumulating BC-size counters
   if (UseOldInlining && ClipInlining
       && (int)count_inline_bcs() >= DesiredMethodLimit) {
     if (!callee_method->force_inline() || !IncrementalInline) {
-      return "size > DesiredMethodLimit";
+      set_msg("size > DesiredMethodLimit");
+      return false;
     } else if (!C->inlining_incrementally()) {
       should_delay = true;
     }
   }
 
-  const char *msg = NULL;
-  msg = should_inline(callee_method, caller_method, caller_bci, profile, wci_result);
-  if (msg != NULL)
-    return msg;
-
-  msg = should_not_inline(callee_method, caller_method, wci_result);
-  if (msg != NULL)
-    return msg;
+  if (!should_inline(callee_method, caller_method, caller_bci, profile,
+                     wci_result)) {
+    return false;
+  }
+  if (should_not_inline(callee_method, caller_method, wci_result)) {
+    return false;
+  }
 
   if (InlineAccessors && callee_method->is_accessor()) {
     // accessor methods are not subject to any of the following limits.
-    return NULL;
+    set_msg("accessor");
+    return true;
   }
 
   // suppress a few checks for accessors and trivial methods
@@ -305,7 +340,8 @@
     if (C->over_inlining_cutoff()) {
       if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form())
           || !IncrementalInline) {
-        return "NodeCountInliningCutoff";
+        set_msg("NodeCountInliningCutoff");
+        return false;
       } else {
         should_delay = true;
       }
@@ -319,16 +355,19 @@
 
     } else if (profile.count() == 0) {
       // don't inline unreached call sites
-      return "call site not reached";
+       set_msg("call site not reached");
+       return false;
     }
   }
 
   if (!C->do_inlining() && InlineAccessors) {
-    return "not an accessor";
+    set_msg("not an accessor");
+    return false;
   }
   if (inline_level() > _max_inline_level) {
     if (!callee_method->force_inline() || !IncrementalInline) {
-      return "inlining too deep";
+      set_msg("inlining too deep");
+      return false;
     } else if (!C->inlining_incrementally()) {
       should_delay = true;
     }
@@ -338,15 +377,19 @@
   if (!callee_method->is_compiled_lambda_form()) {
     // count the current method and the callee
     int inline_level = (method() == callee_method) ? 1 : 0;
-    if (inline_level > MaxRecursiveInlineLevel)
-      return "recursively inlining too deep";
+    if (inline_level > MaxRecursiveInlineLevel) {
+      set_msg("recursively inlining too deep");
+      return false;
+    }
     // count callers of current method and callee
     JVMState* jvms = caller_jvms();
     while (jvms != NULL && jvms->has_method()) {
       if (jvms->method() == callee_method) {
         inline_level++;
-        if (inline_level > MaxRecursiveInlineLevel)
-          return "recursively inlining too deep";
+        if (inline_level > MaxRecursiveInlineLevel) {
+          set_msg("recursively inlining too deep");
+          return false;
+        }
       }
       jvms = jvms->caller();
     }
@@ -357,14 +400,15 @@
   if (UseOldInlining && ClipInlining
       && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
     if (!callee_method->force_inline() || !IncrementalInline) {
-      return "size > DesiredMethodLimit";
+      set_msg("size > DesiredMethodLimit");
+      return false;
     } else if (!C->inlining_incrementally()) {
       should_delay = true;
     }
   }
 
   // ok, inline this method
-  return NULL;
+  return true;
 }
 
 //------------------------------pass_initial_checks----------------------------
@@ -413,14 +457,25 @@
 }
 
 //------------------------------print_inlining---------------------------------
-// Really, the failure_msg can be a success message also.
-void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
-  C->print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
-  if (callee_method == NULL)  tty->print(" callee not monotonic or profiled");
-  if (Verbose && callee_method) {
-    const InlineTree *top = this;
-    while( top->caller_tree() != NULL ) { top = top->caller_tree(); }
-    //tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
+void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
+                                bool success) const {
+  const char* inline_msg = msg();
+  assert(inline_msg != NULL, "just checking");
+  if (C->log() != NULL) {
+    if (success) {
+      C->log()->inline_success(inline_msg);
+    } else {
+      C->log()->inline_fail(inline_msg);
+    }
+  }
+  if (PrintInlining) {
+    C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
+    if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
+    if (Verbose && callee_method) {
+      const InlineTree *top = this;
+      while( top->caller_tree() != NULL ) { top = top->caller_tree(); }
+      //tty->print("  bcs: %d+%d  invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count());
+    }
   }
 }
 
@@ -438,48 +493,50 @@
   }
   assert(_method == jvms->method(), "redundant instance state");
 #endif
-  const char *failure_msg   = NULL;
   int         caller_bci    = jvms->bci();
-  ciMethod   *caller_method = jvms->method();
+  ciMethod*   caller_method = jvms->method();
 
   // Do some initial checks.
   if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
-    if (PrintInlining)  print_inlining(callee_method, caller_bci, "failed initial checks");
+    set_msg("failed initial checks");
+    print_inlining(callee_method, caller_bci, false /* !success */);
     return NULL;
   }
 
   // Do some parse checks.
-  failure_msg = check_can_parse(callee_method);
-  if (failure_msg != NULL) {
-    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+  set_msg(check_can_parse(callee_method));
+  if (msg() != NULL) {
+    print_inlining(callee_method, caller_bci, false /* !success */);
     return NULL;
   }
 
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
-  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci, should_delay);
-  if (failure_msg != NULL && C->log() != NULL) {
-    C->log()->inline_fail(failure_msg);
-  }
+  bool success = try_to_inline(callee_method, caller_method, caller_bci,
+                               profile, &wci, should_delay);
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
       && (PrintOpto || PrintOptoInlining || PrintInlining)) {
     bool cold = wci.is_cold();
     bool hot  = !cold && wci.is_hot();
-    bool old_cold = (failure_msg != NULL);
+    bool old_cold = !success;
     if (old_cold != cold || (Verbose || WizardMode)) {
+      if (msg() == NULL) {
+        set_msg("OK");
+      }
       tty->print("   OldInlining= %4s : %s\n           WCI=",
-                 old_cold ? "cold" : "hot", failure_msg ? failure_msg : "OK");
+                 old_cold ? "cold" : "hot", msg());
       wci.print();
     }
   }
 #endif
   if (UseOldInlining) {
-    if (failure_msg == NULL)
+    if (success) {
       wci = *(WarmCallInfo::always_hot());
-    else
+    } else {
       wci = *(WarmCallInfo::always_cold());
+    }
   }
   if (!InlineWarmCalls) {
     if (!wci.is_cold() && !wci.is_hot()) {
@@ -489,11 +546,11 @@
   }
 
   if (!wci.is_cold()) {
-    // In -UseOldInlining, the failure_msg may also be a success message.
-    if (failure_msg == NULL)  failure_msg = "inline (hot)";
-
     // Inline!
-    if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+    if (msg() == NULL) {
+      set_msg("inline (hot)");
+    }
+    print_inlining(callee_method, caller_bci, true /* success */);
     if (UseOldInlining)
       build_inline_tree_for_callee(callee_method, jvms, caller_bci);
     if (InlineWarmCalls && !wci.is_hot())
@@ -502,8 +559,10 @@
   }
 
   // Do not inline
-  if (failure_msg == NULL)  failure_msg = "too cold to inline";
-  if (PrintInlining)  print_inlining(callee_method, caller_bci, failure_msg);
+  if (msg() == NULL) {
+    set_msg("too cold to inline");
+  }
+  print_inlining(callee_method, caller_bci, false /* !success */ );
   return NULL;
 }
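The bytecodeInfo.cpp hunks above convert the inlining filters from "return NULL or a rejection string" to "return a bool and record the message with set_msg()", so one message can later feed both PrintInlining output and the compilation log. A minimal standalone sketch of that decision-plus-message pattern, with illustrative names that are not the InlineTree API:

#include <cstdio>

// Illustrative decision object: the verdict is a bool, the explanation is kept
// separately so success messages ("inline (hot)") and rejection reasons share
// a single reporting channel.
class InlineDecisionSketch {
  const char* _msg;
 public:
  InlineDecisionSketch() : _msg(nullptr) {}
  void set_msg(const char* m) { _msg = m; }
  const char* msg() const     { return _msg != nullptr ? _msg : "OK"; }

  bool should_inline(int code_size, int max_size) {
    if (code_size > max_size) { set_msg("too big"); return false; }
    set_msg("inline (hot)");
    return true;
  }
};

int main() {
  InlineDecisionSketch d;
  bool ok = d.should_inline(/*code_size=*/600, /*max_size=*/325);
  std::printf("%s: %s\n", ok ? "inlined" : "rejected", d.msg());
  return 0;
}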
 
--- a/src/share/vm/opto/c2_globals.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/c2_globals.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -615,6 +615,9 @@
                                                                             \
   product(intx, LiveNodeCountInliningCutoff, 20000,                         \
           "max number of live nodes in a method")                           \
+                                                                            \
+  diagnostic(bool, OptimizeExpensiveOps, true,                              \
+          "Find best control for expensive operations")                     \
 
 
 C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
--- a/src/share/vm/opto/callnode.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/callnode.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -165,13 +165,13 @@
 
 
 #ifndef PRODUCT
-void ReturnNode::dump_req() const {
+void ReturnNode::dump_req(outputStream *st) const {
   // Dump the required inputs, enclosed in '(' and ')'
   uint i;                       // Exit value of loop
-  for( i=0; i<req(); i++ ) {    // For all required inputs
-    if( i == TypeFunc::Parms ) tty->print("returns");
-    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
-    else tty->print("_ ");
+  for (i = 0; i < req(); i++) {    // For all required inputs
+    if (i == TypeFunc::Parms) st->print("returns");
+    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
+    else st->print("_ ");
   }
 }
 #endif
@@ -208,13 +208,13 @@
 }
 
 #ifndef PRODUCT
-void RethrowNode::dump_req() const {
+void RethrowNode::dump_req(outputStream *st) const {
   // Dump the required inputs, enclosed in '(' and ')'
   uint i;                       // Exit value of loop
-  for( i=0; i<req(); i++ ) {    // For all required inputs
-    if( i == TypeFunc::Parms ) tty->print("exception");
-    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
-    else tty->print("_ ");
+  for (i = 0; i < req(); i++) {    // For all required inputs
+    if (i == TypeFunc::Parms) st->print("exception");
+    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
+    else st->print("_ ");
   }
 }
 #endif
@@ -330,7 +330,8 @@
     st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
     return;
   }
-  if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
+  if (regalloc->node_regs_max_index() > 0 &&
+      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
     char buf[50];
     regalloc->dump_register(n,buf);
     st->print(" %s%d]=%s",msg,i,buf);
@@ -376,7 +377,7 @@
 //------------------------------format-----------------------------------------
 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
   st->print("        #");
-  if( _method ) {
+  if (_method) {
     _method->print_short_name(st);
     st->print(" @ bci:%d ",_bci);
   } else {
@@ -388,21 +389,22 @@
     MachSafePointNode *mcall = n->as_MachSafePoint();
     uint i;
     // Print locals
-    for( i = 0; i < (uint)loc_size(); i++ )
-      format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
+    for (i = 0; i < (uint)loc_size(); i++)
+      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
     // Print stack
     for (i = 0; i < (uint)stk_size(); i++) {
       if ((uint)(_stkoff + i) >= mcall->len())
         st->print(" oob ");
       else
-       format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
+       format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
     }
     for (i = 0; (int)i < nof_monitors(); i++) {
       Node *box = mcall->monitor_box(this, i);
       Node *obj = mcall->monitor_obj(this, i);
-      if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
+      if (regalloc->node_regs_max_index() > 0 &&
+          OptoReg::is_valid(regalloc->get_reg_first(box))) {
         box = BoxLockNode::box_node(box);
-        format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
+        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
       } else {
         OptoReg::Name box_reg = BoxLockNode::reg(box);
         st->print(" MON-BOX%d=%s+%d",
@@ -415,7 +417,7 @@
         if (BoxLockNode::box_node(box)->is_eliminated())
           obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
       }
-      format_helper( regalloc, st, obj, obj_msg, i, &scobjs );
+      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
     }
 
     for (i = 0; i < (uint)scobjs.length(); i++) {
@@ -458,9 +460,9 @@
           st->print(" [");
           cifield = iklass->nonstatic_field_at(0);
           cifield->print_name_on(st);
-          format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
+          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
         } else {
-          format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
+          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
         }
         for (uint j = 1; j < nf; j++) {
           fld_node = mcall->in(first_ind+j);
@@ -468,9 +470,9 @@
             st->print(", [");
             cifield = iklass->nonstatic_field_at(j);
             cifield->print_name_on(st);
-            format_helper( regalloc, st, fld_node, ":", j, &scobjs );
+            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
           } else {
-            format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
+            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
           }
         }
       }
@@ -478,7 +480,7 @@
     }
   }
   st->print_cr("");
-  if (caller() != NULL)  caller()->format(regalloc, n, st);
+  if (caller() != NULL) caller()->format(regalloc, n, st);
 }
 
 
@@ -581,15 +583,15 @@
 uint CallNode::cmp( const Node &n ) const
 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
 #ifndef PRODUCT
-void CallNode::dump_req() const {
+void CallNode::dump_req(outputStream *st) const {
   // Dump the required inputs, enclosed in '(' and ')'
   uint i;                       // Exit value of loop
-  for( i=0; i<req(); i++ ) {    // For all required inputs
-    if( i == TypeFunc::Parms ) tty->print("(");
-    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
-    else tty->print("_ ");
+  for (i = 0; i < req(); i++) {    // For all required inputs
+    if (i == TypeFunc::Parms) st->print("(");
+    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
+    else st->print("_ ");
   }
-  tty->print(")");
+  st->print(")");
 }
 
 void CallNode::dump_spec(outputStream *st) const {
--- a/src/share/vm/opto/callnode.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/callnode.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -126,7 +126,7 @@
   virtual uint ideal_reg() const { return NotAMachineReg; }
   virtual uint match_edge(uint idx) const;
 #ifndef PRODUCT
-  virtual void dump_req() const;
+  virtual void dump_req(outputStream *st = tty) const;
 #endif
 };
 
@@ -147,7 +147,7 @@
   virtual uint match_edge(uint idx) const;
   virtual uint ideal_reg() const { return NotAMachineReg; }
 #ifndef PRODUCT
-  virtual void dump_req() const;
+  virtual void dump_req(outputStream *st = tty) const;
 #endif
 };
 
@@ -579,7 +579,7 @@
   virtual uint match_edge(uint idx) const;
 
 #ifndef PRODUCT
-  virtual void        dump_req()  const;
+  virtual void        dump_req(outputStream *st = tty) const;
   virtual void        dump_spec(outputStream *st) const;
 #endif
 };
--- a/src/share/vm/opto/compile.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/compile.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -408,6 +408,13 @@
       remove_macro_node(n);
     }
   }
+  // Remove useless expensive node
+  for (int i = C->expensive_count()-1; i >= 0; i--) {
+    Node* n = C->expensive_node(i);
+    if (!useful.member(n)) {
+      remove_expensive_node(n);
+    }
+  }
   // clean up the late inline lists
   remove_useless_late_inlines(&_string_late_inlines, useful);
   remove_useless_late_inlines(&_late_inlines, useful);
@@ -1071,6 +1078,7 @@
   _intrinsics = NULL;
   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   register_library_intrinsics();
 }
 
@@ -1937,6 +1945,10 @@
 
   if (failing())  return;
 
+  // No more new expensive nodes will be added to the list from here
+  // so keep only the actual candidates for optimizations.
+  cleanup_expensive_nodes(igvn);
+
   // Perform escape analysis
   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
     if (has_loops()) {
@@ -3002,6 +3014,15 @@
     return true;
   }
 
+  // Expensive nodes have their control input set to prevent the GVN
+  // from freely commoning them. There's no GVN beyond this point so
+  // no need to keep the control input. We want the expensive nodes to
+  // be freely moved to the least frequent code path by gcm.
+  assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
+  for (int i = 0; i < expensive_count(); i++) {
+    _expensive_nodes->at(i)->set_req(0, NULL);
+  }
+
   Final_Reshape_Counts frc;
 
   // Visit everybody reachable!
@@ -3510,3 +3531,126 @@
     }
   }
 }
+
+int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
+  if (n1->Opcode() < n2->Opcode())      return -1;
+  else if (n1->Opcode() > n2->Opcode()) return 1;
+
+  assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
+  for (uint i = 1; i < n1->req(); i++) {
+    if (n1->in(i) < n2->in(i))      return -1;
+    else if (n1->in(i) > n2->in(i)) return 1;
+  }
+
+  return 0;
+}
+
+int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
+  Node* n1 = *n1p;
+  Node* n2 = *n2p;
+
+  return cmp_expensive_nodes(n1, n2);
+}
+
+void Compile::sort_expensive_nodes() {
+  if (!expensive_nodes_sorted()) {
+    _expensive_nodes->sort(cmp_expensive_nodes);
+  }
+}
+
+bool Compile::expensive_nodes_sorted() const {
+  for (int i = 1; i < _expensive_nodes->length(); i++) {
+    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
+  if (_expensive_nodes->length() == 0) {
+    return false;
+  }
+
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  // Take this opportunity to remove dead nodes from the list
+  int j = 0;
+  for (int i = 0; i < _expensive_nodes->length(); i++) {
+    Node* n = _expensive_nodes->at(i);
+    if (!n->is_unreachable(igvn)) {
+      assert(n->is_expensive(), "should be expensive");
+      _expensive_nodes->at_put(j, n);
+      j++;
+    }
+  }
+  _expensive_nodes->trunc_to(j);
+
+  // Then sort the list so that similar nodes are next to each other
+  // and check for at least two nodes of identical kind with same data
+  // inputs.
+  sort_expensive_nodes();
+
+  for (int i = 0; i < _expensive_nodes->length()-1; i++) {
+    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
+  if (_expensive_nodes->length() == 0) {
+    return;
+  }
+
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  // Sort to bring similar nodes next to each other and clear the
+  // control input of nodes for which there's only a single copy.
+  sort_expensive_nodes();
+
+  int j = 0;
+  int identical = 0;
+  int i = 0;
+  for (; i < _expensive_nodes->length()-1; i++) {
+    assert(j <= i, "can't write beyond current index");
+    if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
+      identical++;
+      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
+      continue;
+    }
+    if (identical > 0) {
+      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
+      identical = 0;
+    } else {
+      Node* n = _expensive_nodes->at(i);
+      igvn.hash_delete(n);
+      n->set_req(0, NULL);
+      igvn.hash_insert(n);
+    }
+  }
+  if (identical > 0) {
+    _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
+  } else if (_expensive_nodes->length() >= 1) {
+    Node* n = _expensive_nodes->at(i);
+    igvn.hash_delete(n);
+    n->set_req(0, NULL);
+    igvn.hash_insert(n);
+  }
+  _expensive_nodes->trunc_to(j);
+}
+
+void Compile::add_expensive_node(Node * n) {
+  assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list");
+  assert(n->is_expensive(), "expensive nodes with non-null control here only");
+  assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
+  if (OptimizeExpensiveOps) {
+    _expensive_nodes->append(n);
+  } else {
+    // Clear control input and let IGVN optimize expensive nodes if
+    // OptimizeExpensiveOps is off.
+    n->set_req(0, NULL);
+  }
+}
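Compile's new expensive-node bookkeeping above relies on a simple idiom: sort the list with a comparator that orders by opcode and data inputs so identical candidates become adjacent, then a single linear scan decides whether commoning is worth attempting. A standalone sketch of that sort-and-scan step, with plain integers standing in for node keys (not HotSpot code):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Each entry stands in for an expensive node's "opcode + data inputs" key.
// After sorting, any two entries that compare equal are adjacent, so one
// linear pass is enough to detect candidates for commoning.
static bool has_adjacent_duplicate(std::vector<int>& keys) {
  std::sort(keys.begin(), keys.end());
  for (std::size_t i = 0; i + 1 < keys.size(); i++) {
    if (keys[i] == keys[i + 1]) {
      return true;  // at least two identical candidates -> worth optimizing
    }
  }
  return false;
}

int main() {
  std::vector<int> keys = {42, 7, 42, 13};   // two identical "nodes"
  std::printf("candidates: %s\n", has_adjacent_duplicate(keys) ? "yes" : "no");
  return 0;
}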
--- a/src/share/vm/opto/compile.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/compile.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -300,6 +300,7 @@
   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
+  GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
   ConnectionGraph*      _congraph;
 #ifndef PRODUCT
   IdealGraphPrinter*    _printer;
@@ -385,6 +386,13 @@
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
   int _print_inlining;
 
+  // Only keep nodes in the expensive node list that need to be optimized
+  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
+  // Use for sorting expensive nodes to bring similar nodes together
+  static int cmp_expensive_nodes(Node** n1, Node** n2);
+  // Expensive nodes list already sorted?
+  bool expensive_nodes_sorted() const;
+
  public:
 
   outputStream* print_inlining_stream() const {
@@ -586,8 +594,10 @@
 
   int           macro_count()                   { return _macro_nodes->length(); }
   int           predicate_count()               { return _predicate_opaqs->length();}
+  int           expensive_count()               { return _expensive_nodes->length(); }
   Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
   Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
+  Node*         expensive_node(int idx)         { return _expensive_nodes->at(idx); }
   ConnectionGraph* congraph()                   { return _congraph;}
   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
   void add_macro_node(Node * n) {
@@ -605,6 +615,12 @@
       _predicate_opaqs->remove(n);
     }
   }
+  void add_expensive_node(Node * n);
+  void remove_expensive_node(Node * n) {
+    if (_expensive_nodes->contains(n)) {
+      _expensive_nodes->remove(n);
+    }
+  }
   void add_predicate_opaq(Node * n) {
     assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
     assert(_macro_nodes->contains(n), "should have already been in macro list");
@@ -617,6 +633,13 @@
     return _predicate_opaqs->contains(n);
   }
 
+  // Are there candidate expensive nodes for optimization?
+  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
+  // Check whether n1 and n2 are similar
+  static int cmp_expensive_nodes(Node* n1, Node* n2);
+  // Sort expensive nodes to locate similar expensive nodes
+  void sort_expensive_nodes();
+
   // Compilation environment.
   Arena*            comp_arena()                { return &_comp_arena; }
   ciEnv*            env() const                 { return _env; }
@@ -668,6 +691,7 @@
   void         record_dead_node(uint idx)  { if (_dead_node_list.test_set(idx)) return;
                                              _dead_node_count++;
                                            }
+  bool         is_dead_node(uint idx)      { return _dead_node_list.test(idx) != 0; }
   uint         dead_node_count()           { return _dead_node_count; }
   void         reset_dead_node_list()      { _dead_node_list.Reset();
                                              _dead_node_count = 0;
--- a/src/share/vm/opto/library_call.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/library_call.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1466,10 +1466,10 @@
   Node* arg = round_double_node(argument(0));
   Node* n;
   switch (id) {
-  case vmIntrinsics::_dabs:   n = new (C) AbsDNode(    arg);  break;
-  case vmIntrinsics::_dsqrt:  n = new (C) SqrtDNode(0, arg);  break;
-  case vmIntrinsics::_dlog:   n = new (C) LogDNode(    arg);  break;
-  case vmIntrinsics::_dlog10: n = new (C) Log10DNode(  arg);  break;
+  case vmIntrinsics::_dabs:   n = new (C) AbsDNode(                arg);  break;
+  case vmIntrinsics::_dsqrt:  n = new (C) SqrtDNode(C, control(),  arg);  break;
+  case vmIntrinsics::_dlog:   n = new (C) LogDNode(C, control(),   arg);  break;
+  case vmIntrinsics::_dlog10: n = new (C) Log10DNode(C, control(), arg);  break;
   default:  fatal_unexpected_iid(id);  break;
   }
   set_result(_gvn.transform(n));
@@ -1484,9 +1484,9 @@
   Node* n = NULL;
 
   switch (id) {
-  case vmIntrinsics::_dsin:  n = new (C) SinDNode(arg);  break;
-  case vmIntrinsics::_dcos:  n = new (C) CosDNode(arg);  break;
-  case vmIntrinsics::_dtan:  n = new (C) TanDNode(arg);  break;
+  case vmIntrinsics::_dsin:  n = new (C) SinDNode(C, control(), arg);  break;
+  case vmIntrinsics::_dcos:  n = new (C) CosDNode(C, control(), arg);  break;
+  case vmIntrinsics::_dtan:  n = new (C) TanDNode(C, control(), arg);  break;
   default:  fatal_unexpected_iid(id);  break;
   }
   n = _gvn.transform(n);
@@ -1638,7 +1638,7 @@
 // really odd corner cases (+/- Infinity).  Just uncommon-trap them.
 bool LibraryCallKit::inline_exp() {
   Node* arg = round_double_node(argument(0));
-  Node* n   = _gvn.transform(new (C) ExpDNode(0, arg));
+  Node* n   = _gvn.transform(new (C) ExpDNode(C, control(), arg));
 
   finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
 
@@ -1673,7 +1673,7 @@
 
   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
     // Short form: skip the fancy tests and just check for NaN result.
-    result = _gvn.transform(new (C) PowDNode(0, x, y));
+    result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
   } else {
     // If this inlining ever returned NaN in the past, include all
     // checks + call to the runtime.
@@ -1700,7 +1700,7 @@
     Node *complex_path = _gvn.transform( new (C) IfTrueNode(if1) );
 
     // Set fast path result
-    Node *fast_result = _gvn.transform( new (C) PowDNode(0, x, y) );
+    Node *fast_result = _gvn.transform( new (C) PowDNode(C, control(), x, y) );
     phi->init_req(3, fast_result);
 
     // Complex path
@@ -1760,7 +1760,7 @@
     // abs(x)
     Node *absx=_gvn.transform( new (C) AbsDNode(x));
     // abs(x)^y
-    Node *absxpowy = _gvn.transform( new (C) PowDNode(0, absx, y) );
+    Node *absxpowy = _gvn.transform( new (C) PowDNode(C, control(), absx, y) );
     // -abs(x)^y
     Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
     // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
--- a/src/share/vm/opto/loopnode.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/loopnode.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -88,9 +88,9 @@
   assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
   uint i;
   Node *early;
-  if( n->in(0) ) {
+  if (n->in(0) && !n->is_expensive()) {
     early = n->in(0);
-    if( !early->is_CFG() ) // Might be a non-CFG multi-def
+    if (!early->is_CFG()) // Might be a non-CFG multi-def
       early = get_ctrl(early);        // So treat input as a straight data input
     i = 1;
   } else {
@@ -99,28 +99,28 @@
   }
   uint e_d = dom_depth(early);
   assert( early, "" );
-  for( ; i < n->req(); i++ ) {
+  for (; i < n->req(); i++) {
     Node *cin = get_ctrl(n->in(i));
     assert( cin, "" );
     // Keep deepest dominator depth
     uint c_d = dom_depth(cin);
-    if( c_d > e_d ) {           // Deeper guy?
+    if (c_d > e_d) {           // Deeper guy?
       early = cin;              // Keep deepest found so far
       e_d = c_d;
-    } else if( c_d == e_d &&    // Same depth?
-               early != cin ) { // If not equal, must use slower algorithm
+    } else if (c_d == e_d &&    // Same depth?
+               early != cin) { // If not equal, must use slower algorithm
       // If same depth but not equal, one _must_ dominate the other
       // and we want the deeper (i.e., dominated) guy.
       Node *n1 = early;
       Node *n2 = cin;
-      while( 1 ) {
+      while (1) {
         n1 = idom(n1);          // Walk up until break cycle
         n2 = idom(n2);
-        if( n1 == cin ||        // Walked early up to cin
-            dom_depth(n2) < c_d )
+        if (n1 == cin ||        // Walked early up to cin
+            dom_depth(n2) < c_d)
           break;                // early is deeper; keep him
-        if( n2 == early ||      // Walked cin up to early
-            dom_depth(n1) < c_d ) {
+        if (n2 == early ||      // Walked cin up to early
+            dom_depth(n1) < c_d) {
           early = cin;          // cin is deeper; keep him
           break;
         }
@@ -132,9 +132,108 @@
   // Return earliest legal location
   assert(early == find_non_split_ctrl(early), "unexpected early control");
 
+  if (n->is_expensive()) {
+    assert(n->in(0), "should have control input");
+    early = get_early_ctrl_for_expensive(n, early);
+  }
+
   return early;
 }
 
+//------------------------------get_early_ctrl_for_expensive---------------------------------
+// Move node up the dominator tree as high as legal while still beneficial
+Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
+  assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  Node* ctl = n->in(0);
+  assert(ctl->is_CFG(), "expensive input 0 must be cfg");
+  uint min_dom_depth = dom_depth(earliest);
+#ifdef ASSERT
+  if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
+    dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
+    assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
+  }
+#endif
+  if (dom_depth(ctl) < min_dom_depth) {
+    return earliest;
+  }
+
+  while (1) {
+    Node *next = ctl;
+    // Moving the node out of a loop on the projection of an If
+    // confuses loop predication. So once we hit a Loop in an If branch
+    // that doesn't branch to an UNC, we stop. The code that processes
+    // expensive nodes will notice the loop and skip over it to try to
+    // move the node further up.
+    if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
+      if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) {
+        break;
+      }
+      next = idom(ctl->in(1)->in(0));
+    } else if (ctl->is_Proj()) {
+      // We only move it up along a projection if the projection is
+      // the single control projection for its parent: same code path,
+      // if it's a If with UNC or fallthrough of a call.
+      Node* parent_ctl = ctl->in(0);
+      if (parent_ctl == NULL) {
+        break;
+      } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
+        next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
+      } else if (parent_ctl->is_If()) {
+        if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) {
+          break;
+        }
+        assert(idom(ctl) == parent_ctl, "strange");
+        next = idom(parent_ctl);
+      } else if (ctl->is_CatchProj()) {
+        if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
+          break;
+        }
+        assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
+        next = parent_ctl->in(0)->in(0)->in(0);
+      } else {
+        // Check if parent control has a single projection (this
+        // control is the only possible successor of the parent
+        // control). If so, we can try to move the node above the
+        // parent control.
+        int nb_ctl_proj = 0;
+        for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
+          Node *p = parent_ctl->fast_out(i);
+          if (p->is_Proj() && p->is_CFG()) {
+            nb_ctl_proj++;
+            if (nb_ctl_proj > 1) {
+              break;
+            }
+          }
+        }
+
+        if (nb_ctl_proj > 1) {
+          break;
+        }
+        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call(), "unexpected node");
+        assert(idom(ctl) == parent_ctl, "strange");
+        next = idom(parent_ctl);
+      }
+    } else {
+      next = idom(ctl);
+    }
+    if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
+      break;
+    }
+    ctl = next;
+  }
+
+  if (ctl != n->in(0)) {
+    _igvn.hash_delete(n);
+    n->set_req(0, ctl);
+    _igvn.hash_insert(n);
+  }
+
+  return ctl;
+}
+
+
 //------------------------------set_early_ctrl---------------------------------
 // Set earliest legal control
 void PhaseIdealLoop::set_early_ctrl( Node *n ) {
@@ -1892,6 +1991,98 @@
   }
 }
 
+//------------------------process_expensive_nodes-----------------------------
+// Expensive nodes have their control input set to prevent the GVN
+// from commoning them and as a result forcing the resulting node to
+// be in a more frequent path. Use CFG information here, to change the
+// control inputs so that some expensive nodes can be commoned while
+// not executed more frequently.
+bool PhaseIdealLoop::process_expensive_nodes() {
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  // Sort nodes to bring similar nodes together
+  C->sort_expensive_nodes();
+
+  bool progress = false;
+
+  for (int i = 0; i < C->expensive_count(); ) {
+    Node* n = C->expensive_node(i);
+    int start = i;
+    // Find nodes similar to n
+    i++;
+    for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
+    int end = i;
+    // And compare them two by two
+    for (int j = start; j < end; j++) {
+      Node* n1 = C->expensive_node(j);
+      if (is_node_unreachable(n1)) {
+        continue;
+      }
+      for (int k = j+1; k < end; k++) {
+        Node* n2 = C->expensive_node(k);
+        if (is_node_unreachable(n2)) {
+          continue;
+        }
+
+        assert(n1 != n2, "should be pair of nodes");
+
+        Node* c1 = n1->in(0);
+        Node* c2 = n2->in(0);
+
+        Node* parent_c1 = c1;
+        Node* parent_c2 = c2;
+
+        // The call to get_early_ctrl_for_expensive() moves the
+        // expensive nodes up but stops at loops that are in a if
+        // branch. See whether we can exit the loop and move above the
+        // If.
+        if (c1->is_Loop()) {
+          parent_c1 = c1->in(1);
+        }
+        if (c2->is_Loop()) {
+          parent_c2 = c2->in(1);
+        }
+
+        if (parent_c1 == parent_c2) {
+          _igvn._worklist.push(n1);
+          _igvn._worklist.push(n2);
+          continue;
+        }
+
+        // Look for identical expensive node up the dominator chain.
+        if (is_dominator(c1, c2)) {
+          c2 = c1;
+        } else if (is_dominator(c2, c1)) {
+          c1 = c2;
+        } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
+                   parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
+          // Both branches have the same expensive node so move it up
+          // before the if.
+          c1 = c2 = idom(parent_c1->in(0));
+        }
+        // Do the actual moves
+        if (n1->in(0) != c1) {
+          _igvn.hash_delete(n1);
+          n1->set_req(0, c1);
+          _igvn.hash_insert(n1);
+          _igvn._worklist.push(n1);
+          progress = true;
+        }
+        if (n2->in(0) != c2) {
+          _igvn.hash_delete(n2);
+          n2->set_req(0, c2);
+          _igvn.hash_insert(n2);
+          _igvn._worklist.push(n2);
+          progress = true;
+        }
+      }
+    }
+  }
+
+  return progress;
+}
+
+
 //=============================================================================
 //----------------------------build_and_optimize-------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
@@ -1960,7 +2151,9 @@
   }
 
   // Nothing to do, so get out
-  if( !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only ) {
+  bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only;
+  bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
+  if (stop_early && !do_expensive_nodes) {
     _igvn.optimize();           // Cleanup NeverBranches
     return;
   }
@@ -2058,6 +2251,21 @@
     return;
   }
 
+  if (stop_early) {
+    assert(do_expensive_nodes, "why are we here?");
+    if (process_expensive_nodes()) {
+      // If we made some progress when processing expensive nodes then
+      // the IGVN may modify the graph in a way that will allow us to
+      // make some more progress: we need to try processing expensive
+      // nodes again.
+      C->set_major_progress();
+    }
+
+    _igvn.optimize();
+
+    return;
+  }
+
   // Some parser-inserted loop predicates could never be used by loop
   // predication or they were moved away from loop during some optimizations.
   // For example, peeling. Eliminate them before next loop optimizations.
@@ -2120,6 +2328,10 @@
     NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
   }
 
+  if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
+    C->set_major_progress();
+  }
+
   // Perform loop predication before iteration splitting
   if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) {
     _ltree_root->_child->loop_predication(this);
@@ -3299,7 +3511,7 @@
 #ifdef ASSERT
     if (legal->is_Start() && !early->is_Root()) {
       // Bad graph. Print idom path and fail.
-      dump_bad_graph(n, early, LCA);
+      dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
       assert(false, "Bad graph detected in build_loop_late");
     }
 #endif
@@ -3350,8 +3562,8 @@
 }
 
 #ifdef ASSERT
-void PhaseIdealLoop::dump_bad_graph(Node* n, Node* early, Node* LCA) {
-  tty->print_cr( "Bad graph detected in build_loop_late");
+void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) {
+  tty->print_cr(msg);
   tty->print("n: "); n->dump();
   tty->print("early(n): "); early->dump();
   if (n->in(0) != NULL  && !n->in(0)->is_top() &&
--- a/src/share/vm/opto/loopnode.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -263,9 +263,18 @@
   bool stride_is_con() const        { Node *tmp = stride  (); return (tmp != NULL && tmp->is_Con()); }
   BoolTest::mask test_trip() const  { return in(TestValue)->as_Bool()->_test._test; }
   CountedLoopNode *loopnode() const {
+    // The CountedLoopNode that goes with this CountedLoopEndNode may
+    // have been optimized out by the IGVN, so be cautious with the
+    // pattern matching on the graph.
+    if (phi() == NULL) {
+      return NULL;
+    }
     Node *ln = phi()->in(0);
-    assert( ln->Opcode() == Op_CountedLoop, "malformed loop" );
-    return (CountedLoopNode*)ln; }
+    if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
+      return (CountedLoopNode*)ln;
+    }
+    return NULL;
+  }
 
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
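
The rewritten loopnode() is a defensive back-pointer lookup: rather than asserting a graph shape that IGVN may already have dismantled, it validates the round trip (the head's loop exit must be this node) and returns NULL otherwise. A small standalone C++ sketch of the same pattern, with toy Loop/LoopEnd types that are not the HotSpot classes:

    #include <cstddef>

    struct Loop;

    struct LoopEnd {
      Loop* head;                 // may be stale after the loop was optimized away
      Loop* loopnode() const;     // returns NULL unless the back link still holds
    };

    struct Loop {
      LoopEnd* exit;              // the exit this head believes it owns
    };

    Loop* LoopEnd::loopnode() const {
      if (head == NULL) {
        return NULL;              // counterpart already gone
      }
      // Only trust the pointer if the head still points back at this exit.
      return (head->exit == this) ? head : NULL;
    }
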
@@ -598,6 +607,7 @@
   // check if transform created new nodes that need _ctrl recorded
   Node *get_late_ctrl( Node *n, Node *early );
   Node *get_early_ctrl( Node *n );
+  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
   void set_early_ctrl( Node *n );
   void set_subtree_ctrl( Node *root );
   void set_ctrl( Node *n, Node *ctrl ) {
@@ -905,6 +915,16 @@
   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
   void eliminate_useless_predicates();
 
+  // Change the control input of expensive nodes to allow commoning by
+  // IGVN when it is guaranteed not to result in more frequent
+  // execution of the expensive node. Returns true if progress was made.
+  bool process_expensive_nodes();
+
+  // Check whether node has become unreachable
+  bool is_node_unreachable(Node *n) const {
+    return !has_node(n) || n->is_unreachable(_igvn);
+  }
+
   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
   void do_range_check( IdealLoopTree *loop, Node_List &old_new );
 
@@ -1043,7 +1063,7 @@
   void register_new_node( Node *n, Node *blk );
 
 #ifdef ASSERT
-void dump_bad_graph(Node* n, Node* early, Node* LCA);
+  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
 #endif
 
 #ifndef PRODUCT
--- a/src/share/vm/opto/machnode.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/machnode.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -501,7 +501,7 @@
 #ifndef PRODUCT
 void MachNullCheckNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   int reg = ra_->get_reg_first(in(1)->in(_vidx));
-  tty->print("%s %s", Name(), Matcher::regName[reg]);
+  st->print("%s %s", Name(), Matcher::regName[reg]);
 }
 #endif
 
--- a/src/share/vm/opto/memnode.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/memnode.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -238,7 +238,7 @@
     return this;
   ctl = in(MemNode::Control);
   // Don't bother trying to transform a dead node
-  if( ctl && ctl->is_top() )  return NodeSentinel;
+  if (ctl && ctl->is_top())  return NodeSentinel;
 
   PhaseIterGVN *igvn = phase->is_IterGVN();
   // Wait if control on the worklist.
@@ -262,8 +262,8 @@
   }
   // Ignore if memory is dead, or self-loop
   Node *mem = in(MemNode::Memory);
-  if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL
-  assert( mem != this, "dead loop in MemNode::Ideal" );
+  if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
+  assert(mem != this, "dead loop in MemNode::Ideal");
 
   if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
     // This memory slice may be dead.
@@ -273,12 +273,12 @@
   }
 
   Node *address = in(MemNode::Address);
-  const Type *t_adr = phase->type( address );
-  if( t_adr == Type::TOP )              return NodeSentinel; // caller will return NULL
-
-  if( can_reshape && igvn != NULL &&
+  const Type *t_adr = phase->type(address);
+  if (t_adr == Type::TOP)              return NodeSentinel; // caller will return NULL
+
+  if (can_reshape && igvn != NULL &&
       (igvn->_worklist.member(address) ||
-       igvn->_worklist.size() > 0 && (phase->type(address) != adr_type())) ) {
+       (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) {
     // The address's base and type may change when the address is processed.
     // Delay this mem node transformation until the address is processed.
     phase->is_IterGVN()->_worklist.push(this);
@@ -288,7 +288,7 @@
   // Do NOT remove or optimize the next lines: ensure a new alias index
   // is allocated for an oop pointer type before Escape Analysis.
   // Note: C++ will not remove it since the call has side effect.
-  if ( t_adr->isa_oopptr() ) {
+  if (t_adr->isa_oopptr()) {
     int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
   }
 
@@ -296,6 +296,26 @@
   Node* base = NULL;
   if (address->is_AddP())
     base = address->in(AddPNode::Base);
+  if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
+      !t_adr->isa_rawptr()) {
+    // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
+    Compile* C = phase->C;
+    tty->cr();
+    tty->print_cr("===== NULL+offs not RAW address =====");
+    if (C->is_dead_node(this->_idx))    tty->print_cr("'this' is dead");
+    if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead");
+    if (C->is_dead_node(mem->_idx))     tty->print_cr("'mem' is dead");
+    if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead");
+    if (C->is_dead_node(base->_idx))    tty->print_cr("'base' is dead");
+    tty->cr();
+    base->dump(1);
+    tty->cr();
+    this->dump(2);
+    tty->print("this->adr_type():     "); adr_type()->dump(); tty->cr();
+    tty->print("phase->type(address): "); t_adr->dump(); tty->cr();
+    tty->print("phase->type(base):    "); phase->type(base)->dump(); tty->cr();
+    tty->cr();
+  }
   assert(base == NULL || t_adr->isa_rawptr() ||
         !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
 #endif
@@ -320,6 +340,9 @@
 
   if (mem != old_mem) {
     set_req(MemNode::Memory, mem);
+    if (can_reshape && old_mem->outcnt() == 0) {
+      igvn->_worklist.push(old_mem);
+    }
     if (phase->type( mem ) == Type::TOP) return NodeSentinel;
     return this;
   }
@@ -2310,9 +2333,9 @@
   if (ReduceFieldZeroing && /*can_reshape &&*/
       mem->is_Proj() && mem->in(0)->is_Initialize()) {
     InitializeNode* init = mem->in(0)->as_Initialize();
-    intptr_t offset = init->can_capture_store(this, phase);
+    intptr_t offset = init->can_capture_store(this, phase, can_reshape);
     if (offset > 0) {
-      Node* moved = init->capture_store(this, offset, phase);
+      Node* moved = init->capture_store(this, offset, phase, can_reshape);
       // If the InitializeNode captured me, it made a raw copy of me,
       // and I need to disappear.
       if (moved != NULL) {
@@ -3105,7 +3128,7 @@
 // an initialization.  Returns zero if a check fails.
 // On success, returns the (constant) offset to which the store applies,
 // within the initialized memory.
-intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase) {
+intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
   const int FAIL = 0;
   if (st->req() != MemNode::ValueIn + 1)
     return FAIL;                // an inscrutable StoreNode (card mark?)
@@ -3127,6 +3150,91 @@
   if (!detect_init_independence(val, true, complexity_count))
     return FAIL;                // stored value must be 'simple enough'
 
+  // The Store can be captured only if nothing after the allocation
+  // and before the Store is using the memory location that the store
+  // overwrites.
+  bool failed = false;
+  // If is_complete_with_arraycopy() is true the shape of the graph is
+  // well defined and is safe so no need for extra checks.
+  if (!is_complete_with_arraycopy()) {
+    // We are going to look at each use of the memory state following
+    // the allocation to make sure nothing reads the memory that the
+    // Store writes.
+    const TypePtr* t_adr = phase->type(adr)->isa_ptr();
+    int alias_idx = phase->C->get_alias_index(t_adr);
+    ResourceMark rm;
+    Unique_Node_List mems;
+    mems.push(mem);
+    Node* unique_merge = NULL;
+    for (uint next = 0; next < mems.size(); ++next) {
+      Node *m  = mems.at(next);
+      for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
+        Node *n = m->fast_out(j);
+        if (n->outcnt() == 0) {
+          continue;
+        }
+        if (n == st) {
+          continue;
+        } else if (n->in(0) != NULL && n->in(0) != ctl) {
+          // If the control of this use is different from the control
+          // of the Store which is right after the InitializeNode then
+          // this node cannot be between the InitializeNode and the
+          // Store.
+          continue;
+        } else if (n->is_MergeMem()) {
+          if (n->as_MergeMem()->memory_at(alias_idx) == m) {
+            // We can hit a MergeMemNode (that will likely go away
+            // later) that is a direct use of the memory state
+            // following the InitializeNode on the same slice as the
+            // store node that we'd like to capture. We need to check
+            // the uses of the MergeMemNode.
+            mems.push(n);
+          }
+        } else if (n->is_Mem()) {
+          Node* other_adr = n->in(MemNode::Address);
+          if (other_adr == adr) {
+            failed = true;
+            break;
+          } else {
+            const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
+            if (other_t_adr != NULL) {
+              int other_alias_idx = phase->C->get_alias_index(other_t_adr);
+              if (other_alias_idx == alias_idx) {
+                // A load from the same memory slice as the store right
+                // after the InitializeNode. We check the control of the
+                // object/array that is loaded from. If it's the same as
+                // the store control then we cannot capture the store.
+                assert(!n->is_Store(), "2 stores to same slice on same control?");
+                Node* base = other_adr;
+                assert(base->is_AddP(), err_msg_res("should be addp but is %s", base->Name()));
+                base = base->in(AddPNode::Base);
+                if (base != NULL) {
+                  base = base->uncast();
+                  if (base->is_Proj() && base->in(0) == alloc) {
+                    failed = true;
+                    break;
+                  }
+                }
+              }
+            }
+          }
+        } else {
+          failed = true;
+          break;
+        }
+      }
+    }
+  }
+  if (failed) {
+    if (!can_reshape) {
+      // We decided we couldn't capture the store during parsing. We
+      // should try again during the next IGVN once the graph is
+      // cleaner.
+      phase->C->record_for_igvn(st);
+    }
+    return FAIL;
+  }
+
   return offset;                // success
 }
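
The capture check above is a worklist walk over the uses of the memory state produced by the allocation: MergeMems are expanded, and the capture is rejected as soon as a memory operation on the written slice, or anything unrecognized, shows up between the InitializeNode and the Store. A compact standalone C++ sketch of that traversal, using hypothetical Use/alias types in place of the C2 node classes:

    #include <vector>
    #include <unordered_set>

    struct Use {
      enum Kind { kStore, kLoad, kMergeMem, kOther } kind;
      int alias_idx;                       // memory slice this use touches
      std::vector<Use*> outs;              // uses of the value this node produces
    };

    // Returns true if 'store' may be folded into the initialization, i.e. no
    // other use on the written memory slice is reachable from 'init_mem' first.
    bool can_capture(Use* init_mem, Use* store, int store_alias_idx) {
      std::vector<Use*> worklist{init_mem};
      std::unordered_set<Use*> visited;
      while (!worklist.empty()) {
        Use* m = worklist.back();
        worklist.pop_back();
        if (!visited.insert(m).second) continue;    // already processed
        for (Use* n : m->outs) {
          if (n == store) continue;                 // the candidate itself is fine
          switch (n->kind) {
            case Use::kMergeMem:
              worklist.push_back(n);                // expand merges and keep scanning
              break;
            case Use::kLoad:
            case Use::kStore:
              if (n->alias_idx == store_alias_idx) {
                return false;                       // the written slice is touched first
              }
              break;
            default:
              return false;                         // unknown use: be conservative
          }
        }
      }
      return true;
    }
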
 
@@ -3237,11 +3345,11 @@
 //                      rawstore1 rawstore2)
 //
 Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
-                                    PhaseTransform* phase) {
+                                    PhaseTransform* phase, bool can_reshape) {
   assert(stores_are_sane(phase), "");
 
   if (start < 0)  return NULL;
-  assert(can_capture_store(st, phase) == start, "sanity");
+  assert(can_capture_store(st, phase, can_reshape) == start, "sanity");
 
   Compile* C = phase->C;
   int size_in_bytes = st->memory_size();
--- a/src/share/vm/opto/memnode.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/memnode.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1047,11 +1047,11 @@
 
   // See if this store can be captured; return offset where it initializes.
   // Return 0 if the store cannot be moved (any sort of problem).
-  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);
+  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);
 
   // Capture another store; reformat it to write my internal raw memory.
   // Return the captured copy, else NULL if there is some sort of problem.
-  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);
+  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);
 
   // Find captured store which corresponds to the range [start..start+size).
   // Return my own memory projection (meaning the initial zero bits)
--- a/src/share/vm/opto/node.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/node.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -493,6 +493,8 @@
   }
   if (is_macro())
     compile->add_macro_node(n);
+  if (is_expensive())
+    compile->add_expensive_node(n);
 
   n->set_idx(compile->next_unique()); // Get new unique index as well
   debug_only( n->verify_construction() );
@@ -616,6 +618,9 @@
   if (is_macro()) {
     compile->remove_macro_node(this);
   }
+  if (is_expensive()) {
+    compile->remove_expensive_node(this);
+  }
 #ifdef ASSERT
   // We will not actually delete the storage, but we'll make the node unusable.
   *(address*)this = badAddress;  // smash the C++ vtbl, probably
@@ -689,6 +694,13 @@
 }
 #endif
 
+
+//------------------------------is_unreachable---------------------------------
+bool Node::is_unreachable(PhaseIterGVN &igvn) const {
+  assert(!is_Mach(), "doesn't work with MachNodes");
+  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
+}
+
 //------------------------------add_req----------------------------------------
 // Add a new required input at the end
 void Node::add_req( Node *n ) {
@@ -1246,6 +1258,10 @@
       if (dead->is_macro()) {
         igvn->C->remove_macro_node(dead);
       }
+      if (dead->is_expensive()) {
+        igvn->C->remove_expensive_node(dead);
+      }
+      igvn->C->record_dead_node(dead->_idx);
       // Kill all inputs to the dead guy
       for (uint i=0; i < dead->req(); i++) {
         Node *n = dead->in(i);      // Get input to dead guy
@@ -1476,35 +1492,35 @@
 }
 
 #ifdef ASSERT
-static void dump_orig(Node* orig) {
+static void dump_orig(Node* orig, outputStream *st) {
   Compile* C = Compile::current();
-  if (NotANode(orig))  orig = NULL;
-  if (orig != NULL && !C->node_arena()->contains(orig))  orig = NULL;
-  if (orig == NULL)  return;
-  tty->print(" !orig=");
+  if (NotANode(orig)) orig = NULL;
+  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
+  if (orig == NULL) return;
+  st->print(" !orig=");
   Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
-  if (NotANode(fast))  fast = NULL;
+  if (NotANode(fast)) fast = NULL;
   while (orig != NULL) {
     bool discon = is_disconnected(orig);  // if discon, print [123] else 123
-    if (discon)  tty->print("[");
+    if (discon) st->print("[");
     if (!Compile::current()->node_arena()->contains(orig))
-      tty->print("o");
-    tty->print("%d", orig->_idx);
-    if (discon)  tty->print("]");
+      st->print("o");
+    st->print("%d", orig->_idx);
+    if (discon) st->print("]");
     orig = orig->debug_orig();
-    if (NotANode(orig))  orig = NULL;
-    if (orig != NULL && !C->node_arena()->contains(orig))  orig = NULL;
-    if (orig != NULL)  tty->print(",");
+    if (NotANode(orig)) orig = NULL;
+    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
+    if (orig != NULL) st->print(",");
     if (fast != NULL) {
       // Step fast twice for each single step of orig:
       fast = fast->debug_orig();
-      if (NotANode(fast))  fast = NULL;
+      if (NotANode(fast)) fast = NULL;
       if (fast != NULL && fast != orig) {
         fast = fast->debug_orig();
-        if (NotANode(fast))  fast = NULL;
+        if (NotANode(fast)) fast = NULL;
       }
       if (fast == orig) {
-        tty->print("...");
+        st->print("...");
         break;
       }
     }
@@ -1531,35 +1547,34 @@
 
 //------------------------------dump------------------------------------------
 // Dump a Node
-void Node::dump() const {
+void Node::dump(const char* suffix, outputStream *st) const {
   Compile* C = Compile::current();
   bool is_new = C->node_arena()->contains(this);
   _in_dump_cnt++;
-  tty->print("%c%d\t%s\t=== ",
-             is_new ? ' ' : 'o', _idx, Name());
+  st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());
 
   // Dump the required and precedence inputs
-  dump_req();
-  dump_prec();
+  dump_req(st);
+  dump_prec(st);
   // Dump the outputs
-  dump_out();
+  dump_out(st);
 
   if (is_disconnected(this)) {
 #ifdef ASSERT
-    tty->print("  [%d]",debug_idx());
-    dump_orig(debug_orig());
+    st->print("  [%d]",debug_idx());
+    dump_orig(debug_orig(), st);
 #endif
-    tty->cr();
+    st->cr();
     _in_dump_cnt--;
     return;                     // don't process dead nodes
   }
 
   // Dump node-specific info
-  dump_spec(tty);
+  dump_spec(st);
 #ifdef ASSERT
   // Dump the non-reset _debug_idx
-  if( Verbose && WizardMode ) {
-    tty->print("  [%d]",debug_idx());
+  if (Verbose && WizardMode) {
+    st->print("  [%d]",debug_idx());
   }
 #endif
 
@@ -1569,88 +1584,88 @@
     const TypeInstPtr  *toop = t->isa_instptr();
     const TypeKlassPtr *tkls = t->isa_klassptr();
     ciKlass*           klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
-    if( klass && klass->is_loaded() && klass->is_interface() ) {
-      tty->print("  Interface:");
-    } else if( toop ) {
-      tty->print("  Oop:");
-    } else if( tkls ) {
-      tty->print("  Klass:");
+    if (klass && klass->is_loaded() && klass->is_interface()) {
+      st->print("  Interface:");
+    } else if (toop) {
+      st->print("  Oop:");
+    } else if (tkls) {
+      st->print("  Klass:");
     }
-    t->dump();
-  } else if( t == Type::MEMORY ) {
-    tty->print("  Memory:");
-    MemNode::dump_adr_type(this, adr_type(), tty);
-  } else if( Verbose || WizardMode ) {
-    tty->print("  Type:");
-    if( t ) {
-      t->dump();
+    t->dump_on(st);
+  } else if (t == Type::MEMORY) {
+    st->print("  Memory:");
+    MemNode::dump_adr_type(this, adr_type(), st);
+  } else if (Verbose || WizardMode) {
+    st->print("  Type:");
+    if (t) {
+      t->dump_on(st);
     } else {
-      tty->print("no type");
+      st->print("no type");
     }
   } else if (t->isa_vect() && this->is_MachSpillCopy()) {
     // Dump MachSpillcopy vector type.
-    t->dump();
+    t->dump_on(st);
   }
   if (is_new) {
-    debug_only(dump_orig(debug_orig()));
+    debug_only(dump_orig(debug_orig(), st));
     Node_Notes* nn = C->node_notes_at(_idx);
     if (nn != NULL && !nn->is_clear()) {
       if (nn->jvms() != NULL) {
-        tty->print(" !jvms:");
-        nn->jvms()->dump_spec(tty);
+        st->print(" !jvms:");
+        nn->jvms()->dump_spec(st);
       }
     }
   }
-  tty->cr();
+  if (suffix) st->print(suffix);
   _in_dump_cnt--;
 }
 
 //------------------------------dump_req--------------------------------------
-void Node::dump_req() const {
+void Node::dump_req(outputStream *st) const {
   // Dump the required input edges
   for (uint i = 0; i < req(); i++) {    // For all required inputs
     Node* d = in(i);
     if (d == NULL) {
-      tty->print("_ ");
+      st->print("_ ");
     } else if (NotANode(d)) {
-      tty->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
+      st->print("NotANode ");  // uninitialized, sentinel, garbage, etc.
     } else {
-      tty->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
+      st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
     }
   }
 }
 
 
 //------------------------------dump_prec-------------------------------------
-void Node::dump_prec() const {
+void Node::dump_prec(outputStream *st) const {
   // Dump the precedence edges
   int any_prec = 0;
   for (uint i = req(); i < len(); i++) {       // For all precedence inputs
     Node* p = in(i);
     if (p != NULL) {
-      if( !any_prec++ ) tty->print(" |");
-      if (NotANode(p)) { tty->print("NotANode "); continue; }
-      tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
+      if (!any_prec++) st->print(" |");
+      if (NotANode(p)) { st->print("NotANode "); continue; }
+      st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
     }
   }
 }
 
 //------------------------------dump_out--------------------------------------
-void Node::dump_out() const {
+void Node::dump_out(outputStream *st) const {
   // Delimit the output edges
-  tty->print(" [[");
+  st->print(" [[");
   // Dump the output edges
   for (uint i = 0; i < _outcnt; i++) {    // For all outputs
     Node* u = _out[i];
     if (u == NULL) {
-      tty->print("_ ");
+      st->print("_ ");
     } else if (NotANode(u)) {
-      tty->print("NotANode ");
+      st->print("NotANode ");
     } else {
-      tty->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
+      st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
     }
   }
-  tty->print("]] ");
+  st->print("]] ");
 }
 
 //------------------------------dump_nodes-------------------------------------
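
All of the node.cpp dump changes follow one refactoring: print through a caller-supplied stream that defaults to the console, so the same dump code can also fill an in-memory buffer, for example when composing an assertion message. A minimal standalone C++ sketch of the pattern, with hypothetical Stream types standing in for outputStream and stringStream:

    #include <cstdio>
    #include <string>

    struct Stream {
      virtual ~Stream() {}
      virtual void print(const char* s) = 0;
    };

    struct ConsoleStream : Stream {
      void print(const char* s) override { std::fputs(s, stdout); }
    };

    struct StringStream : Stream {
      std::string buf;
      void print(const char* s) override { buf += s; }
    };

    ConsoleStream console;                    // plays the role of tty

    // Defaulting the stream keeps existing call sites unchanged while letting
    // new callers redirect the output.
    void dump_node(int idx, Stream* st = &console) {
      char line[32];
      std::snprintf(line, sizeof(line), "node %d\n", idx);
      st->print(line);
    }

    // Usage: dump_node(42) prints to the console, while
    //        StringStream ss; dump_node(42, &ss); captures the text in ss.buf.
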
--- a/src/share/vm/opto/node.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/node.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -374,6 +374,8 @@
   bool is_dead() const;
 #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
 #endif
+  // Check whether node has become unreachable
+  bool is_unreachable(PhaseIterGVN &igvn) const;
 
   // Set a required input edge, also updates corresponding output edge
   void add_req( Node *n ); // Append a NEW required input
@@ -638,7 +640,8 @@
     Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
     Flag_avoid_back_to_back  = Flag_may_be_short_branch << 1,
     Flag_has_call            = Flag_avoid_back_to_back << 1,
-    _max_flags = (Flag_has_call << 1) - 1 // allow flags combination
+    Flag_is_expensive        = Flag_has_call << 1,
+    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
   };
 
 private:
@@ -807,6 +810,8 @@
 
   // The node is a "macro" node which needs to be expanded before matching
   bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
+  // The node is expensive: the best control is set during loop opts
+  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }
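
Flag_is_expensive is allocated like every other node flag: each flag is the previous one shifted left by one, so flags combine with bitwise OR and are tested with AND. A tiny standalone C++ sketch of the scheme (toy types, only a few of the real flag names shown):

    enum NodeFlags {
      Flag_is_macro     = 1 << 0,
      Flag_has_call     = Flag_is_macro << 1,
      Flag_is_expensive = Flag_has_call << 1,
      _max_flags        = (Flag_is_expensive << 1) - 1   // mask of all defined flags
    };

    struct ToyNode {
      unsigned flags = 0;
      void init_flags(unsigned f) { flags |= f; }
      bool is_expensive() const { return (flags & Flag_is_expensive) != 0; }
    };
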
 
 //----------------- Optimization
 
@@ -982,12 +987,13 @@
 #ifndef PRODUCT
   Node* find(int idx) const;         // Search the graph for the given idx.
   Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
-  void dump() const;                 // Print this node,
+  void dump() const { dump("\n"); }  // Print this node.
+  void dump(const char* suffix, outputStream *st = tty) const;// Print this node.
   void dump(int depth) const;        // Print this node, recursively to depth d
   void dump_ctrl(int depth) const;   // Print control nodes, to depth d
-  virtual void dump_req() const;     // Print required-edge info
-  virtual void dump_prec() const;    // Print precedence-edge info
-  virtual void dump_out() const;     // Print the output edge info
+  virtual void dump_req(outputStream *st = tty) const;     // Print required-edge info
+  virtual void dump_prec(outputStream *st = tty) const;    // Print precedence-edge info
+  virtual void dump_out(outputStream *st = tty) const;     // Print the output edge info
   virtual void dump_spec(outputStream *st) const {}; // Print per-node info
   void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
   void verify() const;               // Check Def-Use info for my subgraph
--- a/src/share/vm/opto/optoreg.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/optoreg.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -77,7 +77,7 @@
   // (We would like to have an operator+ for RegName, but it is not
   // a class, so this would be illegal in C++.)
 
-  static void dump( int );
+  static void dump(int, outputStream *st = tty);
 
   // Get the stack slot number of an OptoReg::Name
   static unsigned int reg2stack( OptoReg::Name r) {
--- a/src/share/vm/opto/parse.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/parse.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -58,7 +58,7 @@
   GrowableArray<InlineTree*> _subtrees;
 
   void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
-
+  const char* _msg;
 protected:
   InlineTree(Compile* C,
              const InlineTree* caller_tree,
@@ -70,16 +70,29 @@
   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                            JVMState* caller_jvms,
                                            int caller_bci);
-  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay);
-  const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
-  const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
-  void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const;
+  bool        try_to_inline(ciMethod* callee_method,
+                            ciMethod* caller_method,
+                            int caller_bci,
+                            ciCallProfile& profile,
+                            WarmCallInfo* wci_result,
+                            bool& should_delay);
+  bool        should_inline(ciMethod* callee_method,
+                            ciMethod* caller_method,
+                            int caller_bci,
+                            ciCallProfile& profile,
+                            WarmCallInfo* wci_result);
+  bool        should_not_inline(ciMethod* callee_method,
+                                ciMethod* caller_method,
+                                WarmCallInfo* wci_result);
+  void        print_inlining(ciMethod* callee_method, int caller_bci,
+                             bool success) const;
 
-  InlineTree *caller_tree()       const { return _caller_tree;  }
+  InlineTree* caller_tree()       const { return _caller_tree;  }
   InlineTree* callee_at(int bci, ciMethod* m) const;
   int         inline_level()      const { return stack_depth(); }
   int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
-
+  const char* msg()               const { return _msg; }
+  void        set_msg(const char* msg)  { _msg = msg; }
 public:
   static const char* check_can_parse(ciMethod* callee);
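
The parse.hpp change replaces the old "return a failure string or NULL" convention with "return a bool and record the reason through set_msg()", keeping the decision and its explanation separate. A small standalone C++ sketch of that shape (hypothetical names and threshold, not the real InlineTree policy):

    #include <cstddef>

    class InlineDecision {
     public:
      bool should_inline(int size, int size_limit) {
        if (size > size_limit) {
          set_msg("callee is too large");   // reason recorded for later printing
          return false;                     // decision travels separately
        }
        set_msg("inline");
        return true;
      }
      const char* msg() const { return _msg; }

     private:
      void set_msg(const char* msg) { _msg = msg; }
      const char* _msg = NULL;
    };
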
 
--- a/src/share/vm/opto/parse2.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/parse2.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -104,7 +104,8 @@
     if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
   }
 
-  if (!arytype->klass()->is_loaded()) {
+  ciKlass * arytype_klass = arytype->klass();
+  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
     // Only fails for some -Xcomp runs
     // The class is unloaded.  We have to run this bytecode in the interpreter.
     uncommon_trap(Deoptimization::Reason_unloaded,
@@ -1385,6 +1386,7 @@
   if (TraceOptoParse) {
     tty->print(" @");
     dump_bci(bci());
+    tty->cr();
   }
 #endif
 
--- a/src/share/vm/opto/parse3.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/parse3.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -487,7 +487,8 @@
                           fun, NULL, TypeRawPtr::BOTTOM,
                           makecon(TypeKlassPtr::make(array_klass)),
                           length[0], length[1], length[2],
-                          length[3], length[4]);
+                          (ndimensions > 2) ? length[3] : NULL,
+                          (ndimensions > 3) ? length[4] : NULL);
   } else {
     // Create a java array for dimension sizes
     Node* dims = NULL;
--- a/src/share/vm/opto/phaseX.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/phaseX.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1197,12 +1197,27 @@
                 assert(!(i < imax), "sanity");
               }
             }
+            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
+                in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
+              // A Load that directly follows an InitializeNode is
+              // going away. The Stores that follow are candidates
+              // again to be captured by the InitializeNode.
+              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
+                Node *n = in->fast_out(j);
+                if (n->is_Store()) {
+                  _worklist.push(n);
+                }
+              }
+            }
           }
         }
         C->record_dead_node(dead->_idx);
         if (dead->is_macro()) {
           C->remove_macro_node(dead);
         }
+        if (dead->is_expensive()) {
+          C->remove_expensive_node(dead);
+        }
 
         if (recurse) {
           continue;
--- a/src/share/vm/opto/regalloc.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/regalloc.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -40,6 +40,7 @@
                Phase(Register_Allocation), _cfg(cfg), _matcher(matcher),
                _node_oops(Thread::current()->resource_area()),
                _node_regs(0),
+               _node_regs_max_index(0),
                _framesize(0xdeadbeef)
 {
     int i;
--- a/src/share/vm/opto/regmask.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/regmask.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,13 +108,13 @@
 //------------------------------dump-------------------------------------------
 
 #ifndef PRODUCT
-void OptoReg::dump( int r ) {
-  switch( r ) {
-  case Special: tty->print("r---");   break;
-  case Bad:     tty->print("rBAD");   break;
+void OptoReg::dump(int r, outputStream *st) {
+  switch (r) {
+  case Special: st->print("r---"); break;
+  case Bad:     st->print("rBAD"); break;
   default:
-    if( r < _last_Mach_Reg ) tty->print(Matcher::regName[r]);
-    else tty->print("rS%d",r);
+    if (r < _last_Mach_Reg) st->print(Matcher::regName[r]);
+    else st->print("rS%d",r);
     break;
   }
 }
@@ -241,7 +241,8 @@
       } else {                  // Else its a split-pair case
         if( bit != _A[i] ) return false; // Found many bits, so fail
         i++;                    // Skip iteration forward
-        if( _A[i] != 1 ) return false; // Require 1 lo bit in next word
+        if( i >= RM_SIZE || _A[i] != 1 )
+          return false; // Require 1 lo bit in next word
       }
     }
   }
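
Both regmask.cpp fixes guard the same hazard: when a bit set starts near the top of one 32-bit word and spills into the next, the scan increments the word index and must confirm it is still inside the mask before reading _A[i]. A standalone C++ sketch of such a guarded scan over a fixed-size toy mask (illustrative only):

    #include <cstdint>

    const int RM_SIZE = 4;                        // 32-bit words in the toy mask

    // True if 'A' contains exactly one pair of adjacent set bits and nothing
    // else. The pair may straddle a word boundary; the i + 1 < RM_SIZE test is
    // the analog of the "i >= RM_SIZE" guards added to regmask.cpp.
    bool is_bound_pair(const uint32_t A[RM_SIZE]) {
      for (int i = 0; i < RM_SIZE; i++) {
        if (A[i] == 0) continue;                  // nothing set in this word
        uint32_t bit = A[i] & (~A[i] + 1u);       // lowest set bit
        if (bit == 0x80000000u) {
          // Split case: low half is the top bit of this word, so the high half
          // must be bit 0 of the next word -- if there is a next word at all.
          if (A[i] != bit || i + 1 >= RM_SIZE || A[i + 1] != 1u) return false;
          for (int j = i + 2; j < RM_SIZE; j++) {
            if (A[j] != 0) return false;          // stray bits beyond the pair
          }
          return true;
        }
        // In-word case: the word must hold exactly 'bit' and the bit above it.
        if (A[i] != (bit | (bit << 1))) return false;
        for (int j = i + 1; j < RM_SIZE; j++) {
          if (A[j] != 0) return false;            // stray bits beyond the pair
        }
        return true;
      }
      return false;                               // empty mask: no pair at all
    }
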
@@ -254,7 +255,7 @@
 // Find the lowest-numbered register set in the mask.  Return the
 // HIGHEST register number in the set, or BAD if no sets.
 // Works also for size 1.
-OptoReg::Name RegMask::find_first_set(int size) const {
+OptoReg::Name RegMask::find_first_set(const int size) const {
   verify_sets(size);
   for (int i = 0; i < RM_SIZE; i++) {
     if (_A[i]) {                // Found some bits
@@ -268,7 +269,7 @@
 
 //------------------------------clear_to_sets----------------------------------
 // Clear out partial bits; leave only aligned adjacent bit pairs
-void RegMask::clear_to_sets(int size) {
+void RegMask::clear_to_sets(const int size) {
   if (size == 1) return;
   assert(2 <= size && size <= 8, "update low bits table");
   assert(is_power_of_2(size), "sanity");
@@ -293,7 +294,7 @@
 
 //------------------------------smear_to_sets----------------------------------
 // Smear out partial bits to aligned adjacent bit sets
-void RegMask::smear_to_sets(int size) {
+void RegMask::smear_to_sets(const int size) {
   if (size == 1) return;
   assert(2 <= size && size <= 8, "update low bits table");
   assert(is_power_of_2(size), "sanity");
@@ -318,7 +319,7 @@
 }
 
 //------------------------------is_aligned_set--------------------------------
-bool RegMask::is_aligned_sets(int size) const {
+bool RegMask::is_aligned_sets(const int size) const {
   if (size == 1) return true;
   assert(2 <= size && size <= 8, "update low bits table");
   assert(is_power_of_2(size), "sanity");
@@ -344,7 +345,7 @@
 //------------------------------is_bound_set-----------------------------------
 // Return TRUE if the mask contains one adjacent set of bits and no other bits.
 // Works also for size 1.
-int RegMask::is_bound_set(int size) const {
+int RegMask::is_bound_set(const int size) const {
   if( is_AllStack() ) return false;
   assert(1 <= size && size <= 8, "update low bits table");
   int bit = -1;                 // Set to hold the one bit allowed
@@ -352,7 +353,7 @@
     if (_A[i] ) {               // Found some bits
       if (bit != -1)
        return false;            // Already had bits, so fail
-      bit = _A[i] & -_A[i];     // Extract 1 bit from mask
+      bit = _A[i] & -_A[i];     // Extract low bit from mask
       int hi_bit = bit << (size-1); // high bit
       if (hi_bit != 0) {        // Bit set stays in same word?
         int set = hi_bit + ((hi_bit-1) & ~(bit-1));
@@ -362,12 +363,12 @@
         if (((-1) & ~(bit-1)) != _A[i])
           return false;         // Found many bits, so fail
         i++;                    // Skip iteration forward and check high part
-        assert(size <= 8, "update next code");
         // The lower 24 bits should be 0 since it is split case and size <= 8.
         int set = bit>>24;
         set = set & -set; // Remove sign extension.
         set = (((set << size) - 1) >> 8);
-        if (_A[i] != set) return false; // Require 1 lo bit in next word
+        if (i >= RM_SIZE || _A[i] != set)
+          return false; // Require expected low bits in next word
       }
     }
   }
@@ -404,53 +405,53 @@
 
 #ifndef PRODUCT
 //------------------------------print------------------------------------------
-void RegMask::dump( ) const {
-  tty->print("[");
+void RegMask::dump(outputStream *st) const {
+  st->print("[");
   RegMask rm = *this;           // Structure copy into local temp
 
   OptoReg::Name start = rm.find_first_elem(); // Get a register
-  if( OptoReg::is_valid(start) ) { // Check for empty mask
+  if (OptoReg::is_valid(start)) { // Check for empty mask
     rm.Remove(start);           // Yank from mask
-    OptoReg::dump(start);       // Print register
+    OptoReg::dump(start, st);   // Print register
     OptoReg::Name last = start;
 
     // Now I have printed an initial register.
     // Print adjacent registers as "rX-rZ" instead of "rX,rY,rZ".
     // Begin looping over the remaining registers.
-    while( 1 ) {                //
+    while (1) {                 //
       OptoReg::Name reg = rm.find_first_elem(); // Get a register
-      if( !OptoReg::is_valid(reg) )
+      if (!OptoReg::is_valid(reg))
         break;                  // Empty mask, end loop
       rm.Remove(reg);           // Yank from mask
 
-      if( last+1 == reg ) {     // See if they are adjacent
+      if (last+1 == reg) {      // See if they are adjacent
         // Adjacent registers just collect into long runs, no printing.
         last = reg;
       } else {                  // Ending some kind of run
-        if( start == last ) {   // 1-register run; no special printing
-        } else if( start+1 == last ) {
-          tty->print(",");      // 2-register run; print as "rX,rY"
-          OptoReg::dump(last);
+        if (start == last) {    // 1-register run; no special printing
+        } else if (start+1 == last) {
+          st->print(",");       // 2-register run; print as "rX,rY"
+          OptoReg::dump(last, st);
         } else {                // Multi-register run; print as "rX-rZ"
-          tty->print("-");
-          OptoReg::dump(last);
+          st->print("-");
+          OptoReg::dump(last, st);
         }
-        tty->print(",");        // Seperate start of new run
+        st->print(",");         // Separate start of new run
         start = last = reg;     // Start a new register run
-        OptoReg::dump(start); // Print register
+        OptoReg::dump(start, st); // Print register
       } // End of if ending a register run or not
     } // End of while regmask not empty
 
-    if( start == last ) {       // 1-register run; no special printing
-    } else if( start+1 == last ) {
-      tty->print(",");          // 2-register run; print as "rX,rY"
-      OptoReg::dump(last);
+    if (start == last) {        // 1-register run; no special printing
+    } else if (start+1 == last) {
+      st->print(",");           // 2-register run; print as "rX,rY"
+      OptoReg::dump(last, st);
     } else {                    // Multi-register run; print as "rX-rZ"
-      tty->print("-");
-      OptoReg::dump(last);
+      st->print("-");
+      OptoReg::dump(last, st);
     }
-    if( rm.is_AllStack() ) tty->print("...");
+    if (rm.is_AllStack()) st->print("...");
   }
-  tty->print("]");
+  st->print("]");
 }
 #endif
--- a/src/share/vm/opto/regmask.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/regmask.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,22 +225,22 @@
   // Find the lowest-numbered register set in the mask.  Return the
   // HIGHEST register number in the set, or BAD if no sets.
   // Assert that the mask contains only bit sets.
-  OptoReg::Name find_first_set(int size) const;
+  OptoReg::Name find_first_set(const int size) const;
 
   // Clear out partial bits; leave only aligned adjacent bit sets of size.
-  void clear_to_sets(int size);
+  void clear_to_sets(const int size);
   // Smear out partial bits to aligned adjacent bit sets.
-  void smear_to_sets(int size);
+  void smear_to_sets(const int size);
   // Verify that the mask contains only aligned adjacent bit sets
   void verify_sets(int size) const { assert(is_aligned_sets(size), "mask is not aligned, adjacent sets"); }
   // Test that the mask contains only aligned adjacent bit sets
-  bool is_aligned_sets(int size) const;
+  bool is_aligned_sets(const int size) const;
 
   // mask is a set of misaligned registers
   bool is_misaligned_set(int size) const { return (int)Size()==size && !is_aligned_sets(size);}
 
   // Test for a single adjacent set
-  int is_bound_set(int size) const;
+  int is_bound_set(const int size) const;
 
   static bool is_vector(uint ireg);
   static int num_registers(uint ireg);
@@ -310,7 +310,7 @@
 
 #ifndef PRODUCT
   void print() const { dump(); }
-  void dump() const;            // Print a mask
+  void dump(outputStream *st = tty) const; // Print a mask
 #endif
 
   static const RegMask Empty;   // Common empty mask
--- a/src/share/vm/opto/subnode.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/opto/subnode.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -399,7 +399,10 @@
 // Cosinus of a double
 class CosDNode : public Node {
 public:
-  CosDNode( Node *in1  ) : Node(0, in1) {}
+  CosDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -410,7 +413,10 @@
 // Sinus of a double
 class SinDNode : public Node {
 public:
-  SinDNode( Node *in1  ) : Node(0, in1) {}
+  SinDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -422,7 +428,10 @@
 // tangens of a double
 class TanDNode : public Node {
 public:
-  TanDNode(Node *in1  ) : Node(0, in1) {}
+  TanDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -445,7 +454,10 @@
 // square root a double
 class SqrtDNode : public Node {
 public:
-  SqrtDNode(Node *c, Node *in1  ) : Node(c, in1) {}
+  SqrtDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -456,7 +468,10 @@
 //  Exponentiate a double
 class ExpDNode : public Node {
 public:
-  ExpDNode( Node *c, Node *in1 ) : Node(c, in1) {}
+  ExpDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -467,7 +482,10 @@
 // Log_e of a double
 class LogDNode : public Node {
 public:
-  LogDNode( Node *in1 ) : Node(0, in1) {}
+  LogDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -478,7 +496,10 @@
 // Log_10 of a double
 class Log10DNode : public Node {
 public:
-  Log10DNode( Node *in1 ) : Node(0, in1) {}
+  Log10DNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -489,7 +510,10 @@
 // Raise a double to a double power
 class PowDNode : public Node {
 public:
-  PowDNode(Node *c, Node *in1, Node *in2  ) : Node(c, in1, in2) {}
+  PowDNode(Compile* C, Node *c, Node *in1, Node *in2 ) : Node(c, in1, in2) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
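
Each of the transcendental nodes above now registers itself with the Compile object as it is constructed, so later phases can walk a compile-wide list of expensive nodes instead of rediscovering them in the graph. A standalone C++ sketch of the register-on-construction idea, with toy Compile/Node types rather than the C2 classes:

    #include <vector>

    struct ToyNode {
      unsigned flags = 0;
    };

    const unsigned Flag_is_expensive = 1u << 0;

    struct ToyCompile {
      std::vector<ToyNode*> expensive_nodes;
      void add_expensive_node(ToyNode* n) { expensive_nodes.push_back(n); }
    };

    // The constructor both marks the node and records it centrally, mirroring
    // what the SinD/CosD/TanD/SqrtD/ExpD/LogD/PowD constructors now do.
    struct SqrtLikeNode : ToyNode {
      explicit SqrtLikeNode(ToyCompile* C) {
        flags |= Flag_is_expensive;
        C->add_expensive_node(this);
      }
    };
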
--- a/src/share/vm/runtime/deoptimization.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/runtime/deoptimization.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -425,6 +425,7 @@
                                                                                                     callee_parameters,
                                                                                                     callee_locals,
                                                                                                     index == 0,
+                                                                                                    index == array->frames() - 1,
                                                                                                     popframe_extra_args);
     // This pc doesn't have to be perfect just good enough to identify the frame
     // as interpreted so the skeleton frame will be walkable
@@ -1559,7 +1560,7 @@
         if (trap_method() == nm->method()) {
           make_not_compilable = true;
         } else {
-          trap_method->set_not_compilable(CompLevel_full_optimization);
+          trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff");
           // But give grace to the enclosing nm->method().
         }
       }
--- a/src/share/vm/runtime/globals.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/runtime/globals.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,10 @@
 }
 
 bool Flag::is_unlocked() const {
-  if (strcmp(kind, "{diagnostic}") == 0) {
+  if (strcmp(kind, "{diagnostic}") == 0 ||
+      strcmp(kind, "{C2 diagnostic}") == 0 ||
+      strcmp(kind, "{ARCH diagnostic}") == 0 ||
+      strcmp(kind, "{Shark diagnostic}") == 0) {
     if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) {
       // transitional logic to allow tests to run until they are changed
       static int warned;
@@ -77,7 +80,9 @@
     }
     return UnlockDiagnosticVMOptions;
   } else if (strcmp(kind, "{experimental}") == 0 ||
-             strcmp(kind, "{C2 experimental}") == 0) {
+             strcmp(kind, "{C2 experimental}") == 0 ||
+             strcmp(kind, "{ARCH experimental}") == 0 ||
+             strcmp(kind, "{Shark experimental}") == 0) {
     return UnlockExperimentalVMOptions;
   } else {
     return is_unlocked_ext();
--- a/src/share/vm/runtime/globals.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -869,6 +869,11 @@
   diagnostic(bool, PrintNMTStatistics, false,                               \
           "Print native memory tracking summary data if it is on")          \
                                                                             \
+  diagnostic(bool, AutoShutdownNMT, true,                                   \
+          "Automatically shut down native memory tracking under stress "    \
+          "situations. When set to false, native memory tracking tries to " \
+          "stay alive at the expense of JVM performance")                   \
+                                                                            \
   diagnostic(bool, LogCompilation, false,                                   \
           "Log compilation activity in detail to hotspot.log or LogFile")   \
                                                                             \
--- a/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -99,7 +99,7 @@
     CodeBuffer* cbuf = _masm->code();
     CodeBlob*   blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
     if (blob != NULL) {
-      blob->set_comments(cbuf->comments());
+      blob->set_strings(cbuf->strings());
     }
     bool saw_first = false;
     StubCodeDesc* toprint[1000];
--- a/src/share/vm/runtime/vframeArray.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -160,6 +160,7 @@
                                          int callee_locals,
                                          frame* caller,
                                          bool is_top_frame,
+                                         bool is_bottom_frame,
                                          int exec_mode) {
   JavaThread* thread = (JavaThread*) Thread::current();
 
@@ -277,7 +278,8 @@
                                  callee_locals,
                                  caller,
                                  iframe(),
-                                 is_top_frame);
+                                 is_top_frame,
+                                 is_bottom_frame);
 
   // Update the pc in the frame object and overwrite the temporary pc
   // we placed in the skeletal frame now that we finally know the
@@ -422,6 +424,7 @@
                                       int callee_parameters,
                                       int callee_locals,
                                       bool is_top_frame,
+                                      bool is_bottom_frame,
                                       int popframe_extra_stack_expression_els) const {
   assert(method()->max_locals() == locals()->size(), "just checking");
   int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
@@ -433,7 +436,8 @@
                                       caller_actual_parameters,
                                       callee_parameters,
                                       callee_locals,
-                                      is_top_frame);
+                                      is_top_frame,
+                                      is_bottom_frame);
 }
 
 
@@ -524,7 +528,7 @@
 
   // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
   // Unpack the frames from the oldest (frames() -1) to the youngest (0)
-  frame caller_frame = me;
+  frame* caller_frame = &me;
   for (index = frames() - 1; index >= 0 ; index--) {
     vframeArrayElement* elem = element(index);  // caller
     int callee_parameters, callee_locals;
@@ -544,13 +548,14 @@
     elem->unpack_on_stack(caller_actual_parameters,
                           callee_parameters,
                           callee_locals,
-                          &caller_frame,
+                          caller_frame,
                           index == 0,
+                          index == frames() - 1,
                           exec_mode);
     if (index == frames() - 1) {
       Deoptimization::unwind_callee_save_values(elem->iframe(), this);
     }
-    caller_frame = *elem->iframe();
+    caller_frame = elem->iframe();
     caller_actual_parameters = callee_parameters;
   }
   deallocate_monitor_chunks();
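
The unpack loop walks the vframe array from the oldest frame (index frames() - 1) down to the youngest (index 0), so index == 0 marks the top frame and index == frames() - 1 the bottom frame, and each iteration now hands the previously built frame to the next one by pointer rather than by copy. A tiny standalone C++ sketch of that iteration order (illustrative only):

    #include <cstdio>

    struct ToyFrame { int id; };

    void unpack_all(ToyFrame* caller, ToyFrame elems[], int frames) {
      ToyFrame* caller_frame = caller;               // pointer, no struct copies
      for (int index = frames - 1; index >= 0; index--) {
        bool is_top_frame    = (index == 0);
        bool is_bottom_frame = (index == frames - 1);
        std::printf("frame %d: top=%d bottom=%d caller=%d\n",
                    elems[index].id, is_top_frame, is_bottom_frame,
                    caller_frame->id);
        caller_frame = &elems[index];                // next iteration's caller
      }
    }
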
--- a/src/share/vm/runtime/vframeArray.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/runtime/vframeArray.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -88,6 +88,7 @@
   int on_stack_size(int caller_actual_parameters,
                     int callee_parameters,
                     int callee_locals,
                     bool is_top_frame,
+                    bool is_bottom_frame,
                     int popframe_extra_stack_expression_els) const;
 
@@ -97,6 +98,7 @@
                        int callee_locals,
                        frame* caller,
                        bool is_top_frame,
+                       bool is_bottom_frame,
                        int exec_mode);
 
 #ifndef PRODUCT
--- a/src/share/vm/services/memTracker.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/services/memTracker.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -68,6 +68,7 @@
 volatile jint                   MemTracker::_pooled_recorder_count = 0;
 volatile unsigned long          MemTracker::_processing_generation = 0;
 volatile bool                   MemTracker::_worker_thread_idle = false;
+volatile bool                   MemTracker::_slowdown_calling_thread = false;
 debug_only(intx                 MemTracker::_main_thread_tid = 0;)
 NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
 
@@ -364,6 +365,12 @@
     }
 
     if (thread != NULL) {
+      // Slow down all calling threads except the NMT worker thread, so it
+      // can catch up.
+      if (_slowdown_calling_thread && thread != _worker_thread) {
+        os::yield_all();
+      }
+
       if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
         JavaThread*      java_thread = (JavaThread*)thread;
         JavaThreadState  state = java_thread->thread_state();
@@ -442,6 +449,7 @@
 #define MAX_SAFEPOINTS_TO_SKIP     128
 #define SAFE_SEQUENCE_THRESHOLD    30
 #define HIGH_GENERATION_THRESHOLD  60
+#define MAX_RECORDER_THREAD_RATIO  30
 
 void MemTracker::sync() {
   assert(_tracking_level > NMT_off, "NMT is not enabled");
@@ -487,6 +495,13 @@
         pending_recorders = _global_recorder;
         _global_recorder = NULL;
       }
+
+      // See if NMT has too many outstanding recorder instances; it usually
+      // means that the worker thread is lagging behind in processing them.
+      if (!AutoShutdownNMT) {
+        _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
+      }
+
       // check _worker_thread with lock to avoid racing condition
       if (_worker_thread != NULL) {
         _worker_thread->at_sync_point(pending_recorders, instanceKlass::number_of_instance_classes());
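
With AutoShutdownNMT disabled, sync() sets _slowdown_calling_thread whenever the number of outstanding recorders exceeds MAX_RECORDER_THREAD_RATIO times the thread count, and the tracking path of ordinary threads then yields so the worker thread can drain the backlog. A standalone C++11 sketch of that producer-side backpressure, with std::atomic and std::this_thread standing in for the VM primitives:

    #include <atomic>
    #include <thread>

    const int MAX_RECORDER_THREAD_RATIO = 30;

    std::atomic<bool> slowdown_calling_thread(false);
    std::atomic<int>  outstanding_recorders(0);
    int thread_count = 8;                     // illustrative fixed value

    // Periodic check (the role played by MemTracker::sync in the diff).
    void update_backpressure() {
      slowdown_calling_thread.store(
          outstanding_recorders.load() > MAX_RECORDER_THREAD_RATIO * thread_count);
    }

    // Called on the tracking path of ordinary threads: when the worker is
    // behind, give up the CPU instead of growing the backlog further.
    void record_allocation() {
      if (slowdown_calling_thread.load()) {
        std::this_thread::yield();            // analog of os::yield_all()
      }
      outstanding_recorders.fetch_add(1);
    }
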
--- a/src/share/vm/services/memTracker.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/services/memTracker.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -167,6 +167,16 @@
   // if native memory tracking tracks callsite
   static inline bool track_callsite() { return _tracking_level == NMT_detail; }
 
+  // NMT automatically shuts itself down under extreme situations by default.
+  // When the value is set to false, NMT will try its best to stay alive,
+  // even if it has to slow down the VM.
+  static inline void set_autoShutdown(bool value) {
+    AutoShutdownNMT = value;
+    if (AutoShutdownNMT && _slowdown_calling_thread) {
+      _slowdown_calling_thread = false;
+    }
+  }
+
   // shutdown native memory tracking capability. Native memory tracking
   // can be shutdown by VM when it encounters low memory scenarios.
   // Memory tracker should gracefully shutdown itself, and preserve the
@@ -436,6 +446,10 @@
   // although NMT is still procesing current generation, but
   // there is not more recorder to process, set idle state
   static volatile bool             _worker_thread_idle;
+
+  // if NMT should slow down calling thread to allow
+  // worker thread to catch up
+  static volatile bool             _slowdown_calling_thread;
 };
 
 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
--- a/src/share/vm/services/nmtDCmd.cpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/services/nmtDCmd.cpp	Thu Mar 28 10:46:36 2013 -0700
@@ -49,6 +49,9 @@
   _shutdown("shutdown", "request runtime to shutdown itself and free the " \
             "memory used by runtime.",
             "BOOLEAN", false, "false"),
+  _auto_shutdown("autoShutdown", "automatically shut itself down under "   \
+            "stress situations",
+            "BOOLEAN", true, "true"),
 #ifndef PRODUCT
   _debug("debug", "print tracker statistics. Debug only, not thread safe", \
             "BOOLEAN", false, "false"),
@@ -61,6 +64,7 @@
   _dcmdparser.add_dcmd_option(&_summary_diff);
   _dcmdparser.add_dcmd_option(&_detail_diff);
   _dcmdparser.add_dcmd_option(&_shutdown);
+  _dcmdparser.add_dcmd_option(&_auto_shutdown);
 #ifndef PRODUCT
   _dcmdparser.add_dcmd_option(&_debug);
 #endif
@@ -84,17 +88,19 @@
   }
 
   int nopt = 0;
-  if(_summary.is_set() && _summary.value()) { ++nopt; }
-  if(_detail.is_set() && _detail.value()) { ++nopt; }
-  if(_baseline.is_set() && _baseline.value()) { ++nopt; }
-  if(_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
-  if(_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
-  if(_shutdown.is_set() && _shutdown.value()) { ++nopt; }
+  if (_summary.is_set() && _summary.value()) { ++nopt; }
+  if (_detail.is_set() && _detail.value()) { ++nopt; }
+  if (_baseline.is_set() && _baseline.value()) { ++nopt; }
+  if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
+  if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
+  if (_shutdown.is_set() && _shutdown.value()) { ++nopt; }
+  if (_auto_shutdown.is_set()) { ++nopt; }
+
 #ifndef PRODUCT
-  if(_debug.is_set() && _debug.value()) { ++nopt; }
+  if (_debug.is_set() && _debug.value()) { ++nopt; }
 #endif
 
-  if(nopt > 1) {
+  if (nopt > 1) {
       output()->print_cr("At most one of the following option can be specified: " \
         "summary, detail, baseline, summary.diff, detail.diff, shutdown"
 #ifndef PRODUCT
@@ -156,6 +162,8 @@
     MemTracker::shutdown(MemTracker::NMT_shutdown_user);
     output()->print_cr("Shutdown is in progress, it will take a few moments to " \
       "completely shutdown");
+  } else if (_auto_shutdown.is_set()) {
+    MemTracker::set_autoShutdown(_auto_shutdown.value());
   } else {
     ShouldNotReachHere();
     output()->print_cr("Unknown command");
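
For reference, the new option is driven through the NMT diagnostic command like the existing ones; assuming the command keeps its usual registration name, an invocation along the lines of 'jcmd <pid> VM.native_memory autoShutdown=false' would keep NMT alive under load, and 'autoShutdown=true' would restore the default behavior.
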
--- a/src/share/vm/services/nmtDCmd.hpp	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/services/nmtDCmd.hpp	Thu Mar 28 10:46:36 2013 -0700
@@ -39,6 +39,7 @@
   DCmdArgument<bool>  _summary_diff;
   DCmdArgument<bool>  _detail_diff;
   DCmdArgument<bool>  _shutdown;
+  DCmdArgument<bool>  _auto_shutdown;
 #ifndef PRODUCT
   DCmdArgument<bool>  _debug;
 #endif
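
For reference, the new autoShutdown option is reachable through the VM.native_memory diagnostic command (roughly: jcmd <pid> VM.native_memory autoShutdown=false). The following is a minimal sketch of toggling it from Java via the DiagnosticCommand MBean; the MBean object name and the "vmNativeMemory" operation-name mapping are assumptions about the target JDK, not part of this changeset.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Sketch: ask NMT (in the current VM) to keep tracking instead of shutting
// itself down under pressure. Assumes the VM was started with
// -XX:NativeMemoryTracking=summary (or detail) so the command is accepted.
public class NmtAutoShutdownSketch {
    public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName dcmd = new ObjectName("com.sun.management:type=DiagnosticCommand");

        // Equivalent to: jcmd <pid> VM.native_memory autoShutdown=false
        String reply = (String) mbs.invoke(
            dcmd,
            "vmNativeMemory",
            new Object[] { new String[] { "autoShutdown=false" } },
            new String[] { String[].class.getName() });

        System.out.println(reply);
    }
}
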
--- a/src/share/vm/trace/trace.xml	Wed Mar 27 16:18:19 2013 -0700
+++ b/src/share/vm/trace/trace.xml	Thu Mar 28 10:46:36 2013 -0700
@@ -189,14 +189,18 @@
       <value type="ULONG" field="count" label="Total Count" />
     </event>
 
-    <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true"
-           description="Promotion of an object failed">
-      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+    <struct id="CopyFailed">
       <value type="BYTES64" field="objectCount" label="Object Count"/>
       <value type="BYTES64" field="firstSize" label="First Failed Object Size"/>
       <value type="BYTES64" field="smallestSize" label="Smallest Failed Object Size"/>
       <value type="BYTES64" field="totalSize" label="Total Object Size"/>
       <value type="OSTHREAD" field="thread" label="Running thread"/>
+    </struct>
+
+    <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true"
+           description="Promotion of an object failed">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <structvalue type="CopyFailed" field="data" label="data"/>
     </event>
 
     <event id="GCPhasePause" path="vm/gc/phases/pause" label="GC Phase Pause">
--- a/test/compiler/5091921/Test6850611.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/5091921/Test6850611.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @bug 6850611
  * @summary int / long arithmetic seems to be broken in 1.6.0_14 HotSpot Server VM (Win XP)
  *
- * @run main Test6850611
+ * @run main/timeout=480 Test6850611
  */
 
 public class Test6850611 {
--- a/test/compiler/5091921/Test6890943.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/5091921/Test6890943.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @bug 6890943
  * @summary JVM mysteriously gives wrong result on 64-bit 1.6 VMs in hotspot mode.
  *
- * @run shell Test6890943.sh
+ * @run shell/timeout=240 Test6890943.sh
  */
 import java.util.*;
 import java.io.*;
--- a/test/compiler/5091921/Test6890943.sh	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/5091921/Test6890943.sh	Thu Mar 28 10:46:36 2013 -0700
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,10 @@
 
 ${TESTJAVA}/bin/javac -d . Test6890943.java
 
-${TESTJAVA}/bin/java -XX:-PrintVMOptions ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
+${TESTJAVA}/bin/java -XX:-PrintVMOptions -XX:+IgnoreUnrecognizedVMOptions ${TESTVMOPTS} Test6890943 < input6890943.txt > pretest.out 2>&1
+
+# This test sometimes tickles an unrelated performance warning that interferes with diff.
+grep -v 'warning: Performance bug: SystemDictionary' pretest.out > test.out
 
 diff output6890943.txt test.out
 
--- a/test/compiler/5091921/Test6905845.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/5091921/Test6905845.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @bug 6905845
  * @summary Server VM improperly optimizing away loop.
  *
- * @run main Test6905845
+ * @run main/timeout=480 Test6905845
  */
 
 public class Test6905845 {
--- a/test/compiler/5091921/Test6992759.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/5091921/Test6992759.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @bug 6992759
  * @summary Bad code generated for integer <= comparison, fails for Integer.MAX_VALUE
  *
- * @run main Test6992759
+ * @run main/timeout=240 Test6992759
  */
 
 public class Test6992759 {
--- a/test/compiler/6852078/Test6852078.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/6852078/Test6852078.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  * @bug 6852078
  * @summary Disable SuperWord optimization for unsafe read/write
  *
- * @run main/othervm Test6852078
+ * @run main Test6852078
  */
 
 import java.util.*;
@@ -50,7 +50,11 @@
     }
 
     public static void main(String [] args) {
+        long start = System.currentTimeMillis();
         for (int i=0; i<2000; i++) {
+            // To protect slow systems from test-too-long timeouts
+            if ((i > 100) && ((System.currentTimeMillis() - start) > 100000))
+               break;
             Test6852078 t = new Test6852078(args);
         }
     }
--- a/test/compiler/7009359/Test7009359.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/7009359/Test7009359.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,13 +27,13 @@
  * @bug 7009359
  * @summary HS with -XX:+AggressiveOpts optimize new StringBuffer(null) so it does not throw NPE as expected
  *
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat -XX:CompileCommand=exclude,Test7009359,main Test7009359
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat -XX:CompileCommand=dontinline,Test7009359,stringmakerBUG Test7009359
  *
  */
 
 public class Test7009359 {
     public static void main (String[] args) {
-        for(int i = 0; i < 1000000; i++) {
+        for(int i = 0; i < 100000; i++) {
             if(!stringmakerBUG(null).equals("NPE")) {
                 System.out.println("StringBuffer(null) does not throw NPE");
                 System.exit(97);
--- a/test/compiler/8004741/Test8004741.java	Wed Mar 27 16:18:19 2013 -0700
+++ b/test/compiler/8004741/Test8004741.java	Thu Mar 28 10:46:36 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,70 +25,160 @@
  * @test Test8004741.java
  * @bug 8004741
  * @summary Missing compiled exception handle table entry for multidimensional array allocation
+ * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers -XX:+SafepointALot -XX:GuaranteedSafepointInterval=100 Test8004741
  * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers Test8004741
- *
  */
 
 import java.util.*;
 
 public class Test8004741 extends Thread {
 
+  static int passed = 0;
+
+  /**
+   * Allocates a 2-d array; run() calls this in a loop.
+   * Catches ThreadDeath, increments passed, and rethrows.
+   * passed is incremented here because this is the exception handler with
+   * the smallest scope; we only want to declare success when it is highly
+   * likely that the test condition
+   * (a 2-d array allocation interrupted by ThreadDeath)
+   * actually occurred.
+   */
   static int[][] test(int a, int b) throws Exception {
-    int[][] ar = null;
+    int[][] ar;
     try {
       ar = new int[a][b];
-    } catch (Error e) {
-      System.out.println("test got Error");
-      passed = true;
-      throw(e);
-    } catch (Exception e) {
-      System.out.println("test got Exception");
+    } catch (ThreadDeath e) {
+      System.out.println("test got ThreadDeath");
+      passed++;
       throw(e);
     }
     return ar;
   }
 
-  static boolean passed = false;
+  /* Cookbook wait-notify to track progress of test thread. */
+  Object progressLock = new Object();
+  private static final int NOT_STARTED = 0;
+  private static final int RUNNING = 1;
+  private static final int STOPPING = 2;
+
+  int progressState = NOT_STARTED;
 
-  public void run() {
-      System.out.println("test started");
-      try {
-        while(true) {
-          test(2,20000);
+  void toState(int state) {
+    synchronized (progressLock) {
+      progressState = state;
+      progressLock.notify();
+    }
+  }
+
+  void waitFor(int state) {
+    synchronized (progressLock) {
+      while (progressState < state) {
+        try {
+          progressLock.wait();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+          System.out.println("unexpected InterruptedException");
+          fail();
         }
-      } catch (ThreadDeath e) {
-        System.out.println("test got ThreadDeath");
-        passed = true;
-      } catch (Error e) {
-        e.printStackTrace();
-        System.out.println("test got Error");
-      } catch (Exception e) {
-        e.printStackTrace();
-        System.out.println("test got Exception");
+      }
+      if (progressState > state) {
+        System.out.println("unexpected test state change, expected " +
+                            state + " but saw " + progressState);
+        fail();
+      }
+    }
+  }
+
+  /**
+   * Loops running test until some sort of an exception or error,
+   * expects to see ThreadDeath.
+   */
+  public void run() {
+    try {
+      // Print before the state change, so that the other thread is most
+      // likely to see this thread executing calls to test() in a loop.
+      System.out.println("thread running");
+      toState(RUNNING);
+      while (true) {
+        // (2,2) (2,10) (2,100) were observed to tickle the bug;
+        test(2, 100);
       }
+    } catch (ThreadDeath e) {
+      // expected; passed was incremented inside test().
+    } catch (Throwable e) {
+      e.printStackTrace();
+      System.out.println("unexpected Throwable " + e);
+      fail();
+    }
+    toState(STOPPING);
+  }
+
+  /**
+   * Runs a single trial of the test in a thread.
+   * No single trial is definitive, since the ThreadDeath
+   * exception might not land in the tested region of code.
+   */
+  public static void threadTest() throws InterruptedException {
+    Test8004741 t = new Test8004741();
+    t.start();
+    t.waitFor(RUNNING);
+    Thread.sleep(100);
+    System.out.println("stopping thread");
+    t.stop();
+    t.waitFor(STOPPING);
+    t.join();
   }
 
   public static void main(String[] args) throws Exception {
+    // Warm up test() so that it gets compiled.
     for (int n = 0; n < 11000; n++) {
-      test(2, 20);
+      test(2, 100);
+    }
+
+    // Will this sleep help ensure that the compiler is run?
+    Thread.sleep(500);
+    passed = 0;
+
+    try {
+      test(-1, 100);
+      System.out.println("Missing NegativeArraySizeException #1");
+      fail();
+    } catch ( java.lang.NegativeArraySizeException e ) {
+      System.out.println("Saw expected NegativeArraySizeException #1");
     }
 
-    // First test exception catch
-    Test8004741 t = new Test8004741();
+    try {
+      test(100, -1);
+      System.out.println("Missing NegativeArraySizeException #2");
+      fail();
+    } catch ( java.lang.NegativeArraySizeException e ) {
+      System.out.println("Saw expected NegativeArraySizeException #2");
+    }
 
-    passed = false;
-    t.start();
-    Thread.sleep(1000);
-    t.stop();
+    /* Test repetitions.  The test passes if most trials succeed,
+     * as long as it does not crash (the outcome if the exception range
+     * table entry for the array allocation is missing).
+     */
+    int N = 12;
+    for (int n = 0; n < N; n++) {
+      threadTest();
+    }
 
-    Thread.sleep(5000);
-    t.join();
-    if (passed) {
+    if (passed > N/2) {
+      System.out.println("Saw " + passed + " out of " + N + " possible ThreadDeath hits");
       System.out.println("PASSED");
     } else {
-      System.out.println("FAILED");
-      System.exit(97);
+      System.out.println("Too few ThreadDeath hits; expected at least " + N/2 +
+                         " but saw only " + passed);
+      fail();
     }
   }
 
+  static void fail() {
+    System.out.println("FAILED");
+    System.exit(97);
+  }
 };
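
The rewritten Test8004741 above relies on a probabilistic pattern: deliver ThreadDeath with Thread.stop() over many trials and require only that a majority of them land inside the guarded 2-d array allocation, since any single trial may miss it. A stripped-down sketch of that pattern follows; the class and method names are illustrative only.

// Sketch of the majority-of-trials pattern: repeatedly stop a worker thread
// and count how often ThreadDeath lands inside the guarded allocation.
// Thread.stop() is deprecated but still functional on the JDKs this test targets.
public class StopHitRatioSketch {
    static volatile int hits = 0;

    static class Worker extends Thread {
        public void run() {
            try {
                while (true) {
                    try {
                        int[][] a = new int[2][100];   // region under test
                    } catch (ThreadDeath td) {
                        hits++;                        // async exception landed in the guarded region
                        throw td;
                    }
                }
            } catch (ThreadDeath expected) {
                // exits quietly; if the exception missed the inner try, hits is not incremented
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final int trials = 12;
        for (int n = 0; n < trials; n++) {
            Worker w = new Worker();
            w.start();
            Thread.sleep(100);   // let the worker reach the allocation loop
            w.stop();            // deliver ThreadDeath asynchronously
            w.join();
        }
        System.out.println(hits + "/" + trials + " trials hit the guarded region");
        if (hits <= trials / 2) {
            System.exit(97);     // same failure convention as the real test
        }
    }
}
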
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8007294/Test8007294.java	Thu Mar 28 10:46:36 2013 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8007294
+ * @summary ReduceFieldZeroing doesn't check for dependent load and can lead to incorrect execution
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+AlwaysIncrementalInline -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8007294
+ *
+ */
+
+public class Test8007294 {
+
+    int i1;
+    int i2;
+
+    Test8007294(int i1, int i2) {
+        this.i1 = i1;
+        this.i2 = i2;
+    }
+
+    static int m(int v) {
+        return v;
+    }
+
+    static Test8007294 test1() {
+        Test8007294 obj = new Test8007294(10, 100);
+        int v1 = obj.i1;
+
+        int v3 = m(v1);
+        int v2 = obj.i2;
+        obj.i2 = v3;
+        obj.i1 = v2;
+
+        return obj;
+    }
+
+    static int test2(int i) {
+        int j = 0;
+        if (i > 0) {
+            j = 1;
+        }
+
+        int[] arr = new int[10];
+        arr[0] = 1;
+        arr[1] = 2;
+        int v1 = arr[j];
+        arr[0] = 3;
+        arr[1] = 4;
+
+        return v1;
+    }
+
+    static public void main(String[] args) {
+        boolean failed = false;
+        for (int i = 0; i < 20000; i++) {
+            Test8007294 obj = test1();
+            if (obj.i1 != 100 || obj.i2 != 10) {
+                System.out.println("FAILED test1 obj.i1 = " + obj.i1 +", obj.i2 = " + obj.i2);
+                failed = true;
+                break;
+            }
+        }
+        for (int i = 0; i < 20000; i++) {
+            int res = test2(1);
+            if (res != 2) {
+                System.out.println("FAILED test2 = " + res);
+                failed = true;
+                break;
+            }
+        }
+        if (failed) {
+            System.exit(97);
+        } else {
+            System.out.println("PASSED");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8007722/Test8007722.java	Thu Mar 28 10:46:36 2013 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8007722
+ * @summary GetAndSetP's MachNode should capture bottom type
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8007722
+ *
+ */
+
+import java.util.concurrent.atomic.*;
+
+public class Test8007722 {
+
+    int i;
+    static AtomicReference<Test8007722> ref;
+
+    static int test(Test8007722 new_obj) {
+        Test8007722 o = ref.getAndSet(new_obj);
+        int ret = o.i;
+        o.i = 5;
+        return ret;
+    }
+
+    static public void main(String[] args) {
+        Test8007722 obj = new Test8007722();
+        ref = new AtomicReference<Test8007722>(obj);
+
+        for (int i = 0; i < 20000; i++) {
+            test(obj);
+        }
+
+        System.out.println("PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/8009761/Test8009761.java	Thu Mar 28 10:46:36 2013 -0700
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8009761
+ * @summary Deoptimization on sparc doesn't set Llast_SP correctly in the interpreter frames it creates
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8009761
+ *
+ */
+
+public class Test8009761 {
+
+    static class UnloadedClass {
+        volatile int i;
+    }
+
+    static Object m1(boolean deopt) {
+        // When running interpreted, on sparc, the caller's stack is
+        // extended for the locals and the caller's frame is restored
+        // on return.
+        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+        l508, l509, l510, l511;
+
+        long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
+        ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
+        ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
+        ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
+        ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
+        ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
+        ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
+        ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
+        ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
+        ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
+        ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
+        ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
+        ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
+        ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
+        ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
+        ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
+        ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
+        ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
+        ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
+        ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
+        ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
+        ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
+        ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
+        ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
+        ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
+        ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
+        ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
+        ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
+        ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
+        ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
+        ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
+        ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
+        ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
+        ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
+        ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
+        ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
+        ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
+        ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
+        ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
+        ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
+        ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
+        ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
+        ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
+        ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
+        ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
+        ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
+        ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
+        ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
+        ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
+        ll508, ll509, ll510, ll511;
+
+        if (deopt) {
+            UnloadedClass res = new UnloadedClass(); // sufficient to force deopt with c2 but not c1
+            res.i = 0; // forces deopt with c1
+            return res;
+        }
+        return null;
+    }
+
+    static int count = 0;
+
+    static void m2() {
+        // Called recursively until a stack overflow exception occurs.
+        // Has a lot of locals so that it is not called a sufficient
+        // number of times to trigger compilation.
+
+        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+        l508, l509, l510, l511;
+
+        count++;
+        m2();
+    }
+
+    static Object m3(boolean overflow_stack, boolean deopt) {
+        if (overflow_stack) {
+            m2();
+            return null;
+        }
+        Object o = m1(deopt);
+        if (deopt) {
+            m2();
+        }
+        return o;
+    }
+
+    static public void main(String[] args) {
+        int c1;
+        // Call m2 from m3 recursively until stack overflow. Count the number of recursive calls.
+        try {
+            m3(true, false);
+        } catch(StackOverflowError soe) {
+        }
+        c1 = count;
+        // Force the compilation of m3() that will inline m1()
+        for (int i = 0; i < 20000; i++) {
+            m3(false, false);
+        }
+        count = 0;
+        // Force deoptimization of m3() in m1(), then return from m1()
+        // to m3() and call m2() recursively. If deoptimization correctly
+        // built the interpreter stack for m3()/m1(), then we should be
+        // able to call m2() recursively as many times as before.
+        try {
+            m3(false, true);
+        } catch(StackOverflowError soe) {
+        }
+        if (c1 != count) {
+            System.out.println("Failed: init recursive calls: " + c1 + ". After deopt " + count);
+            System.exit(97);
+        } else {
+            System.out.println("PASSED");
+        }
+    }
+}