changeset 4158:1cdf241a4b26

Merge
author vlivanov
date Thu, 14 Feb 2013 05:36:59 -0800
parents b8d5d7a6c94c (current diff) 12e01444ca2d (diff)
children 9f19f4a7d48a dc1de5e78a85
files src/share/vm/runtime/arguments.cpp
diffstat 29 files changed, 1044 insertions(+), 62 deletions(-)
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -2270,10 +2270,11 @@
 }
 
 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, bool vector256) {
-    int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
-    emit_int8(0x00);
-    emit_int8(0xC0 | encode);
-    emit_int8(imm8);
+  assert(VM_Version::supports_avx2(), "");
+  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true, vector256);
+  emit_int8(0x00);
+  emit_int8(0xC0 | encode);
+  emit_int8(imm8);
 }
 
 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -5691,7 +5691,7 @@
   Address::ScaleFactor scale = Address::times_2;
   int stride = 8;
 
-  if (UseAVX >= 2) {
+  if (UseAVX >= 2 && UseSSE42Intrinsics) {
     Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
     Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
     Label COMPARE_TAIL_LONG;
--- a/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	Thu Feb 14 05:36:59 2013 -0800
@@ -23,6 +23,8 @@
  */
 
 package sun.hotspot;
+
+import java.lang.reflect.Method;
 import java.security.BasicPermission;
 import sun.hotspot.parser.DiagnosticCommand;
 
@@ -81,4 +83,15 @@
   public native boolean NMTAllocTest();
   public native boolean NMTFreeTestMemory();
   public native boolean NMTWaitForDataMerge();
+
+  // Compiler
+  public native void    deoptimizeAll();
+  public native boolean isMethodCompiled(Method method);
+  public native boolean isMethodCompilable(Method method);
+  public native boolean isMethodQueuedForCompilation(Method method);
+  public native int     deoptimizeMethod(Method method);
+  public native void    makeMethodNotCompilable(Method method);
+  public native int     getMethodCompilationLevel(Method method);
+  public native boolean setDontInlineMethod(Method method, boolean value);
+  public native int     getCompileQueuesSize();
 }
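
The methods added above are plain native stubs; their VM-side implementations are registered further down in this changeset (src/share/vm/prims/whitebox.cpp). As a rough usage sketch, assuming the VM is started with -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI and the whitebox classes are on the classpath (the sketch class and its method names are hypothetical, not part of the changeset):

import java.lang.reflect.Method;
import sun.hotspot.WhiteBox;

public class WhiteBoxCompilerSketch {
    private static final WhiteBox WB = WhiteBox.getWhiteBox();

    static int payload() {
        return 42;
    }

    public static void main(String[] args) throws Exception {
        Method m = WhiteBoxCompilerSketch.class.getDeclaredMethod("payload");
        WB.setDontInlineMethod(m, true);   // keep payload() a separate compilation unit
        for (int i = 0; i < 20000; i++) {  // warm up well past the default CompileThreshold
            payload();
        }
        // With background compilation the nmethod may not be ready yet at this point.
        System.out.println("compiled:   " + WB.isMethodCompiled(m));
        System.out.println("comp_level: " + WB.getMethodCompilationLevel(m));
        WB.deoptimizeMethod(m);            // discard the compiled code again
        System.out.println("compiled:   " + WB.isMethodCompiled(m));
    }
}

The CompilerWhiteBoxTest.java added at the end of this changeset follows the same pattern, but reads CompileThreshold via HotSpotDiagnosticMXBean and waits for background compilation instead of assuming a fixed warm-up count.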
--- a/src/share/vm/asm/macroAssembler.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/asm/macroAssembler.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -37,10 +37,10 @@
 # include "assembler_zero.hpp"
 #endif
 #ifdef TARGET_ARCH_arm
-# include "assembler_arm.hpp"
+# include "macroAssembler_arm.hpp"
 #endif
 #ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.hpp"
+# include "macroAssembler_ppc.hpp"
 #endif
 
 #endif // SHARE_VM_ASM_MACROASSEMBLER_HPP
--- a/src/share/vm/asm/macroAssembler.inline.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/asm/macroAssembler.inline.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -37,10 +37,10 @@
 # include "assembler_zero.inline.hpp"
 #endif
 #ifdef TARGET_ARCH_arm
-# include "assembler_arm.inline.hpp"
+# include "macroAssembler_arm.inline.hpp"
 #endif
 #ifdef TARGET_ARCH_ppc
-# include "assembler_ppc.inline.hpp"
+# include "macroAssembler_ppc.inline.hpp"
 #endif
 
 #endif // SHARE_VM_ASM_MACROASSEMBLER_INLINE_HPP
--- a/src/share/vm/opto/c2_globals.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/c2_globals.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -618,6 +618,9 @@
                                                                             \
   product(intx, LiveNodeCountInliningCutoff, 20000,                         \
           "max number of live nodes in a method")                           \
+                                                                            \
+  diagnostic(bool, OptimizeExpensiveOps, true,                              \
+          "Find best control for expensive operations")                     \
 
 
 C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
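
OptimizeExpensiveOps is a C2 diagnostic flag, so toggling it requires -XX:+UnlockDiagnosticVMOptions (the globals.cpp change later in this changeset is what lets the "{C2 diagnostic}" kind honor that switch). It gates the expensive-node bookkeeping added in the rest of this changeset; with -XX:-OptimizeExpensiveOps the control input is cleared as soon as an expensive node is created and IGVN commons such nodes freely, as before. A hedged Java illustration of the code shape the bookkeeping is meant to handle (example code, not part of the changeset):

public class ExpensiveOpsShape {
    // Math.pow is intrinsified into a PowDNode, which this changeset marks
    // as expensive and pins with a control input so that GVN cannot common
    // such nodes onto a more frequent path. For this shape, where both
    // branches compute pow with identical inputs, process_expensive_nodes()
    // can move both controls above the if, and IGVN then commons them into
    // a single node that executes exactly as often as before.
    static double bothBranches(double x, double y, boolean flag) {
        if (flag) {
            return Math.pow(x, y) + 1.0;
        }
        return Math.pow(x, y) - 1.0;
    }

    public static void main(String[] args) {
        System.out.println(bothBranches(2.0, 10.0, true));
        System.out.println(bothBranches(2.0, 10.0, false));
    }
}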
--- a/src/share/vm/opto/compile.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/compile.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -409,6 +409,13 @@
       remove_macro_node(n);
     }
   }
+  // Remove useless expensive nodes
+  for (int i = C->expensive_count()-1; i >= 0; i--) {
+    Node* n = C->expensive_node(i);
+    if (!useful.member(n)) {
+      remove_expensive_node(n);
+    }
+  }
   // clean up the late inline lists
   remove_useless_late_inlines(&_string_late_inlines, useful);
   remove_useless_late_inlines(&_late_inlines, useful);
@@ -1061,6 +1068,7 @@
   _intrinsics = NULL;
   _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   register_library_intrinsics();
 }
 
@@ -1927,6 +1935,10 @@
 
   if (failing())  return;
 
+  // No more new expensive nodes will be added to the list from here
+  // so keep only the actual candidates for optimizations.
+  cleanup_expensive_nodes(igvn);
+
   // Perform escape analysis
   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
     if (has_loops()) {
@@ -3010,6 +3022,15 @@
     return true;
   }
 
+  // Expensive nodes have their control input set to prevent the GVN
+  // from freely commoning them. There's no GVN beyond this point so
+  // no need to keep the control input. We want the expensive nodes to
+  // be freely moved to the least frequent code path by gcm.
+  assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
+  for (int i = 0; i < expensive_count(); i++) {
+    _expensive_nodes->at(i)->set_req(0, NULL);
+  }
+
   Final_Reshape_Counts frc;
 
   // Visit everybody reachable!
@@ -3525,3 +3546,126 @@
     }
   }
 }
+
+int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
+  if (n1->Opcode() < n2->Opcode())      return -1;
+  else if (n1->Opcode() > n2->Opcode()) return 1;
+
+  assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
+  for (uint i = 1; i < n1->req(); i++) {
+    if (n1->in(i) < n2->in(i))      return -1;
+    else if (n1->in(i) > n2->in(i)) return 1;
+  }
+
+  return 0;
+}
+
+int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
+  Node* n1 = *n1p;
+  Node* n2 = *n2p;
+
+  return cmp_expensive_nodes(n1, n2);
+}
+
+void Compile::sort_expensive_nodes() {
+  if (!expensive_nodes_sorted()) {
+    _expensive_nodes->sort(cmp_expensive_nodes);
+  }
+}
+
+bool Compile::expensive_nodes_sorted() const {
+  for (int i = 1; i < _expensive_nodes->length(); i++) {
+    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
+  if (_expensive_nodes->length() == 0) {
+    return false;
+  }
+
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  // Take this opportunity to remove dead nodes from the list
+  int j = 0;
+  for (int i = 0; i < _expensive_nodes->length(); i++) {
+    Node* n = _expensive_nodes->at(i);
+    if (!n->is_unreachable(igvn)) {
+      assert(n->is_expensive(), "should be expensive");
+      _expensive_nodes->at_put(j, n);
+      j++;
+    }
+  }
+  _expensive_nodes->trunc_to(j);
+
+  // Then sort the list so that similar nodes are next to each other
+  // and check for at least two nodes of identical kind with same data
+  // inputs.
+  sort_expensive_nodes();
+
+  for (int i = 0; i < _expensive_nodes->length()-1; i++) {
+    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
+  if (_expensive_nodes->length() == 0) {
+    return;
+  }
+
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  // Sort to bring similar nodes next to each other and clear the
+  // control input of nodes for which there's only a single copy.
+  sort_expensive_nodes();
+
+  int j = 0;
+  int identical = 0;
+  int i = 0;
+  for (; i < _expensive_nodes->length()-1; i++) {
+    assert(j <= i, "can't write beyond current index");
+    if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
+      identical++;
+      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
+      continue;
+    }
+    if (identical > 0) {
+      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
+      identical = 0;
+    } else {
+      Node* n = _expensive_nodes->at(i);
+      igvn.hash_delete(n);
+      n->set_req(0, NULL);
+      igvn.hash_insert(n);
+    }
+  }
+  if (identical > 0) {
+    _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
+  } else if (_expensive_nodes->length() >= 1) {
+    Node* n = _expensive_nodes->at(i);
+    igvn.hash_delete(n);
+    n->set_req(0, NULL);
+    igvn.hash_insert(n);
+  }
+  _expensive_nodes->trunc_to(j);
+}
+
+void Compile::add_expensive_node(Node * n) {
+  assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list");
+  assert(n->is_expensive(), "expensive nodes with non-null control here only");
+  assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
+  if (OptimizeExpensiveOps) {
+    _expensive_nodes->append(n);
+  } else {
+    // Clear control input and let IGVN optimize expensive nodes if
+    // OptimizeExpensiveOps is off.
+    n->set_req(0, NULL);
+  }
+}
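
should_optimize_expensive_nodes() and cleanup_expensive_nodes() above rely on the same idea: sort the list so that similar nodes become neighbours, then a single linear scan can find duplicate candidates or drop singletons. A stand-alone sketch of that grouping step in Java, using plain integer keys instead of ideal-graph nodes (illustrative only, hypothetical names):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class CleanupSketch {
    // Mirrors the shape of cleanup_expensive_nodes(): after sorting, keep
    // only entries that have at least one neighbour with the same key;
    // singletons are released (in the VM: their control input is cleared
    // so IGVN may common them freely).
    static List<Integer> keepOnlyGroups(List<Integer> keys) {
        List<Integer> sorted = new ArrayList<>(keys);
        Collections.sort(sorted);
        List<Integer> kept = new ArrayList<>();
        for (int i = 0; i < sorted.size(); i++) {
            boolean sameAsPrev = i > 0 && sorted.get(i).equals(sorted.get(i - 1));
            boolean sameAsNext = i + 1 < sorted.size() && sorted.get(i).equals(sorted.get(i + 1));
            if (sameAsPrev || sameAsNext) {
                kept.add(sorted.get(i));
            }
        }
        return kept;
    }
}

In the VM code the release branch additionally clears the node's control input under the IGVN hash table bookkeeping (hash_delete / set_req / hash_insert).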
--- a/src/share/vm/opto/compile.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/compile.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -314,6 +314,7 @@
   GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
   GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
   GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
+  GrowableArray<Node*>* _expensive_nodes;       // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
   ConnectionGraph*      _congraph;
 #ifndef PRODUCT
   IdealGraphPrinter*    _printer;
@@ -398,6 +399,13 @@
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
   int _print_inlining;
 
+  // Only keep nodes in the expensive node list that need to be optimized
+  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
+  // Used for sorting expensive nodes to bring similar nodes together
+  static int cmp_expensive_nodes(Node** n1, Node** n2);
+  // Expensive nodes list already sorted?
+  bool expensive_nodes_sorted() const;
+
  public:
 
   outputStream* print_inlining_stream() const {
@@ -573,8 +581,10 @@
 
   int           macro_count()                   { return _macro_nodes->length(); }
   int           predicate_count()               { return _predicate_opaqs->length();}
+  int           expensive_count()               { return _expensive_nodes->length(); }
   Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
   Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
+  Node*         expensive_node(int idx)         { return _expensive_nodes->at(idx); }
   ConnectionGraph* congraph()                   { return _congraph;}
   void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
   void add_macro_node(Node * n) {
@@ -592,6 +602,12 @@
       _predicate_opaqs->remove(n);
     }
   }
+  void add_expensive_node(Node * n);
+  void remove_expensive_node(Node * n) {
+    if (_expensive_nodes->contains(n)) {
+      _expensive_nodes->remove(n);
+    }
+  }
   void add_predicate_opaq(Node * n) {
     assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
     assert(_macro_nodes->contains(n), "should have already been in macro list");
@@ -604,6 +620,13 @@
     return _predicate_opaqs->contains(n);
   }
 
+  // Are there candidate expensive nodes for optimization?
+  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
+  // Check whether n1 and n2 are similar
+  static int cmp_expensive_nodes(Node* n1, Node* n2);
+  // Sort expensive nodes to locate similar expensive nodes
+  void sort_expensive_nodes();
+
   // Compilation environment.
   Arena*            comp_arena()                { return &_comp_arena; }
   ciEnv*            env() const                 { return _env; }
--- a/src/share/vm/opto/library_call.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/library_call.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -1653,7 +1653,7 @@
 // really odd corner cases (+/- Infinity).  Just uncommon-trap them.
 bool LibraryCallKit::inline_exp() {
   Node* arg = round_double_node(argument(0));
-  Node* n   = _gvn.transform(new (C) ExpDNode(0, arg));
+  Node* n   = _gvn.transform(new (C) ExpDNode(C, control(), arg));
 
   finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
 
@@ -1688,7 +1688,7 @@
 
   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
     // Short form: skip the fancy tests and just check for NaN result.
-    result = _gvn.transform(new (C) PowDNode(0, x, y));
+    result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
   } else {
     // If this inlining ever returned NaN in the past, include all
     // checks + call to the runtime.
@@ -1715,7 +1715,7 @@
     Node *complex_path = _gvn.transform( new (C) IfTrueNode(if1) );
 
     // Set fast path result
-    Node *fast_result = _gvn.transform( new (C) PowDNode(0, x, y) );
+    Node *fast_result = _gvn.transform( new (C) PowDNode(C, control(), x, y) );
     phi->init_req(3, fast_result);
 
     // Complex path
@@ -1775,7 +1775,7 @@
     // abs(x)
     Node *absx=_gvn.transform( new (C) AbsDNode(x));
     // abs(x)^y
-    Node *absxpowy = _gvn.transform( new (C) PowDNode(0, absx, y) );
+    Node *absxpowy = _gvn.transform( new (C) PowDNode(C, control(), absx, y) );
     // -abs(x)^y
     Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
     // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
--- a/src/share/vm/opto/loopnode.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/loopnode.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -88,9 +88,9 @@
   assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
   uint i;
   Node *early;
-  if( n->in(0) ) {
+  if (n->in(0) && !n->is_expensive()) {
     early = n->in(0);
-    if( !early->is_CFG() ) // Might be a non-CFG multi-def
+    if (!early->is_CFG()) // Might be a non-CFG multi-def
       early = get_ctrl(early);        // So treat input as a straight data input
     i = 1;
   } else {
@@ -99,28 +99,28 @@
   }
   uint e_d = dom_depth(early);
   assert( early, "" );
-  for( ; i < n->req(); i++ ) {
+  for (; i < n->req(); i++) {
     Node *cin = get_ctrl(n->in(i));
     assert( cin, "" );
     // Keep deepest dominator depth
     uint c_d = dom_depth(cin);
-    if( c_d > e_d ) {           // Deeper guy?
+    if (c_d > e_d) {           // Deeper guy?
       early = cin;              // Keep deepest found so far
       e_d = c_d;
-    } else if( c_d == e_d &&    // Same depth?
-               early != cin ) { // If not equal, must use slower algorithm
+    } else if (c_d == e_d &&    // Same depth?
+               early != cin) { // If not equal, must use slower algorithm
       // If same depth but not equal, one _must_ dominate the other
       // and we want the deeper (i.e., dominated) guy.
       Node *n1 = early;
       Node *n2 = cin;
-      while( 1 ) {
+      while (1) {
         n1 = idom(n1);          // Walk up until break cycle
         n2 = idom(n2);
-        if( n1 == cin ||        // Walked early up to cin
-            dom_depth(n2) < c_d )
+        if (n1 == cin ||        // Walked early up to cin
+            dom_depth(n2) < c_d)
           break;                // early is deeper; keep him
-        if( n2 == early ||      // Walked cin up to early
-            dom_depth(n1) < c_d ) {
+        if (n2 == early ||      // Walked cin up to early
+            dom_depth(n1) < c_d) {
           early = cin;          // cin is deeper; keep him
           break;
         }
@@ -132,9 +132,108 @@
   // Return earliest legal location
   assert(early == find_non_split_ctrl(early), "unexpected early control");
 
+  if (n->is_expensive()) {
+    assert(n->in(0), "should have control input");
+    early = get_early_ctrl_for_expensive(n, early);
+  }
+
   return early;
 }
 
+//------------------------------get_early_ctrl_for_expensive---------------------------------
+// Move node up the dominator tree as high as legal while still beneficial
+Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
+  assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  Node* ctl = n->in(0);
+  assert(ctl->is_CFG(), "expensive input 0 must be cfg");
+  uint min_dom_depth = dom_depth(earliest);
+#ifdef ASSERT
+  if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
+    dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
+    assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
+  }
+#endif
+  if (dom_depth(ctl) < min_dom_depth) {
+    return earliest;
+  }
+
+  while (1) {
+    Node *next = ctl;
+    // Moving the node out of a loop on the projection of an If
+    // confuses loop predication. So once we hit a Loop in an If branch
+    // that doesn't branch to an UNC, we stop. The code that processes
+    // expensive nodes will notice the loop and skip over it to try to
+    // move the node further up.
+    if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
+      if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) {
+        break;
+      }
+      next = idom(ctl->in(1)->in(0));
+    } else if (ctl->is_Proj()) {
+      // We only move it up along a projection if the projection is
+      // the single control projection for its parent: same code path,
+      // if it's an If with UNC or fallthrough of a call.
+      Node* parent_ctl = ctl->in(0);
+      if (parent_ctl == NULL) {
+        break;
+      } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
+        next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
+      } else if (parent_ctl->is_If()) {
+        if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) {
+          break;
+        }
+        assert(idom(ctl) == parent_ctl, "strange");
+        next = idom(parent_ctl);
+      } else if (ctl->is_CatchProj()) {
+        if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
+          break;
+        }
+        assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
+        next = parent_ctl->in(0)->in(0)->in(0);
+      } else {
+        // Check if parent control has a single projection (this
+        // control is the only possible successor of the parent
+        // control). If so, we can try to move the node above the
+        // parent control.
+        int nb_ctl_proj = 0;
+        for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
+          Node *p = parent_ctl->fast_out(i);
+          if (p->is_Proj() && p->is_CFG()) {
+            nb_ctl_proj++;
+            if (nb_ctl_proj > 1) {
+              break;
+            }
+          }
+        }
+
+        if (nb_ctl_proj > 1) {
+          break;
+        }
+        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call(), "unexpected node");
+        assert(idom(ctl) == parent_ctl, "strange");
+        next = idom(parent_ctl);
+      }
+    } else {
+      next = idom(ctl);
+    }
+    if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
+      break;
+    }
+    ctl = next;
+  }
+
+  if (ctl != n->in(0)) {
+    _igvn.hash_delete(n);
+    n->set_req(0, ctl);
+    _igvn.hash_insert(n);
+  }
+
+  return ctl;
+}
+
+
 //------------------------------set_early_ctrl---------------------------------
 // Set earliest legal control
 void PhaseIdealLoop::set_early_ctrl( Node *n ) {
@@ -1892,6 +1991,98 @@
   }
 }
 
+//------------------------process_expensive_nodes-----------------------------
+// Expensive nodes have their control input set to prevent the GVN
+// from commoning them, since commoning could force the resulting
+// node onto a more frequent path. Use CFG information here to change
+// the control inputs so that some expensive nodes can be commoned
+// without being executed more frequently.
+bool PhaseIdealLoop::process_expensive_nodes() {
+  assert(OptimizeExpensiveOps, "optimization off?");
+
+  // Sort nodes to bring similar nodes together
+  C->sort_expensive_nodes();
+
+  bool progress = false;
+
+  for (int i = 0; i < C->expensive_count(); ) {
+    Node* n = C->expensive_node(i);
+    int start = i;
+    // Find nodes similar to n
+    i++;
+    for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++);
+    int end = i;
+    // And compare them two by two
+    for (int j = start; j < end; j++) {
+      Node* n1 = C->expensive_node(j);
+      if (is_node_unreachable(n1)) {
+        continue;
+      }
+      for (int k = j+1; k < end; k++) {
+        Node* n2 = C->expensive_node(k);
+        if (is_node_unreachable(n2)) {
+          continue;
+        }
+
+        assert(n1 != n2, "should be pair of nodes");
+
+        Node* c1 = n1->in(0);
+        Node* c2 = n2->in(0);
+
+        Node* parent_c1 = c1;
+        Node* parent_c2 = c2;
+
+        // The call to get_early_ctrl_for_expensive() moves the
+        // expensive nodes up but stops at loops that are in an If
+        // branch. See whether we can exit the loop and move above the
+        // If.
+        if (c1->is_Loop()) {
+          parent_c1 = c1->in(1);
+        }
+        if (c2->is_Loop()) {
+          parent_c2 = c2->in(1);
+        }
+
+        if (parent_c1 == parent_c2) {
+          _igvn._worklist.push(n1);
+          _igvn._worklist.push(n2);
+          continue;
+        }
+
+        // Look for identical expensive node up the dominator chain.
+        if (is_dominator(c1, c2)) {
+          c2 = c1;
+        } else if (is_dominator(c2, c1)) {
+          c1 = c2;
+        } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() &&
+                   parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) {
+          // Both branches have the same expensive node so move it up
+          // before the if.
+          c1 = c2 = idom(parent_c1->in(0));
+        }
+        // Do the actual moves
+        if (n1->in(0) != c1) {
+          _igvn.hash_delete(n1);
+          n1->set_req(0, c1);
+          _igvn.hash_insert(n1);
+          _igvn._worklist.push(n1);
+          progress = true;
+        }
+        if (n2->in(0) != c2) {
+          _igvn.hash_delete(n2);
+          n2->set_req(0, c2);
+          _igvn.hash_insert(n2);
+          _igvn._worklist.push(n2);
+          progress = true;
+        }
+      }
+    }
+  }
+
+  return progress;
+}
+
+
 //=============================================================================
 //----------------------------build_and_optimize-------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
@@ -1960,7 +2151,9 @@
   }
 
   // Nothing to do, so get out
-  if( !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only ) {
+  bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only;
+  bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
+  if (stop_early && !do_expensive_nodes) {
     _igvn.optimize();           // Cleanup NeverBranches
     return;
   }
@@ -2058,6 +2251,21 @@
     return;
   }
 
+  if (stop_early) {
+    assert(do_expensive_nodes, "why are we here?");
+    if (process_expensive_nodes()) {
+      // If we made some progress when processing expensive nodes then
+      // the IGVN may modify the graph in a way that will allow us to
+      // make some more progress: we need to try processing expensive
+      // nodes again.
+      C->set_major_progress();
+    }
+
+    _igvn.optimize();
+
+    return;
+  }
+
   // Some parser-inserted loop predicates could never be used by loop
   // predication or they were moved away from loop during some optimizations.
   // For example, peeling. Eliminate them before next loop optimizations.
@@ -2120,6 +2328,10 @@
     NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
   }
 
+  if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
+    C->set_major_progress();
+  }
+
   // Perform loop predication before iteration splitting
   if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) {
     _ltree_root->_child->loop_predication(this);
@@ -3299,7 +3511,7 @@
 #ifdef ASSERT
     if (legal->is_Start() && !early->is_Root()) {
       // Bad graph. Print idom path and fail.
-      dump_bad_graph(n, early, LCA);
+      dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA);
       assert(false, "Bad graph detected in build_loop_late");
     }
 #endif
@@ -3350,8 +3562,8 @@
 }
 
 #ifdef ASSERT
-void PhaseIdealLoop::dump_bad_graph(Node* n, Node* early, Node* LCA) {
-  tty->print_cr( "Bad graph detected in build_loop_late");
+void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) {
+  tty->print_cr(msg);
   tty->print("n: "); n->dump();
   tty->print("early(n): "); early->dump();
   if (n->in(0) != NULL  && !n->in(0)->is_top() &&
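
Besides the two-projections-of-the-same-If case handled at the end of process_expensive_nodes(), the dominator checks in that loop also allow commoning when one occurrence of the expensive operation already dominates the other. A hedged Java illustration of that shape (example code, not part of the changeset):

public class DominatingExpensiveOp {
    // The first Math.pow dominates the one in the rarely taken branch and
    // uses the same inputs, so process_expensive_nodes() can give both
    // nodes the dominating control and IGVN commons them: the result is
    // reused instead of recomputed, and nothing executes more often than
    // it already did.
    static double compute(double x, double y, boolean rare) {
        double base = Math.pow(x, y);          // always executed
        if (rare) {
            return base + Math.pow(x, y);      // identical, dominated occurrence
        }
        return base;
    }

    public static void main(String[] args) {
        System.out.println(compute(3.0, 4.0, false));
        System.out.println(compute(3.0, 4.0, true));
    }
}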
--- a/src/share/vm/opto/loopnode.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/loopnode.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -263,9 +263,18 @@
   bool stride_is_con() const        { Node *tmp = stride  (); return (tmp != NULL && tmp->is_Con()); }
   BoolTest::mask test_trip() const  { return in(TestValue)->as_Bool()->_test._test; }
   CountedLoopNode *loopnode() const {
+    // The CountedLoopNode that goes with this CountedLoopEndNode may
+    // have been optimized out by the IGVN so be cautious with the
+    // pattern matching on the graph
+    if (phi() == NULL) {
+      return NULL;
+    }
     Node *ln = phi()->in(0);
-    assert( ln->Opcode() == Op_CountedLoop, "malformed loop" );
-    return (CountedLoopNode*)ln; }
+    if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
+      return (CountedLoopNode*)ln;
+    }
+    return NULL;
+  }
 
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
@@ -598,6 +607,7 @@
   // check if transform created new nodes that need _ctrl recorded
   Node *get_late_ctrl( Node *n, Node *early );
   Node *get_early_ctrl( Node *n );
+  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
   void set_early_ctrl( Node *n );
   void set_subtree_ctrl( Node *root );
   void set_ctrl( Node *n, Node *ctrl ) {
@@ -905,6 +915,16 @@
   void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
   void eliminate_useless_predicates();
 
+  // Change the control input of expensive nodes to allow commoning by
+  // IGVN when it is guaranteed to not result in a more frequent
+  // execution of the expensive node. Return true if progress.
+  bool process_expensive_nodes();
+
+  // Check whether node has become unreachable
+  bool is_node_unreachable(Node *n) const {
+    return !has_node(n) || n->is_unreachable(_igvn);
+  }
+
   // Eliminate range-checks and other trip-counter vs loop-invariant tests.
   void do_range_check( IdealLoopTree *loop, Node_List &old_new );
 
@@ -1043,7 +1063,7 @@
   void register_new_node( Node *n, Node *blk );
 
 #ifdef ASSERT
-void dump_bad_graph(Node* n, Node* early, Node* LCA);
+  void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA);
 #endif
 
 #ifndef PRODUCT
--- a/src/share/vm/opto/node.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/node.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -493,6 +493,8 @@
   }
   if (is_macro())
     compile->add_macro_node(n);
+  if (is_expensive())
+    compile->add_expensive_node(n);
 
   n->set_idx(compile->next_unique()); // Get new unique index as well
   debug_only( n->verify_construction() );
@@ -616,6 +618,9 @@
   if (is_macro()) {
     compile->remove_macro_node(this);
   }
+  if (is_expensive()) {
+    compile->remove_expensive_node(this);
+  }
 #ifdef ASSERT
   // We will not actually delete the storage, but we'll make the node unusable.
   *(address*)this = badAddress;  // smash the C++ vtbl, probably
@@ -689,6 +694,13 @@
 }
 #endif
 
+
+//------------------------------is_unreachable---------------------------------
+bool Node::is_unreachable(PhaseIterGVN &igvn) const {
+  assert(!is_Mach(), "doesn't work with MachNodes");
+  return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
+}
+
 //------------------------------add_req----------------------------------------
 // Add a new required input at the end
 void Node::add_req( Node *n ) {
@@ -1246,6 +1258,9 @@
       if (dead->is_macro()) {
         igvn->C->remove_macro_node(dead);
       }
+      if (dead->is_expensive()) {
+        igvn->C->remove_expensive_node(dead);
+      }
       // Kill all inputs to the dead guy
       for (uint i=0; i < dead->req(); i++) {
         Node *n = dead->in(i);      // Get input to dead guy
--- a/src/share/vm/opto/node.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/node.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -378,6 +378,8 @@
   bool is_dead() const;
 #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
 #endif
+  // Check whether node has become unreachable
+  bool is_unreachable(PhaseIterGVN &igvn) const;
 
   // Set a required input edge, also updates corresponding output edge
   void add_req( Node *n ); // Append a NEW required input
@@ -646,7 +648,8 @@
     Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
     Flag_avoid_back_to_back  = Flag_may_be_short_branch << 1,
     Flag_has_call            = Flag_avoid_back_to_back << 1,
-    _max_flags = (Flag_has_call << 1) - 1 // allow flags combination
+    Flag_is_expensive        = Flag_has_call << 1,
+    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
   };
 
 private:
@@ -819,6 +822,8 @@
 
   // The node is a "macro" node which needs to be expanded before matching
   bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
+  // The node is expensive: the best control is set during loop opts
+  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }
 
 //----------------- Optimization
 
--- a/src/share/vm/opto/phaseX.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/phaseX.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -1203,6 +1203,9 @@
         if (dead->is_macro()) {
           C->remove_macro_node(dead);
         }
+        if (dead->is_expensive()) {
+          C->remove_expensive_node(dead);
+        }
 
         if (recurse) {
           continue;
--- a/src/share/vm/opto/regmask.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/regmask.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -241,7 +241,8 @@
       } else {                  // Else its a split-pair case
         if( bit != _A[i] ) return false; // Found many bits, so fail
         i++;                    // Skip iteration forward
-        if( _A[i] != 1 ) return false; // Require 1 lo bit in next word
+        if( i >= RM_SIZE || _A[i] != 1 )
+          return false; // Require 1 lo bit in next word
       }
     }
   }
@@ -254,7 +255,7 @@
 // Find the lowest-numbered register set in the mask.  Return the
 // HIGHEST register number in the set, or BAD if no sets.
 // Works also for size 1.
-OptoReg::Name RegMask::find_first_set(int size) const {
+OptoReg::Name RegMask::find_first_set(const int size) const {
   verify_sets(size);
   for (int i = 0; i < RM_SIZE; i++) {
     if (_A[i]) {                // Found some bits
@@ -268,7 +269,7 @@
 
 //------------------------------clear_to_sets----------------------------------
 // Clear out partial bits; leave only aligned adjacent bit pairs
-void RegMask::clear_to_sets(int size) {
+void RegMask::clear_to_sets(const int size) {
   if (size == 1) return;
   assert(2 <= size && size <= 8, "update low bits table");
   assert(is_power_of_2(size), "sanity");
@@ -293,7 +294,7 @@
 
 //------------------------------smear_to_sets----------------------------------
 // Smear out partial bits to aligned adjacent bit sets
-void RegMask::smear_to_sets(int size) {
+void RegMask::smear_to_sets(const int size) {
   if (size == 1) return;
   assert(2 <= size && size <= 8, "update low bits table");
   assert(is_power_of_2(size), "sanity");
@@ -318,7 +319,7 @@
 }
 
 //------------------------------is_aligned_set--------------------------------
-bool RegMask::is_aligned_sets(int size) const {
+bool RegMask::is_aligned_sets(const int size) const {
   if (size == 1) return true;
   assert(2 <= size && size <= 8, "update low bits table");
   assert(is_power_of_2(size), "sanity");
@@ -344,7 +345,7 @@
 //------------------------------is_bound_set-----------------------------------
 // Return TRUE if the mask contains one adjacent set of bits and no other bits.
 // Works also for size 1.
-int RegMask::is_bound_set(int size) const {
+int RegMask::is_bound_set(const int size) const {
   if( is_AllStack() ) return false;
   assert(1 <= size && size <= 8, "update low bits table");
   int bit = -1;                 // Set to hold the one bit allowed
@@ -352,7 +353,7 @@
     if (_A[i] ) {               // Found some bits
       if (bit != -1)
        return false;            // Already had bits, so fail
-      bit = _A[i] & -_A[i];     // Extract 1 bit from mask
+      bit = _A[i] & -_A[i];     // Extract low bit from mask
       int hi_bit = bit << (size-1); // high bit
       if (hi_bit != 0) {        // Bit set stays in same word?
         int set = hi_bit + ((hi_bit-1) & ~(bit-1));
@@ -362,12 +363,12 @@
         if (((-1) & ~(bit-1)) != _A[i])
           return false;         // Found many bits, so fail
         i++;                    // Skip iteration forward and check high part
-        assert(size <= 8, "update next code");
         // The lower 24 bits should be 0 since it is split case and size <= 8.
         int set = bit>>24;
         set = set & -set; // Remove sign extension.
         set = (((set << size) - 1) >> 8);
-        if (_A[i] != set) return false; // Require 1 lo bit in next word
+        if (i >= RM_SIZE || _A[i] != set)
+          return false; // Require expected low bits in next word
       }
     }
   }
--- a/src/share/vm/opto/regmask.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/regmask.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,22 +225,22 @@
   // Find the lowest-numbered register set in the mask.  Return the
   // HIGHEST register number in the set, or BAD if no sets.
   // Assert that the mask contains only bit sets.
-  OptoReg::Name find_first_set(int size) const;
+  OptoReg::Name find_first_set(const int size) const;
 
   // Clear out partial bits; leave only aligned adjacent bit sets of size.
-  void clear_to_sets(int size);
+  void clear_to_sets(const int size);
   // Smear out partial bits to aligned adjacent bit sets.
-  void smear_to_sets(int size);
+  void smear_to_sets(const int size);
   // Verify that the mask contains only aligned adjacent bit sets
   void verify_sets(int size) const { assert(is_aligned_sets(size), "mask is not aligned, adjacent sets"); }
   // Test that the mask contains only aligned adjacent bit sets
-  bool is_aligned_sets(int size) const;
+  bool is_aligned_sets(const int size) const;
 
   // mask is a set of misaligned registers
   bool is_misaligned_set(int size) const { return (int)Size()==size && !is_aligned_sets(size);}
 
   // Test for a single adjacent set
-  int is_bound_set(int size) const;
+  int is_bound_set(const int size) const;
 
   static bool is_vector(uint ireg);
   static int num_registers(uint ireg);
--- a/src/share/vm/opto/subnode.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/opto/subnode.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -456,7 +456,10 @@
 //  Exponentiate a double
 class ExpDNode : public Node {
 public:
-  ExpDNode( Node *c, Node *in1 ) : Node(c, in1) {}
+  ExpDNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
@@ -489,7 +492,10 @@
 // Raise a double to a double power
 class PowDNode : public Node {
 public:
-  PowDNode(Node *c, Node *in1, Node *in2  ) : Node(c, in1, in2) {}
+  PowDNode(Compile* C, Node *c, Node *in1, Node *in2 ) : Node(c, in1, in2) {
+    init_flags(Flag_is_expensive);
+    C->add_expensive_node(this);
+  }
   virtual int Opcode() const;
   const Type *bottom_type() const { return Type::DOUBLE; }
   virtual uint ideal_reg() const { return Op_RegD; }
--- a/src/share/vm/prims/wbtestmethods/parserTests.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/prims/wbtestmethods/parserTests.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -27,6 +27,6 @@
 #include "prims/jni.h"
 #include "prims/whitebox.hpp"
 
-WB_METHOD_DECLARE WB_ParseCommandLine(JNIEnv* env, jobject o, jstring args, jobjectArray arguments);
+WB_METHOD_DECLARE(jobjectArray) WB_ParseCommandLine(JNIEnv* env, jobject o, jstring args, jobjectArray arguments);
 
 #endif //SHARE_VM_PRIMS_WBTESTMETHODS_PARSERTESTS_H
--- a/src/share/vm/prims/whitebox.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/prims/whitebox.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -48,6 +48,8 @@
 #include "services/memTracker.hpp"
 #endif // INCLUDE_NMT
 
+#include "compiler/compileBroker.hpp"
+
 bool WhiteBox::_used = false;
 
 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
@@ -169,6 +171,89 @@
 
 #endif // INCLUDE_NMT
 
+static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
+  assert(method != NULL, "method should not be null");
+  ThreadToNativeFromVM ttn(thread);
+  return env->FromReflectedMethod(method);
+}
+
+WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
+  MutexLockerEx mu(Compile_lock);
+  CodeCache::mark_all_nmethods_for_deoptimization();
+  VM_Deoptimize op;
+  VMThread::execute(&op);
+WB_END
+
+WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  MutexLockerEx mu(Compile_lock);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  int result = 0;
+  nmethod* code = mh->code();
+  if (code != NULL) {
+    code->mark_for_deoptimization();
+    ++result;
+  }
+  result += CodeCache::mark_for_deoptimization(mh());
+  if (result > 0) {
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
+  return result;
+WB_END
+
+WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  MutexLockerEx mu(Compile_lock);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  nmethod* code = mh->code();
+  if (code == NULL) {
+    return JNI_FALSE;
+  }
+  return (code->is_alive() && !code->is_marked_for_deoptimization());
+WB_END
+
+WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  MutexLockerEx mu(Compile_lock);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  return !mh->is_not_compilable();
+WB_END
+
+WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobject method))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  MutexLockerEx mu(Compile_lock);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  return mh->queued_for_compilation();
+WB_END
+
+WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject method))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  nmethod* code = mh->code();
+  return (code != NULL ? code->comp_level() : CompLevel_none);
+WB_END
+
+
+WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  mh->set_not_compilable();
+WB_END
+
+WB_ENTRY(jboolean, WB_SetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
+  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+  bool result = mh->dont_inline();
+  mh->set_dont_inline(value == JNI_TRUE);
+  return result;
+WB_END
+
+WB_ENTRY(jint, WB_GetCompileQueuesSize(JNIEnv* env, jobject o))
+  return CompileBroker::queue_size(CompLevel_full_optimization) /* C2 */ +
+         CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
+WB_END
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -225,9 +310,9 @@
 static JNINativeMethod methods[] = {
   {CC"getObjectAddress",   CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress  },
   {CC"getHeapOopSize",     CC"()I",                   (void*)&WB_GetHeapOopSize    },
-  {CC"isClassAlive0",       CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
-  {CC "parseCommandLine",
-      CC "(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
+  {CC"isClassAlive0",      CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
+  {CC"parseCommandLine",
+      CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
 #if INCLUDE_ALL_GCS
@@ -241,6 +326,23 @@
   {CC"NMTFreeTestMemory",  CC"()Z",                   (void*)&WB_NMTFreeTestMemory },
   {CC"NMTWaitForDataMerge",CC"()Z",                   (void*)&WB_NMTWaitForDataMerge},
 #endif // INCLUDE_NMT
+  {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
+  {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Method;)I",
+                                                      (void*)&WB_DeoptimizeMethod  },
+  {CC"isMethodCompiled",   CC"(Ljava/lang/reflect/Method;)Z",
+                                                      (void*)&WB_IsMethodCompiled  },
+  {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Method;)Z",
+                                                      (void*)&WB_IsMethodCompilable},
+  {CC"isMethodQueuedForCompilation",
+      CC"(Ljava/lang/reflect/Method;)Z",              (void*)&WB_IsMethodQueuedForCompilation},
+  {CC"makeMethodNotCompilable",
+      CC"(Ljava/lang/reflect/Method;)V",              (void*)&WB_MakeMethodNotCompilable},
+  {CC"setDontInlineMethod",
+      CC"(Ljava/lang/reflect/Method;Z)Z",             (void*)&WB_SetDontInlineMethod},
+  {CC"getMethodCompilationLevel",
+      CC"(Ljava/lang/reflect/Method;)I",              (void*)&WB_GetMethodCompilationLevel},
+  {CC"getCompileQueuesSize",
+      CC"()I",                                        (void*)&WB_GetCompileQueuesSize},
 };
 
 #undef CC
--- a/src/share/vm/prims/whitebox.hpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/prims/whitebox.hpp	Thu Feb 14 05:36:59 2013 -0800
@@ -34,7 +34,7 @@
 
 #define WB_ENTRY(result_type, header) JNI_ENTRY(result_type, header)
 #define WB_END JNI_END
-#define WB_METHOD_DECLARE extern "C" jobjectArray JNICALL
+#define WB_METHOD_DECLARE(result_type) extern "C" result_type JNICALL
 
 class WhiteBox : public AllStatic {
  private:
--- a/src/share/vm/runtime/arguments.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -1086,7 +1086,7 @@
   }
   // Increase the code cache size - tiered compiles a lot more.
   if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
-    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
+    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
   }
 }
 
--- a/src/share/vm/runtime/globals.cpp	Thu Feb 14 11:01:05 2013 +0100
+++ b/src/share/vm/runtime/globals.cpp	Thu Feb 14 05:36:59 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,10 @@
 }
 
 bool Flag::is_unlocked() const {
-  if (strcmp(kind, "{diagnostic}") == 0) {
+  if (strcmp(kind, "{diagnostic}") == 0 ||
+      strcmp(kind, "{C2 diagnostic}") == 0 ||
+      strcmp(kind, "{ARCH diagnostic}") == 0 ||
+      strcmp(kind, "{Shark diagnostic}") == 0) {
     if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) {
       // transitional logic to allow tests to run until they are changed
       static int warned;
@@ -78,7 +81,9 @@
     }
     return UnlockDiagnosticVMOptions;
   } else if (strcmp(kind, "{experimental}") == 0 ||
-             strcmp(kind, "{C2 experimental}") == 0) {
+             strcmp(kind, "{C2 experimental}") == 0 ||
+             strcmp(kind, "{ARCH experimental}") == 0 ||
+             strcmp(kind, "{Shark experimental}") == 0) {
     return UnlockExperimentalVMOptions;
   } else {
     return is_unlocked_ext();
--- a/test/compiler/7009359/Test7009359.java	Thu Feb 14 11:01:05 2013 +0100
+++ b/test/compiler/7009359/Test7009359.java	Thu Feb 14 05:36:59 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,13 +27,13 @@
  * @bug 7009359
  * @summary HS with -XX:+AggressiveOpts optimize new StringBuffer(null) so it does not throw NPE as expected
  *
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat -XX:CompileCommand=exclude,Test7009359,main Test7009359
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat -XX:CompileCommand=dontinline,Test7009359,stringmakerBUG Test7009359
  *
  */
 
 public class Test7009359 {
     public static void main (String[] args) {
-        for(int i = 0; i < 1000000; i++) {
+        for(int i = 0; i < 100000; i++) {
             if(!stringmakerBUG(null).equals("NPE")) {
                 System.out.println("StringBuffer(null) does not throw NPE");
                 System.exit(97);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Thu Feb 14 05:36:59 2013 -0800
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.management.ManagementFactoryHelper;
+import com.sun.management.HotSpotDiagnosticMXBean;
+
+import java.lang.reflect.Method;
+
+/*
+ * @author igor.ignatyev@oracle.com
+ */
+public abstract class CompilerWhiteBoxTest {
+    protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    protected static final Method METHOD = getMethod("method");
+    protected static final int COMPILE_THRESHOLD
+            = Integer.parseInt(getVMOption("CompileThreshold", "10000"));
+
+    protected static Method getMethod(String name) {
+        try {
+            return CompilerWhiteBoxTest.class.getDeclaredMethod(name);
+        } catch (NoSuchMethodException | SecurityException e) {
+            throw new RuntimeException(
+                    "exception on getting method " + name, e);
+        }
+    }
+
+    protected static String getVMOption(String name, String defaultValue) {
+        String result;
+        HotSpotDiagnosticMXBean diagnostic
+                = ManagementFactoryHelper.getDiagnosticMXBean();
+        result = diagnostic.getVMOption(name).getValue();
+        return result == null ? defaultValue : result;
+    }
+
+    protected final void runTest() throws RuntimeException {
+        if (ManagementFactoryHelper.getCompilationMXBean() == null) {
+            System.err.println(
+                    "Warning: test is not applicable in interpreted mode");
+            return;
+        }
+        System.out.println("at test's start:");
+        printInfo(METHOD);
+        try {
+            test();
+        } catch (Exception e) {
+            System.out.printf("on exception '%s':", e.getMessage());
+            printInfo(METHOD);
+            throw new RuntimeException(e);
+        }
+        System.out.println("at test's end:");
+        printInfo(METHOD);
+    }
+
+    protected static void checkNotCompiled(Method method) {
+        if (WHITE_BOX.isMethodCompiled(method)) {
+            throw new RuntimeException(method + " must be not compiled");
+        }
+        if (WHITE_BOX.getMethodCompilationLevel(method) != 0) {
+            throw new RuntimeException(method + " comp_level must be == 0");
+        }
+    }
+
+    protected static void checkCompiled(Method method)
+            throws InterruptedException {
+        final long start = System.currentTimeMillis();
+        waitBackgroundCompilation(method);
+        if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
+            System.err.printf("Warning: %s is still in queue after %dms%n",
+                    method, System.currentTimeMillis() - start);
+            return;
+        }
+        if (!WHITE_BOX.isMethodCompiled(method)) {
+            throw new RuntimeException(method + " must be compiled");
+        }
+        if (WHITE_BOX.getMethodCompilationLevel(method) == 0) {
+            throw new RuntimeException(method + " comp_level must be != 0");
+        }
+    }
+
+    protected static void waitBackgroundCompilation(Method method)
+            throws InterruptedException {
+        final Object obj = new Object();
+        synchronized (obj) {
+            for (int i = 0; i < 10; ++i) {
+                if (!WHITE_BOX.isMethodQueuedForCompilation(method)) {
+                    break;
+                }
+                obj.wait(1000);
+            }
+        }
+    }
+
+    protected static void printInfo(Method method) {
+        System.out.printf("%n%s:%n", method);
+        System.out.printf("\tcompilable:\t%b%n",
+                WHITE_BOX.isMethodCompilable(method));
+        System.out.printf("\tcompiled:\t%b%n",
+                WHITE_BOX.isMethodCompiled(method));
+        System.out.printf("\tcomp_level:\t%d%n",
+                WHITE_BOX.getMethodCompilationLevel(method));
+        System.out.printf("\tin_queue:\t%b%n",
+                WHITE_BOX.isMethodQueuedForCompilation(method));
+        System.out.printf("compile_queues_size:\t%d%n%n",
+                WHITE_BOX.getCompileQueuesSize());
+    }
+
+    protected abstract void test() throws Exception;
+
+    protected final int compile() {
+        int result = 0;
+        for (int i = 0; i < COMPILE_THRESHOLD; ++i) {
+            result += method();
+        }
+        return result;
+    }
+
+
+    protected int method() {
+        return 42;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/DeoptimizeAllTest.java	Thu Feb 14 05:36:59 2013 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test DeoptimizeAllTest
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI CompilerWhiteBoxTest.java
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI DeoptimizeAllTest.java
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeAllTest
+ * @author igor.ignatyev@oracle.com
+ */
+public class DeoptimizeAllTest extends CompilerWhiteBoxTest {
+
+    public static void main(String[] args) throws Exception {
+        new DeoptimizeAllTest().runTest();
+    }
+
+    protected void test() throws Exception {
+        // to prevent inlining #method into #compile()
+        WHITE_BOX.setDontInlineMethod(METHOD, true);
+        compile();
+        checkCompiled(METHOD);
+        WHITE_BOX.deoptimizeAll();
+        checkNotCompiled(METHOD);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/DeoptimizeMethodTest.java	Thu Feb 14 05:36:59 2013 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test DeoptimizeMethodTest
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI CompilerWhiteBoxTest.java
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI DeoptimizeMethodTest.java
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeMethodTest
+ * @author igor.ignatyev@oracle.com
+ */
+public class DeoptimizeMethodTest extends CompilerWhiteBoxTest {
+
+    public static void main(String[] args) throws Exception {
+        new DeoptimizeMethodTest().runTest();
+    }
+
+    protected void test() throws Exception {
+        // to prevent inlining #method into #compile()
+        WHITE_BOX.setDontInlineMethod(METHOD, true);
+        compile();
+        checkCompiled(METHOD);
+        WHITE_BOX.deoptimizeMethod(METHOD);
+        checkNotCompiled(METHOD);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Thu Feb 14 05:36:59 2013 -0800
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test IsMethodCompilableTest
+ * @bug 8007270
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI CompilerWhiteBoxTest.java
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI IsMethodCompilableTest.java
+ * @run main/othervm/timeout=600 -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IsMethodCompilableTest
+ * @author igor.ignatyev@oracle.com
+ */
+public class IsMethodCompilableTest extends CompilerWhiteBoxTest {
+    protected static final long PER_METHOD_RECOMPILATION_CUTOFF;
+
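+    // -1 stands for an infinite cutoff (the test skips itself below); any other
+    // value is read as an unsigned 32-bit count, plus one to step past the cutoff.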
+    static {
+        long tmp = Long.parseLong(
+                getVMOption("PerMethodRecompilationCutoff", "400"));
+        if (tmp == -1) {
+            PER_METHOD_RECOMPILATION_CUTOFF = -1 /* Inf */;
+        } else {
+            PER_METHOD_RECOMPILATION_CUTOFF = 1 + (0xFFFFFFFFL & tmp);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        new IsMethodCompilableTest().runTest();
+    }
+
+    protected void test() throws Exception {
+        if (!WHITE_BOX.isMethodCompilable(METHOD)) {
+            throw new RuntimeException(METHOD + " must be compilable");
+        }
+        System.out.println("PerMethodRecompilationCutoff = "
+                + PER_METHOD_RECOMPILATION_CUTOFF);
+        if (PER_METHOD_RECOMPILATION_CUTOFF == -1) {
+            System.err.println(
+                    "Warning: test is not applicable if PerMethodRecompilationCutoff == Inf");
+            return;
+        }
+        // to prevent inlining #method into #compile()
+        WHITE_BOX.setDontInlineMethod(METHOD, true);
+        boolean madeNotCompilable = false;
+
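+        // compile and deoptimize until the VM marks the method not compilable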
+        for (long i = 0; i < PER_METHOD_RECOMPILATION_CUTOFF; ++i) {
+            compile();
+            waitBackgroundCompilation(METHOD);
+            WHITE_BOX.deoptimizeMethod(METHOD);
+            if (!WHITE_BOX.isMethodCompilable(METHOD)) {
+                madeNotCompilable = true;
+                break;
+            }
+        }
+        if (!madeNotCompilable) {
+            throw new RuntimeException(METHOD + " is still compilable after "
+                    + PER_METHOD_RECOMPILATION_CUTOFF + " iterations");
+        }
+        compile();
+        if (WHITE_BOX.isMethodCompiled(METHOD)) {
+            printInfo(METHOD);
+            throw new RuntimeException(
+                    METHOD + " is not compilable but was compiled");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Thu Feb 14 05:36:59 2013 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test MakeMethodNotCompilableTest
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI CompilerWhiteBoxTest.java
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI MakeMethodNotCompilableTest.java
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI MakeMethodNotCompilableTest
+ * @author igor.ignatyev@oracle.com
+ */
+public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest {
+
+    public static void main(String[] args) throws Exception {
+        new MakeMethodNotCompilableTest().runTest();
+    }
+
+    protected void test() throws Exception {
+        if (!WHITE_BOX.isMethodCompilable(METHOD)) {
+            throw new RuntimeException(METHOD + " must be compilable");
+        }
+        WHITE_BOX.makeMethodNotCompilable(METHOD);
+        if (WHITE_BOX.isMethodCompilable(METHOD)) {
+            throw new RuntimeException(METHOD + " must be not compilable");
+        }
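+        // a compilation request for a not-compilable method is expected to be ignored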
+        compile();
+        if (WHITE_BOX.isMethodQueuedForCompilation(METHOD)) {
+            throw new RuntimeException(METHOD + " must not be in queue");
+        }
+        checkNotCompiled(METHOD);
+        if (WHITE_BOX.isMethodCompilable(METHOD)) {
+            throw new RuntimeException(METHOD + " must be not compilable");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/SetDontInlineMethodTest.java	Thu Feb 14 05:36:59 2013 -0800
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SetDontInlineMethodTest
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI CompilerWhiteBoxTest.java
+ * @compile -J-XX:+UnlockDiagnosticVMOptions -J-XX:+WhiteBoxAPI SetDontInlineMethodTest.java
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetDontInlineMethodTest
+ * @author igor.ignatyev@oracle.com
+ */
+public class SetDontInlineMethodTest extends CompilerWhiteBoxTest {
+
+    public static void main(String[] args) throws Exception {
+        new SetDontInlineMethodTest().runTest();
+    }
+
+    protected void test() throws Exception {
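+        // setDontInlineMethod is expected to return the previous state of the
+        // flag; each check below verifies that expectation.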
+        if (WHITE_BOX.setDontInlineMethod(METHOD, true)) {
+            throw new RuntimeException("on start " + METHOD
+                    + " must be inlineable");
+        }
+        if (!WHITE_BOX.setDontInlineMethod(METHOD, true)) {
+            throw new RuntimeException("after first change to true " + METHOD
+                    + " must be not inlineable");
+        }
+        if (!WHITE_BOX.setDontInlineMethod(METHOD, false)) {
+            throw new RuntimeException("after second change to true " + METHOD
+                    + " must be still not inlineable");
+        }
+        if (WHITE_BOX.setDontInlineMethod(METHOD, false)) {
+            throw new RuntimeException("after first change to false" + METHOD
+                    + " must be inlineable");
+        }
+        if (WHITE_BOX.setDontInlineMethod(METHOD, false)) {
+            throw new RuntimeException("after second change to false " + METHOD
+                    + " must be inlineable");
+        }
+    }
+}