changeset 1527:fcbb92a1ab3b

Merge
author jrose
date Tue, 29 Jun 2010 16:09:57 -0700
parents 5a297ea605c7 (current diff) 76efbe666d6c (diff)
children a00567c82f02 65b0c03b165d
files src/share/vm/runtime/arguments.cpp
diffstat 15 files changed, 180 insertions(+), 101 deletions(-)
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 VM_Version::CpuidInfo VM_Version::_cpuid_info   = { 0, };
 
 static BufferBlob* stub_blob;
-static const int stub_size = 300;
+static const int stub_size = 400;
 
 extern "C" {
   typedef void (*getPsrInfo_stub_t)(void*);
@@ -56,7 +56,7 @@
     const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
     const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);
 
-    Label detect_486, cpu486, detect_586, std_cpuid1;
+    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
     Label ext_cpuid1, ext_cpuid5, done;
 
     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
@@ -131,13 +131,62 @@
     __ movl(Address(rsi, 8), rcx);
     __ movl(Address(rsi,12), rdx);
 
-    __ cmpl(rax, 3);     // Is cpuid(0x4) supported?
-    __ jccb(Assembler::belowEqual, std_cpuid1);
+    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
+    __ jccb(Assembler::belowEqual, std_cpuid4);
+
+    //
+    // cpuid(0xB) Processor Topology
+    //
+    __ movl(rax, 0xb);
+    __ xorl(rcx, rcx);   // Threads level
+    __ cpuid();
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
+    __ movl(rax, 0xb);
+    __ movl(rcx, 1);     // Cores level
+    __ cpuid();
+    __ push(rax);
+    __ andl(rax, 0x1f);  // Determine if valid topology level
+    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
+    __ andl(rax, 0xffff);
+    __ pop(rax);
+    __ jccb(Assembler::equal, std_cpuid4);
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
+
+    __ movl(rax, 0xb);
+    __ movl(rcx, 2);     // Packages level
+    __ cpuid();
+    __ push(rax);
+    __ andl(rax, 0x1f);  // Determine if valid topology level
+    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
+    __ andl(rax, 0xffff);
+    __ pop(rax);
+    __ jccb(Assembler::equal, std_cpuid4);
+
+    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
+    __ movl(Address(rsi, 0), rax);
+    __ movl(Address(rsi, 4), rbx);
+    __ movl(Address(rsi, 8), rcx);
+    __ movl(Address(rsi,12), rdx);
 
     //
     // cpuid(0x4) Deterministic cache params
     //
+    __ bind(std_cpuid4);
     __ movl(rax, 4);
+    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
+    __ jccb(Assembler::greater, std_cpuid1);
+
     __ xorl(rcx, rcx);   // L1 cache
     __ cpuid();
     __ push(rax);
@@ -460,13 +509,18 @@
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  if( AllocatePrefetchStyle == 2 && is_intel() &&
-      cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core
+  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
+    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
 #ifdef _LP64
-    AllocatePrefetchDistance = 384;
+      AllocatePrefetchDistance = 384;
 #else
-    AllocatePrefetchDistance = 320;
+      AllocatePrefetchDistance = 320;
 #endif
+    }
+    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
+      AllocatePrefetchDistance = 192;
+      AllocatePrefetchLines = 4;
+    }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
 
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Tue Jun 29 16:09:57 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,14 @@
     } bits;
   };
 
+  union TplCpuidBEbx {
+    uint32_t value;
+    struct {
+      uint32_t logical_cpus : 16,
+                            : 16;
+    } bits;
+  };
+
   union ExtCpuid1Ecx {
     uint32_t value;
     struct {
@@ -211,6 +219,25 @@
     uint32_t     dcp_cpuid4_ecx; // unused currently
     uint32_t     dcp_cpuid4_edx; // unused currently
 
+    // cpuid function 0xB (processor topology)
+    // ecx = 0
+    uint32_t     tpl_cpuidB0_eax;
+    TplCpuidBEbx tpl_cpuidB0_ebx;
+    uint32_t     tpl_cpuidB0_ecx; // unused currently
+    uint32_t     tpl_cpuidB0_edx; // unused currently
+
+    // ecx = 1
+    uint32_t     tpl_cpuidB1_eax;
+    TplCpuidBEbx tpl_cpuidB1_ebx;
+    uint32_t     tpl_cpuidB1_ecx; // unused currently
+    uint32_t     tpl_cpuidB1_edx; // unused currently
+
+    // ecx = 2
+    uint32_t     tpl_cpuidB2_eax;
+    TplCpuidBEbx tpl_cpuidB2_ebx;
+    uint32_t     tpl_cpuidB2_ecx; // unused currently
+    uint32_t     tpl_cpuidB2_edx; // unused currently
+
     // cpuid function 0x80000000 // example, unused
     uint32_t ext_max_function;
     uint32_t ext_vendor_name_0;
@@ -316,6 +343,9 @@
   static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
   static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
   static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
+  static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
+  static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
+  static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
 
   // Initialization
   static void initialize();
@@ -349,7 +379,12 @@
   static uint cores_per_cpu()  {
     uint result = 1;
     if (is_intel()) {
-      result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      if (_cpuid_info.std_max_function >= 0xB) {
+        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
+                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+      } else {
+        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+      }
     } else if (is_amd()) {
       result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
     }
@@ -358,7 +393,9 @@
 
   static uint threads_per_core()  {
     uint result = 1;
-    if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
+    if (is_intel() && _cpuid_info.std_max_function >= 0xB) {
+      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+    } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
       result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                cores_per_cpu();
     }
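With the leaf-0xB results stored in CpuidInfo, the updated accessors derive the topology from the per-level logical CPU counts: threads_per_core() reads sub-leaf 0, and cores_per_cpu() divides the sub-leaf 1 count by the sub-leaf 0 count. A hedged standalone illustration of that arithmetic follows (TopologyCounts and the free functions are stand-ins, not the real CpuidInfo layout).

#include <cstdint>
#include <cstdio>

// Hypothetical holder for the EBX[15:0] counts read at cpuid leaf 0xB.
struct TopologyCounts {
  uint32_t smt_level_logical;   // cpuid(0xB, ecx=0), ebx[15:0]
  uint32_t core_level_logical;  // cpuid(0xB, ecx=1), ebx[15:0]
};

// threads_per_core: logical CPUs reported at the SMT (threads) level.
static uint32_t threads_per_core(const TopologyCounts& t) {
  return t.smt_level_logical != 0 ? t.smt_level_logical : 1;
}

// cores_per_cpu: logical CPUs per package divided by logical CPUs per core,
// matching tpl_cpuidB1_ebx.bits.logical_cpus / tpl_cpuidB0_ebx.bits.logical_cpus.
static uint32_t cores_per_cpu(const TopologyCounts& t) {
  return t.core_level_logical / threads_per_core(t);
}

int main() {
  // Example: a 4-core CPU with HyperThreading reports 2 logical CPUs at the
  // SMT level and 8 at the core level, giving 2 threads/core and 4 cores/cpu.
  TopologyCounts t{2, 8};
  std::printf("threads/core=%u cores/cpu=%u\n",
              threads_per_core(t), cores_per_cpu(t));
  return 0;
}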
--- a/src/share/vm/opto/cfgnode.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -472,9 +472,7 @@
             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
             // Break dead loop data path.
             // Eagerly replace phis with top to avoid phis copies generation.
-            igvn->add_users_to_worklist(n);
-            igvn->hash_delete(n); // Yank from hash before hacking edges
-            igvn->subsume_node(n, top);
+            igvn->replace_node(n, top);
             if( max != outcnt() ) {
               progress = true;
               j = refresh_out_pos(j);
@@ -518,18 +516,17 @@
         igvn->hash_delete(n); // Remove from worklist before modifying edges
         if( n->is_Phi() ) {   // Collapse all Phis
           // Eagerly replace phis to avoid copies generation.
-          igvn->add_users_to_worklist(n);
-          igvn->hash_delete(n); // Yank from hash before hacking edges
+          Node* in;
           if( cnt == 0 ) {
             assert( n->req() == 1, "No data inputs expected" );
-            igvn->subsume_node(n, parent_ctrl); // replaced by top
+            in = parent_ctrl; // replaced by top
           } else {
             assert( n->req() == 2 &&  n->in(1) != NULL, "Only one data input expected" );
-            Node* in1 = n->in(1);               // replaced by unique input
-            if( n->as_Phi()->is_unsafe_data_reference(in1) )
-              in1 = phase->C->top();            // replaced by top
-            igvn->subsume_node(n, in1);
+            in = n->in(1);               // replaced by unique input
+            if( n->as_Phi()->is_unsafe_data_reference(in) )
+              in = phase->C->top();      // replaced by top
           }
+          igvn->replace_node(n, in);
         }
         else if( n->is_Region() ) { // Update all incoming edges
           assert( !igvn->eqv(n, this), "Must be removed from DefUse edges");
@@ -2127,7 +2124,7 @@
     // if it's not there, there's nothing to do.
     Node* fallthru = proj_out(0);
     if (fallthru != NULL) {
-      phase->is_IterGVN()->subsume_node(fallthru, in(0));
+      phase->is_IterGVN()->replace_node(fallthru, in(0));
     }
     return phase->C->top();
   }
--- a/src/share/vm/opto/ifnode.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/ifnode.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -1081,11 +1081,9 @@
 
   igvn->register_new_node_with_optimizer(new_if_f);
   igvn->register_new_node_with_optimizer(new_if_t);
-  igvn->hash_delete(old_if_f);
-  igvn->hash_delete(old_if_t);
   // Flip test, so flip trailing control
-  igvn->subsume_node(old_if_f, new_if_t);
-  igvn->subsume_node(old_if_t, new_if_f);
+  igvn->replace_node(old_if_f, new_if_t);
+  igvn->replace_node(old_if_t, new_if_f);
 
   // Progress
   return iff;
--- a/src/share/vm/opto/loopTransform.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -194,8 +194,7 @@
     addx = new (phase->C, 3) AddINode(x, inv);
   }
   phase->register_new_node(addx, phase->get_ctrl(x));
-  phase->_igvn.hash_delete(n1);
-  phase->_igvn.subsume_node(n1, addx);
+  phase->_igvn.replace_node(n1, addx);
   return addx;
 }
 
@@ -1586,8 +1585,7 @@
   Node *phi = cl->phi();
   Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
   phase->register_new_node(final,cl->in(LoopNode::EntryControl));
-  phase->_igvn.hash_delete(phi);
-  phase->_igvn.subsume_node(phi,final);
+  phase->_igvn.replace_node(phi,final);
   phase->C->set_major_progress();
   return true;
 }
--- a/src/share/vm/opto/loopnode.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/loopnode.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -400,7 +400,7 @@
     nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
     nphi = _igvn.register_new_node_with_optimizer(nphi);
     set_ctrl(nphi, get_ctrl(phi));
-    _igvn.subsume_node(phi, nphi);
+    _igvn.replace_node(phi, nphi);
     phi = nphi->as_Phi();
   }
   cmp = cmp->clone();
@@ -760,7 +760,7 @@
       // which in turn prevents removing an empty loop.
       Node *id_old_phi = old_phi->Identity( &igvn );
       if( id_old_phi != old_phi ) { // Found a simple identity?
-        // Note that I cannot call 'subsume_node' here, because
+        // Note that I cannot call 'replace_node' here, because
         // that will yank the edge from old_phi to the Region and
         // I'm mid-iteration over the Region's uses.
         for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
@@ -1065,11 +1065,9 @@
     l = igvn.register_new_node_with_optimizer(l, _head);
     phase->set_created_loop_node();
     // Go ahead and replace _head
-    phase->_igvn.subsume_node( _head, l );
+    phase->_igvn.replace_node( _head, l );
     _head = l;
     phase->set_loop(_head, this);
-    for (DUIterator_Fast imax, i = l->fast_outs(imax); i < imax; i++)
-      phase->_igvn.add_users_to_worklist(l->fast_out(i));
   }
 
   // Now recursively beautify nested loops
@@ -1329,8 +1327,7 @@
         Node* add  = new (C, 3) AddINode(ratio_idx, diff);
         phase->_igvn.register_new_node_with_optimizer(add);
         phase->set_ctrl(add, cl);
-        phase->_igvn.hash_delete( phi2 );
-        phase->_igvn.subsume_node( phi2, add );
+        phase->_igvn.replace_node( phi2, add );
         // Sometimes an induction variable is unused
         if (add->outcnt() == 0) {
           phase->_igvn.remove_dead_node(add);
--- a/src/share/vm/opto/loopnode.hpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Tue Jun 29 16:09:57 2010 -0700
@@ -626,8 +626,7 @@
     _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
   }
   void lazy_replace( Node *old_node, Node *new_node ) {
-    _igvn.hash_delete(old_node);
-    _igvn.subsume_node( old_node, new_node );
+    _igvn.replace_node( old_node, new_node );
     lazy_update( old_node, new_node );
   }
   void lazy_replace_proj( Node *old_node, Node *new_node ) {
--- a/src/share/vm/opto/loopopts.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/loopopts.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -354,8 +354,7 @@
     register_new_node( var_scale, n_ctrl );
     Node *var_add = new (C, 3) AddINode( var_scale, inv_scale );
     register_new_node( var_add, n_ctrl );
-    _igvn.hash_delete( n );
-    _igvn.subsume_node( n, var_add );
+    _igvn.replace_node( n, var_add );
     return var_add;
   }
 
@@ -390,8 +389,7 @@
           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
           Node *add2 = new (C, 4) AddPNode( n->in(1), add1, n->in(2)->in(3) );
           register_new_node( add2, n_ctrl );
-          _igvn.hash_delete( n );
-          _igvn.subsume_node( n, add2 );
+          _igvn.replace_node( n, add2 );
           return add2;
         }
       }
@@ -412,8 +410,7 @@
           register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
           Node *add2 = new (C, 4) AddPNode( n->in(1), add1, V );
           register_new_node( add2, n_ctrl );
-          _igvn.hash_delete( n );
-          _igvn.subsume_node( n, add2 );
+          _igvn.replace_node( n, add2 );
           return add2;
         }
       }
@@ -555,8 +552,7 @@
     }
     Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) );
     register_new_node( cmov, cmov_ctrl );
-    _igvn.hash_delete(phi);
-    _igvn.subsume_node( phi, cmov );
+    _igvn.replace_node( phi, cmov );
 #ifndef PRODUCT
     if( VerifyLoopOptimizations ) verify();
 #endif
@@ -642,8 +638,7 @@
 
   // Found a Phi to split thru!
   // Replace 'n' with the new phi
-  _igvn.hash_delete(n);
-  _igvn.subsume_node( n, phi );
+  _igvn.replace_node( n, phi );
   // Moved a load around the loop, 'en-registering' something.
   if( n_blk->Opcode() == Op_Loop && n->is_Load() &&
       !phi->in(LoopNode::LoopBackControl)->is_Load() )
@@ -789,13 +784,11 @@
 
     // Found a Phi to split thru!
     // Replace 'n' with the new phi
-    _igvn.hash_delete(n);
-    _igvn.subsume_node( n, phi );
+    _igvn.replace_node( n, phi );
 
     // Now split the bool up thru the phi
     Node *bolphi = split_thru_phi( bol, n_ctrl, -1 );
-    _igvn.hash_delete(bol);
-    _igvn.subsume_node( bol, bolphi );
+    _igvn.replace_node( bol, bolphi );
     assert( iff->in(1) == bolphi, "" );
     if( bolphi->Value(&_igvn)->singleton() )
       return;
@@ -803,8 +796,7 @@
     // Conditional-move?  Must split up now
     if( !iff->is_If() ) {
       Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 );
-      _igvn.hash_delete(iff);
-      _igvn.subsume_node( iff, cmovphi );
+      _igvn.replace_node( iff, cmovphi );
       return;
     }
 
@@ -950,9 +942,7 @@
   if( n_op == Op_Opaque2 &&
       n->in(1) != NULL &&
       get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
-    _igvn.add_users_to_worklist(n);
-    _igvn.hash_delete(n);
-    _igvn.subsume_node( n, n->in(1) );
+    _igvn.replace_node( n, n->in(1) );
   }
 }
 
@@ -1425,7 +1415,7 @@
           // IGVN does CSE).
           Node *hit = _igvn.hash_find_insert(use);
           if( hit )             // Go ahead and re-hash for hits.
-            _igvn.subsume_node( use, hit );
+            _igvn.replace_node( use, hit );
         }
 
         // If 'use' was in the loop-exit block, it now needs to be sunk
--- a/src/share/vm/opto/macro.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/macro.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -135,8 +135,7 @@
   if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
   copy_call_debug_info(oldcall, call);
   call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
-  _igvn.hash_delete(oldcall);
-  _igvn.subsume_node(oldcall, call);
+  _igvn.replace_node(oldcall, call);
   transform_later(call);
 
   return call;
@@ -523,8 +522,7 @@
         // Kill all new Phis
         while(value_phis.is_nonempty()) {
           Node* n = value_phis.node();
-          _igvn.hash_delete(n);
-          _igvn.subsume_node(n, C->top());
+          _igvn.replace_node(n, C->top());
           value_phis.pop();
         }
       }
@@ -1311,8 +1309,7 @@
   if (!always_slow) {
     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
   }
-  _igvn.hash_delete(alloc);
-  _igvn.subsume_node(alloc, call);
+  _igvn.replace_node(alloc, call);
   transform_later(call);
 
   // Identify the output projections from the allocate node and
--- a/src/share/vm/opto/phaseX.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/phaseX.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -1447,16 +1447,12 @@
           Node* m = n->out(i);
           if( m->is_Phi() ) {
             assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
-            add_users_to_worklist(m);
-            hash_delete(m); // Yank from hash before hacking edges
-            subsume_node(m, nn);
+            replace_node(m, nn);
             --i; // deleted this phi; rescan starting with next position
           }
         }
       }
-      add_users_to_worklist(n); // Users of about-to-be-constant 'n'
-      hash_delete(n);           // Removed 'n' from table before subsuming it
-      subsume_node(n,nn);       // Update DefUse edges for new constant
+      replace_node(n,nn);       // Update DefUse edges for new constant
     }
     return nn;
   }
--- a/src/share/vm/opto/phaseX.hpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/phaseX.hpp	Tue Jun 29 16:09:57 2010 -0700
@@ -393,6 +393,10 @@
 
   // Idealize old Node 'n' with respect to its inputs and its value
   virtual Node *transform_old( Node *a_node );
+
+  // Subsume users of node 'old' into node 'nn'
+  void subsume_node( Node *old, Node *nn );
+
 protected:
 
   // Idealize new Node 'n' with respect to its inputs and its value
@@ -439,10 +443,6 @@
     remove_globally_dead_node(dead);
   }
 
-  // Subsume users of node 'old' into node 'nn'
-  // If no Def-Use info existed for 'nn' it will after call.
-  void subsume_node( Node *old, Node *nn );
-
   // Add users of 'n' to worklist
   void add_users_to_worklist0( Node *n );
   void add_users_to_worklist ( Node *n );
@@ -450,7 +450,7 @@
   // Replace old node with new one.
   void replace_node( Node *old, Node *nn ) {
     add_users_to_worklist(old);
-    hash_delete(old);
+    hash_delete(old); // Yank from hash before hacking edges
     subsume_node(old, nn);
   }
 
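Most of the compiler-side churn in this merge replaces the repeated three-call sequence add_users_to_worklist / hash_delete / subsume_node with the single replace_node wrapper shown above, so call sites can no longer skip the hash_delete step. A toy mock of that consolidation, using stand-in types rather than the real Node/PhaseIterGVN classes:

#include <cstdio>

// Stand-in types; the real HotSpot classes are Node and PhaseIterGVN.
struct Node { int idx; };

struct MockIGVN {
  void add_users_to_worklist(Node* n) { std::printf("worklist users of %d\n", n->idx); }
  void hash_delete(Node* n)           { std::printf("hash_delete %d\n", n->idx); }
  void subsume_node(Node* old_n, Node* nn) {
    std::printf("subsume %d -> %d\n", old_n->idx, nn->idx);
  }
  // Wrapper mirroring PhaseIterGVN::replace_node in phaseX.hpp: the three
  // steps always run together, in this order.
  void replace_node(Node* old_n, Node* nn) {
    add_users_to_worklist(old_n);
    hash_delete(old_n);           // yank from hash before hacking edges
    subsume_node(old_n, nn);
  }
};

int main() {
  MockIGVN igvn;
  Node a{1}, b{2};
  igvn.replace_node(&a, &b);   // was: add_users_to_worklist + hash_delete + subsume_node
  return 0;
}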
--- a/src/share/vm/opto/split_if.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/split_if.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -217,8 +217,7 @@
   register_new_node(phi, blk1);
 
   // Remove cloned-up value from optimizer; use phi instead
-  _igvn.hash_delete(n);
-  _igvn.subsume_node( n, phi );
+  _igvn.replace_node( n, phi );
 
   // (There used to be a self-recursive call to split_up() here,
   // but it is not needed.  All necessary forward walking is done
@@ -352,8 +351,7 @@
   }
 
   if (use_blk == NULL) {        // He's dead, Jim
-    _igvn.hash_delete(use);
-    _igvn.subsume_node(use, C->top());
+    _igvn.replace_node(use, C->top());
   }
 
   return use_blk;
--- a/src/share/vm/opto/superword.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/superword.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -1172,8 +1172,7 @@
       _phase->set_ctrl(vn, _phase->get_ctrl(p->at(0)));
       for (uint j = 0; j < p->size(); j++) {
         Node* pm = p->at(j);
-        _igvn.hash_delete(pm);
-        _igvn.subsume_node(pm, vn);
+        _igvn.replace_node(pm, vn);
       }
       _igvn._worklist.push(vn);
     }
--- a/src/share/vm/opto/type.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/opto/type.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -182,6 +182,8 @@
   return t->hash();
 }
 
+#define SMALLINT ((juint)3)  // a value too insignificant to consider widening
+
 //--------------------------Initialize_shared----------------------------------
 void Type::Initialize_shared(Compile* current) {
   // This method does not need to be locked because the first system
@@ -240,6 +242,7 @@
   assert( TypeInt::CC_GT == TypeInt::ONE,     "types must match for CmpL to work" );
   assert( TypeInt::CC_EQ == TypeInt::ZERO,    "types must match for CmpL to work" );
   assert( TypeInt::CC_GE == TypeInt::BOOL,    "types must match for CmpL to work" );
+  assert( (juint)(TypeInt::CC->_hi - TypeInt::CC->_lo) <= SMALLINT, "CC is truly small");
 
   TypeLong::MINUS_1 = TypeLong::make(-1);        // -1
   TypeLong::ZERO    = TypeLong::make( 0);        //  0
@@ -1054,16 +1057,21 @@
   return (TypeInt*)(new TypeInt(lo,lo,WidenMin))->hashcons();
 }
 
-#define SMALLINT ((juint)3)  // a value too insignificant to consider widening
-
-const TypeInt *TypeInt::make( jint lo, jint hi, int w ) {
+static int normalize_int_widen( jint lo, jint hi, int w ) {
   // Certain normalizations keep us sane when comparing types.
   // The 'SMALLINT' covers constants and also CC and its relatives.
-  assert(CC == NULL || (juint)(CC->_hi - CC->_lo) <= SMALLINT, "CC is truly small");
   if (lo <= hi) {
-    if ((juint)(hi - lo) <= SMALLINT)   w = Type::WidenMin;
-    if ((juint)(hi - lo) >= max_juint)  w = Type::WidenMax; // plain int
+    if ((juint)(hi - lo) <= SMALLINT)  w = Type::WidenMin;
+    if ((juint)(hi - lo) >= max_juint) w = Type::WidenMax; // TypeInt::INT
+  } else {
+    if ((juint)(lo - hi) <= SMALLINT)  w = Type::WidenMin;
+    if ((juint)(lo - hi) >= max_juint) w = Type::WidenMin; // dual TypeInt::INT
   }
+  return w;
+}
+
+const TypeInt *TypeInt::make( jint lo, jint hi, int w ) {
+  w = normalize_int_widen(lo, hi, w);
   return (TypeInt*)(new TypeInt(lo,hi,w))->hashcons();
 }
 
@@ -1103,14 +1111,14 @@
 
   // Expand covered set
   const TypeInt *r = t->is_int();
-  // (Avoid TypeInt::make, to avoid the argument normalizations it enforces.)
-  return (new TypeInt( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons();
+  return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) );
 }
 
 //------------------------------xdual------------------------------------------
 // Dual: reverse hi & lo; flip widen
 const Type *TypeInt::xdual() const {
-  return new TypeInt(_hi,_lo,WidenMax-_widen);
+  int w = normalize_int_widen(_hi,_lo, WidenMax-_widen);
+  return new TypeInt(_hi,_lo,w);
 }
 
 //------------------------------widen------------------------------------------
@@ -1202,7 +1210,7 @@
 //-----------------------------filter------------------------------------------
 const Type *TypeInt::filter( const Type *kills ) const {
   const TypeInt* ft = join(kills)->isa_int();
-  if (ft == NULL || ft->_lo > ft->_hi)
+  if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
     // Do not allow the value of kill->_widen to affect the outcome.
@@ -1304,13 +1312,21 @@
   return (TypeLong*)(new TypeLong(lo,lo,WidenMin))->hashcons();
 }
 
-const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) {
+static int normalize_long_widen( jlong lo, jlong hi, int w ) {
   // Certain normalizations keep us sane when comparing types.
-  // The '1' covers constants.
+  // The 'SMALLINT' covers constants.
   if (lo <= hi) {
-    if ((julong)(hi - lo) <= SMALLINT)    w = Type::WidenMin;
-    if ((julong)(hi - lo) >= max_julong)  w = Type::WidenMax; // plain long
+    if ((julong)(hi - lo) <= SMALLINT)   w = Type::WidenMin;
+    if ((julong)(hi - lo) >= max_julong) w = Type::WidenMax; // TypeLong::LONG
+  } else {
+    if ((julong)(lo - hi) <= SMALLINT)   w = Type::WidenMin;
+    if ((julong)(lo - hi) >= max_julong) w = Type::WidenMin; // dual TypeLong::LONG
   }
+  return w;
+}
+
+const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) {
+  w = normalize_long_widen(lo, hi, w);
   return (TypeLong*)(new TypeLong(lo,hi,w))->hashcons();
 }
 
@@ -1351,14 +1367,14 @@
 
   // Expand covered set
   const TypeLong *r = t->is_long(); // Turn into a TypeLong
-  // (Avoid TypeLong::make, to avoid the argument normalizations it enforces.)
-  return (new TypeLong( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons();
+  return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) );
 }
 
 //------------------------------xdual------------------------------------------
 // Dual: reverse hi & lo; flip widen
 const Type *TypeLong::xdual() const {
-  return new TypeLong(_hi,_lo,WidenMax-_widen);
+  int w = normalize_long_widen(_hi,_lo, WidenMax-_widen);
+  return new TypeLong(_hi,_lo,w);
 }
 
 //------------------------------widen------------------------------------------
@@ -1453,7 +1469,7 @@
 //-----------------------------filter------------------------------------------
 const Type *TypeLong::filter( const Type *kills ) const {
   const TypeLong* ft = join(kills)->isa_long();
-  if (ft == NULL || ft->_lo > ft->_hi)
+  if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
     // Do not allow the value of kill->_widen to affect the outcome.
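The TypeInt/TypeLong changes factor the widen normalization out of make() into normalize_int_widen / normalize_long_widen, so that xdual() (and, via make(), xmeet) applies the same rules, including to dual ranges where lo > hi. A hedged sketch of the int variant with a few worked cases (the WidenMin/WidenMax and SMALLINT stand-ins below are local approximations of the Type constants, and unsigned subtraction replaces the jint/juint cast to keep the sketch free of signed-overflow UB):

#include <cstdint>
#include <cstdio>

// Stand-ins for Type::WidenMin / Type::WidenMax and SMALLINT.
enum { WidenMin = 0, WidenMax = 3 };
static const uint32_t kSmallInt = 3;            // SMALLINT in type.cpp
static const uint32_t kMaxJuint = 0xffffffffu;  // max_juint

static int normalize_int_widen(int32_t lo, int32_t hi, int w) {
  if (lo <= hi) {
    uint32_t d = (uint32_t)hi - (uint32_t)lo;
    if (d <= kSmallInt) w = WidenMin;           // constants and CC-like ranges
    if (d >= kMaxJuint) w = WidenMax;           // full range: TypeInt::INT
  } else {
    uint32_t d = (uint32_t)lo - (uint32_t)hi;
    if (d <= kSmallInt) w = WidenMin;           // dual of a small range
    if (d >= kMaxJuint) w = WidenMin;           // dual of TypeInt::INT
  }
  return w;
}

int main() {
  // A constant [5,5] is too small to widen: normalized to WidenMin.
  std::printf("[5,5]     -> %d\n", normalize_int_widen(5, 5, 2));
  // The full int range already covers everything: normalized to WidenMax.
  std::printf("[min,max] -> %d\n", normalize_int_widen(INT32_MIN, INT32_MAX, 1));
  // A dual (inverted) near-constant range also normalizes to WidenMin.
  std::printf("[5,3]     -> %d\n", normalize_int_widen(5, 3, 2));
  return 0;
}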
--- a/src/share/vm/runtime/arguments.cpp	Sat Jun 26 00:19:55 2010 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Tue Jun 29 16:09:57 2010 -0700
@@ -1508,6 +1508,9 @@
   if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) {
     FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500);
   }
+  if (AggressiveOpts && FLAG_IS_DEFAULT(OptimizeStringConcat)) {
+    FLAG_SET_DEFAULT(OptimizeStringConcat, true);
+  }
 #endif
 
   if (AggressiveOpts) {