changeset 2550:63d3fb179034

Merge
author lana
date Thu, 02 Jun 2011 13:37:40 -0700
parents 472fc37e14a9 1aa57c62d0e4
children 82a81d5c5700 a983caeb2b3e
diffstat 71 files changed, 1295 insertions(+), 403 deletions(-)
--- a/.hgtags	Fri May 27 15:01:07 2011 -0700
+++ b/.hgtags	Thu Jun 02 13:37:40 2011 -0700
@@ -172,4 +172,6 @@
 3aea9e9feb073f5500e031be6186666bcae89aa2 hs21-b11
 9ad1548c6b63d596c411afc35147ffd5254426d9 jdk7-b142
 9ad1548c6b63d596c411afc35147ffd5254426d9 hs21-b12
+c149193c768b8b7233da4c3a3fdc0756b975848e hs21-b13
 c149193c768b8b7233da4c3a3fdc0756b975848e jdk7-b143
+fe189d4a44e9e8f0c7d78fcbd1c63701745752ca jdk7-b144
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri May 27 15:01:07 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Jun 02 13:37:40 2011 -0700
@@ -1028,7 +1028,12 @@
                                     if (AddressOps.equal(val, value)) {
                                         if (!printed) {
                                             printed = true;
-                                            blob.printOn(out);
+                                            try {
+                                                blob.printOn(out);
+                                            } catch (Exception e) {
+                                                out.println("Exception printing blob at " + base);
+                                                e.printStackTrace();
+                                            }
                                         }
                                         out.println("found at " + base + "\n");
                                     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/AdapterBlob.java	Thu Jun 02 13:37:40 2011 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class AdapterBlob extends CodeBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("AdapterBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public AdapterBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isAdapterBlob() {
+    return true;
+  }
+
+  public String getName() {
+    return "AdapterBlob: " + super.getName();
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Fri May 27 15:01:07 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,8 @@
   public boolean isUncommonTrapStub()   { return false; }
   public boolean isExceptionStub()      { return false; }
   public boolean isSafepointStub()      { return false; }
+  public boolean isRicochetBlob()       { return false; }
+  public boolean isAdapterBlob()        { return false; }
 
   // Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
   public boolean isJavaMethod()         { return false; }
--- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Fri May 27 15:01:07 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeCache.java	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,6 +57,8 @@
     virtualConstructor.addMapping("BufferBlob", BufferBlob.class);
     virtualConstructor.addMapping("nmethod", NMethod.class);
     virtualConstructor.addMapping("RuntimeStub", RuntimeStub.class);
+    virtualConstructor.addMapping("RicochetBlob", RicochetBlob.class);
+    virtualConstructor.addMapping("AdapterBlob", AdapterBlob.class);
     virtualConstructor.addMapping("SafepointBlob", SafepointBlob.class);
     virtualConstructor.addMapping("DeoptimizationBlob", DeoptimizationBlob.class);
     if (VM.getVM().isServerCompiler()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/RicochetBlob.java	Thu Jun 02 13:37:40 2011 -0700
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.code;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+/** RicochetBlob (currently only used by Compiler 2) */
+
+public class RicochetBlob extends SingletonBlob {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static void initialize(TypeDataBase db) {
+    // Type type = db.lookupType("RicochetBlob");
+
+    // FIXME: add any needed fields
+  }
+
+  public RicochetBlob(Address addr) {
+    super(addr);
+  }
+
+  public boolean isRicochetBlob() {
+    return true;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ServiceThread.java	Fri May 27 15:01:07 2011 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ServiceThread.java	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/make/hotspot_version	Fri May 27 15:01:07 2011 -0700
+++ b/make/hotspot_version	Thu Jun 02 13:37:40 2011 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=21
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=15
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/jprt.gmk	Fri May 27 15:01:07 2011 -0700
+++ b/make/jprt.gmk	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,24 @@
   ZIPFLAGS=-q -y
 endif
 
+jprt_build_productEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
+
+jprt_build_debugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
+
+jprt_build_fastdebugEmb:
+	$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
+
+jprt_build_productOpen:
+	$(MAKE) OPENJDK=true jprt_build_product
+
+jprt_build_debugOpen:
+	$(MAKE) OPENJDK=true jprt_build_debug
+
+jprt_build_fastdebugOpen:
+	$(MAKE) OPENJDK=true jprt_build_fastdebug
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
 	( $(CD) $(JDK_IMAGE_DIR) && \
 	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
--- a/make/jprt.properties	Fri May 27 15:01:07 2011 -0700
+++ b/make/jprt.properties	Thu Jun 02 13:37:40 2011 -0700
@@ -202,16 +202,21 @@
     ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     ${jprt.my.windows.x64}-{product|fastdebug|debug}
 
+jprt.build.targets.open= \
+    ${jprt.my.solaris.i586}-{productOpen}, \
+    ${jprt.my.solaris.x64}-{debugOpen}, \
+    ${jprt.my.linux.x64}-{productOpen}
+
 jprt.build.targets.embedded= \
-    ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
-    ${jprt.my.linux.ppc}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcv2}-{product|fastdebug}, \
-    ${jprt.my.linux.ppcsflt}-{product|fastdebug}, \
-    ${jprt.my.linux.armvfp}-{product|fastdebug}, \
-    ${jprt.my.linux.armsflt}-{product|fastdebug}
+    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
+    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armvfp}-{productEmb|fastdebugEmb}, \
+    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
 
 jprt.build.targets.all=${jprt.build.targets.standard}, \
-    ${jprt.build.targets.embedded}
+    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
 
 jprt.build.targets.jdk7=${jprt.build.targets.all}
 jprt.build.targets.jdk7temp=${jprt.build.targets.all}
@@ -453,6 +458,12 @@
     ${jprt.my.windows.x64}-product-c2-jbb_G1, \
     ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
 
+# Some basic "smoke" tests for OpenJDK builds
+jprt.test.targets.open = \
+    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
+    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
+
 # Testing for actual embedded builds is different to standard
 jprt.my.linux.i586.test.targets.embedded = \
     linux_i586_2.6-product-c1-scimark
@@ -461,6 +472,7 @@
 # Note: no PPC or ARM tests at this stage
 
 jprt.test.targets.standard = \
+  ${jprt.my.linux.i586.test.targets.embedded}, \
   ${jprt.my.solaris.sparc.test.targets}, \
   ${jprt.my.solaris.sparcv9.test.targets}, \
   ${jprt.my.solaris.i586.test.targets}, \
@@ -468,7 +480,8 @@
   ${jprt.my.linux.i586.test.targets}, \
   ${jprt.my.linux.x64.test.targets}, \
   ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}
+  ${jprt.my.windows.x64.test.targets}, \
+  ${jprt.test.targets.open}
 
 jprt.test.targets.embedded= 		\
   ${jprt.my.linux.i586.test.targets.embedded}, \
--- a/make/linux/README	Fri May 27 15:01:07 2011 -0700
+++ b/make/linux/README	Thu Jun 02 13:37:40 2011 -0700
@@ -1,4 +1,4 @@
-Copyright (c) 2007 Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   
 This code is free software; you can redistribute it and/or modify it
--- a/make/windows/projectfiles/kernel/Makefile	Fri May 27 15:01:07 2011 -0700
+++ b/make/windows/projectfiles/kernel/Makefile	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #   
 # This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -47,7 +47,7 @@
 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
   // of 128-bits operands for SSE instructions.
-  jlong *operand = (jlong*)(((long)adr)&((long)(~0xF)));
+  jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
   // Store the value to a 128-bits operand.
   operand[0] = lo;
   operand[1] = hi;
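
The hunk above is a portability fix: casting a pointer through long truncates the upper half on LLP64 platforms such as 64-bit Windows, where long is 32 bits, while intptr_t is pointer-sized everywhere. A minimal standalone sketch of the same 16-byte align-down mask (illustrative only, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    // Round an address down to the nearest 16-byte boundary, as the
    // double_quadword() expression does for SSE operands.
    static void* align_down_16(void* p) {
      return (void*)(((intptr_t)p) & (intptr_t)(~0xF));
    }

    int main() {
      alignas(16) char buf[32];
      void* q = align_down_16(buf + 5);
      assert(((uintptr_t)q & 0xF) == 0);   // 16-byte aligned
      assert(q == (void*)buf);             // rounded back down to buf
      return 0;
    }
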
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -389,7 +389,7 @@
   }
 }
 
-#ifndef PRODUCT
+#ifdef ASSERT
 void MethodHandles::RicochetFrame::verify_offsets() {
   // Check compatibility of this struct with the more generally used offsets of class frame:
   int ebp_off = sender_link_offset_in_bytes();  // offset from struct base to local rbp value
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates.  All Rights Reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates.  All Rights Reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Fri May 27 15:01:07 2011 -0700
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 !!
-!! Copyright (c) 2005, 2008 Oracle and/or its affiliates. All rights reserved.
+!! Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
 !! DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 !!
 !! This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/hsdis/README	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/tools/hsdis/README	Thu Jun 02 13:37:40 2011 -0700
@@ -1,4 +1,4 @@
-Copyright (c) 2008 Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   
 This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciMethodHandle.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/ci/ciMethodHandle.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -43,7 +43,7 @@
   methodHandle callee(_callee->get_methodOop());
   // We catch all exceptions here that could happen in the method
   // handle compiler and stop the VM.
-  MethodHandleCompiler mhc(h, callee, _profile->count(), is_invokedynamic, THREAD);
+  MethodHandleCompiler mhc(h, callee->name(), callee->signature(), _profile->count(), is_invokedynamic, THREAD);
   if (!HAS_PENDING_EXCEPTION) {
     methodHandle m = mhc.compile(THREAD);
     if (!HAS_PENDING_EXCEPTION) {
--- a/src/share/vm/ci/ciObject.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/ci/ciObject.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -187,7 +187,7 @@
 // ciObject::can_be_constant
 bool ciObject::can_be_constant() {
   if (ScavengeRootsInCode >= 1)  return true;  // now everybody can encode as a constant
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
 // ------------------------------------------------------------------
@@ -204,7 +204,7 @@
       return true;
     }
   }
-  return handle() == NULL || !is_scavengable();
+  return handle() == NULL || is_perm();
 }
 
 
--- a/src/share/vm/ci/ciObject.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/ci/ciObject.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -108,7 +108,7 @@
   int hash();
 
   // Tells if this oop has an encoding as a constant.
-  // True if is_scavengable is false.
+  // True if is_perm is true.
   // Also true if ScavengeRootsInCode is non-zero.
   // If it does not have an encoding, the compiler is responsible for
   // making other arrangements for dealing with the oop.
@@ -116,7 +116,7 @@
   bool can_be_constant();
 
   // Tells if this oop should be made a constant.
-  // True if is_scavengable is false or ScavengeRootsInCode > 1.
+  // True if is_perm is true or ScavengeRootsInCode > 1.
   bool should_be_constant();
 
   // Is this object guaranteed to be in the permanent part of the heap?
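
The switch from !is_scavengable() to is_perm() in the two predicates above matters once a collector may answer is_scavengable() conservatively (elsewhere in this changeset, G1 reports every non-humongous location as scavengable). A hedged sketch of the resulting rule, with hypothetical fields standing in for the ci machinery:

    #include <cassert>

    struct CiObjectSketch {
      bool is_null;   // handle() == NULL
      bool is_perm;   // guaranteed to live in the permanent generation
    };

    // Mirrors the new can_be_constant(): perm (or null) oops can always be
    // embedded; others only when ScavengeRootsInCode permits it.
    static bool can_be_constant(const CiObjectSketch& o, int scavenge_roots_in_code) {
      if (scavenge_roots_in_code >= 1) return true;  // everybody can encode
      return o.is_null || o.is_perm;
    }

    int main() {
      CiObjectSketch movable = { false, false };
      assert(!can_be_constant(movable, 0));  // may move: cannot embed
      assert(can_be_constant(movable, 1));   // nmethod becomes a scavenge root
      return 0;
    }
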
--- a/src/share/vm/code/nmethod.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/code/nmethod.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1810,7 +1810,7 @@
   void maybe_print(oop* p) {
     if (_print_nm == NULL)  return;
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
                   (intptr_t)(*p), (intptr_t)p);
     (*p)->print();
@@ -2311,7 +2311,7 @@
       _nm->print_nmethod(true);
       _ok = false;
     }
-    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
@@ -2324,7 +2324,7 @@
     DebugScavengeRoot debug_scavenge_root(this);
     oops_do(&debug_scavenge_root);
     if (!debug_scavenge_root.ok())
-      fatal("found an unadvertised bad non-perm oop in the code cache");
+      fatal("found an unadvertised bad scavengable oop in the code cache");
   }
   assert(scavenge_root_not_marked(), "");
 }
--- a/src/share/vm/code/nmethod.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/code/nmethod.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -109,7 +109,7 @@
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
-  friend class CodeCache;  // non-perm oops
+  friend class CodeCache;  // scavengable oops
  private:
   // Shared fields for all nmethod's
   methodOop _method;
@@ -466,17 +466,17 @@
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
-  // Non-perm oop support
+  // Scavengable oop support
   bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
  protected:
-  enum { npl_on_list = 0x01, npl_marked = 0x10 };
-  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
+  enum { sl_on_list = 0x01, sl_marked = 0x10 };
+  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
   void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
   // assertion-checking and pruning logic uses the bits of _scavenge_root_state
 #ifndef PRODUCT
-  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
-  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
-  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
+  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
+  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
+  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
   // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
 #endif //PRODUCT
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
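
The renamed constants above encode a two-flag state in _scavenge_root_state: bit 0x01 records scavenge-root-list membership and bit 0x10 is a debug-only mark used by the pruning asserts. A standalone sketch of the same bit manipulation:

    #include <cassert>

    enum { sl_on_list = 0x01, sl_marked = 0x10 };

    int main() {
      unsigned state = 0;
      state = sl_on_list;                  // set_on_scavenge_root_list()
      state |= sl_marked;                  // set_scavenge_root_marked()
      state &= ~(unsigned)sl_marked;       // clear_scavenge_root_marked()
      assert((state & ~sl_on_list) == 0);  // scavenge_root_not_marked()
      state = 0;                           // clear_on_scavenge_root_list()
      assert((state & 1) == 0);            // on_scavenge_root_list() is false
      return 0;
    }
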
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -407,6 +407,11 @@
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
+                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                             _sweep_limit, bottom(), end());
+    }
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -7888,60 +7888,64 @@
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
-  }
-}
-
-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not yet have added back to
-// the free lists.
+    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
+                        _limit);
+  }
+}
+
+void SweepClosure::print_on(outputStream* st) const {
+  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                _sp->bottom(), _sp->end());
+  tty->print_cr("_limit = " PTR_FORMAT, _limit);
+  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
+  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
+  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
+                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
+}
+
+#ifndef PRODUCT
+// Assertion checking only:  no useful work in product mode --
+// however, if any of the flags below become product flags,
+// you may need to review this code to see if it needs to be
+// enabled in product mode.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  // Flush any remaining coterminal free run as a single
-  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    assert(freeFinger() < _limit, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("Sweep: last chunk: ");
-      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
-                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
-    }
-  } // else nothing to flush
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT " bytes",
-                 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
-                             SIZE_FORMAT" bytes  "
-        "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
-        _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-        _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
-        sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
-
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
-        gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
-          indexListReturnedBytes);
-        gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
-          dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
+    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
+    print();
+    ShouldNotReachHere();
+  }
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
+                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
+                           SIZE_FORMAT" bytes  "
+      "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
+      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
+                        * sizeof(HeapWord);
+    gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+
+    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+      gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+      gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
+        indexListReturnedBytes);
+      gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
+        dictReturnedBytes);
+    }
+  }
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
-  }
-}
+    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
+                           _limit);
+  }
+}
+#endif  // PRODUCT
 
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
@@ -8001,15 +8005,17 @@
   // we started the sweep, it may no longer be one because heap expansion
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  // previous _end of the space), so we may have stepped past _limit:
+  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any remaining coterminal free run as a single
+    // Flush any free range we might be holding as a single
     // coalesced chunk to the appropriate free list.
     if (inFreeRange()) {
-      assert(freeFinger() < _limit, "finger points too high");
+      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
+             err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
       if (CMSTraceSweeper) {
@@ -8033,7 +8039,16 @@
     res = fc->size();
     do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
+    // If we flush the chunk at hand in lookahead_and_flush()
+    // and it's coalesced with a preceding chunk, then the
+    // process of "mangling" the payload of the coalesced block
+    // will cause erasure of the size information from the
+    // (erstwhile) header of all the coalesced blocks but the
+    // first, so the first disjunct in the assert will not hold
+    // in that specific case (in which case the second disjunct
+    // will hold).
+    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
+           "Otherwise the size info doesn't change at this step");
     NOT_PRODUCT(
       _numObjectsAlreadyFree++;
       _numWordsAlreadyFree += res;
@@ -8103,7 +8118,7 @@
 //
 
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  size_t size = fc->size();
+  const size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
@@ -8112,7 +8127,7 @@
   }
   // a chunk that is already free, should not have been
   // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const addr = (HeapWord*) fc;
   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   // Verify that the bit map has no bits marked between
   // addr and purported end of this block.
@@ -8149,7 +8164,7 @@
         }
       } else {
         // the midst of a free range, we are coalescing
-        debug_only(record_free_block_coalesced(fc);)
+        print_free_block_coalesced(fc);
         if (CMSTraceSweeper) {
           gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
         }
@@ -8173,6 +8188,10 @@
         }
       }
     }
+    // Note that if the chunk is not coalescable (the else arm
+    // below), we unconditionally flush, without needing to do
+    // a "lookahead," as we do below.
+    if (inFreeRange()) lookahead_and_flush(fc, size);
   } else {
     // Code path common to both original and adaptive free lists.
 
@@ -8191,8 +8210,8 @@
   // This is a chunk of garbage.  It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+  HeapWord* const addr = (HeapWord*) fc;
+  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
 
   if (_sp->adaptive_freelists()) {
     // Verify that the bit map has no bits marked between
@@ -8205,7 +8224,6 @@
       // start of a new free range
       assert(size > 0, "A free range should have a size");
       initialize_free_range(addr, false);
-
     } else {
       // this will be swept up when we hit the end of the
       // free range
@@ -8235,6 +8253,9 @@
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   }
+  assert(_limit >= addr + size,
+         "A freshly garbage chunk can't possibly straddle over _limit");
+  if (inFreeRange()) lookahead_and_flush(fc, size);
   return size;
 }
 
@@ -8284,8 +8305,8 @@
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
-    // Note that there are objects used during class redefinition
-    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
+    // Note that there are objects used during class redefinition,
+    // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
     // which are discarded with their is_conc_safe state still
     // false.  These object may be floating garbage so may be
     // seen here.  If they are floating garbage their size
@@ -8307,7 +8328,7 @@
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
@@ -8318,11 +8339,11 @@
     gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   }
 
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const fc_addr = (HeapWord*) fc;
 
   bool coalesce;
-  size_t left  = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
+  const size_t left  = pointer_delta(fc_addr, freeFinger());
+  const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
     // numeric value forms a coalition aggressiveness metric
     case 0:  { // never coalesce
@@ -8355,15 +8376,15 @@
   // If the chunk is in a free range and either we decided to coalesce above
   // or the chunk is near the large block at the end of the heap
   // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  const bool doCoalesce = inFreeRange()
+                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   if (doCoalesce) {
     // Coalesce the current free range on the left with the new
     // chunk on the right.  If either is on a free list,
     // it must be removed from the list and stashed in the closure.
     if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+      FreeChunk* const ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
         "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
         assert(_sp->verifyChunkInFreeLists(ffc),
@@ -8380,13 +8401,14 @@
       _sp->removeFreeChunkFromFreeLists(fc);
     }
     set_lastFreeRangeCoalesced(true);
+    print_free_block_coalesced(fc);
   } else {  // not in a free range and/or should not coalesce
     // Return the current free range and start a new one.
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
       flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
+                           pointer_delta(fc_addr, freeFinger()));
     }
     // Set up for new free range.  Pass along whether the right hand
     // chunk is in the free lists.
@@ -8394,6 +8416,42 @@
   }
 }
 
+// Lookahead flush:
+// If we are tracking a free range, and this is the last chunk that
+// we'll look at because its end crosses past _limit, we'll preemptively
+// flush it along with any free range we may be holding on to. Note that
+// this can be the case only for an already free or freshly garbage
+// chunk. If this block is an object, it can never straddle
+// over _limit. The "straddling" occurs when _limit is set at
+// the previous end of the space when this cycle started, and
+// a subsequent heap expansion caused the previously co-terminal
+// free block to be coalesced with the newly expanded portion,
+// thus rendering _limit a non-block-boundary making it dangerous
+// for the sweeper to step over and examine.
+void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
+  assert(inFreeRange(), "Should only be called if currently in a free range.");
+  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
+  assert(_sp->used_region().contains(eob - 1),
+         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
+                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+  if (eob >= _limit) {
+    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
+                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
+                             "[" PTR_FORMAT "," PTR_FORMAT ")",
+                             _limit, fc, eob, _sp->bottom(), _sp->end());
+    }
+    // Return the storage we are tracking back into the free lists.
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("Flushing ... ");
+    }
+    assert(freeFinger() < eob, "Error");
+    flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
+  }
+}
+
 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
@@ -8419,6 +8477,8 @@
     }
     _sp->addChunkAndRepairOffsetTable(chunk, size,
             lastFreeRangeCoalesced());
+  } else if (CMSTraceSweeper) {
+    gclog_or_tty->print_cr("Already in free list: nothing to flush");
   }
   set_inFreeRange(false);
   set_freeRangeInFreeLists(false);
@@ -8477,13 +8537,14 @@
 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
   return debug_cms_space->verifyChunkInFreeLists(fc);
 }
-
-void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
+#endif
+
+void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
-  }
-}
-#endif
+    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
+                           fc, fc->size());
+  }
+}
 
 // CMSIsAliveClosure
 bool CMSIsAliveClosure::do_object_b(oop obj) {
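
The heart of the new lookahead_and_flush() above is a boundary test: if the chunk being examined ends at or past _limit, the sweep must flush the free range it is tracking now, because heap expansion may have coalesced the old end-of-space block and left _limit off a block boundary. A much-simplified sketch of just that test, using word indices in place of HeapWord* (hypothetical names):

    #include <cassert>
    #include <cstddef>

    // True when chunk [fc, fc + size) reaches or crosses the sweep limit,
    // i.e. this is the last chunk the sweeper may safely examine.
    static bool reaches_limit(size_t fc, size_t size, size_t limit) {
      size_t eob = fc + size;   // end of block, like eob in lookahead_and_flush
      return eob >= limit;
    }

    int main() {
      // limit set at the old end of the space, chunk grew past it
      assert(reaches_limit(/*fc=*/90, /*size=*/20, /*limit=*/100));
      assert(!reaches_limit(/*fc=*/50, /*size=*/20, /*limit=*/100));
      return 0;
    }
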
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1701,9 +1701,9 @@
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;    // Generation being swept
   CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;// the address at which the sweep should stop because
-                                        // we do not expect blocks eligible for sweeping past
-                                        // that address.
+  HeapWord*                      _limit;// the address at or above which the sweep should stop
+                                        // because we do not expect newly garbage blocks
+                                        // eligible for sweeping past that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                 // generation)
@@ -1750,6 +1750,10 @@
   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
   void do_already_free_chunk(FreeChunk *fc);
+  // Work method called when processing an already free or a
+  // freshly garbage chunk to do a lookahead and possibly a
+  // preemptive flush if crossing over _limit.
+  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
@@ -1758,8 +1762,6 @@
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
-  size_t freeRangeSize() const          { return _freeRangeSize; }
-  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
   bool inFreeRange()    const           { return _inFreeRange; }
   void set_inFreeRange(bool v)          { _inFreeRange = v; }
   bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
@@ -1779,14 +1781,16 @@
   void do_yield_work(HeapWord* addr);
 
   // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+  void print_free_block_coalesced(FreeChunk* fc) const;
 
  public:
   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
                CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
+  ~SweepClosure() PRODUCT_RETURN;
 
   size_t       do_blk_careful(HeapWord* addr);
+  void         print() const { print_on(tty); }
+  void         print_on(outputStream *st) const;
 };
 
 // Closures related to weak references processing
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -3054,6 +3054,28 @@
     _should_gray_objects = true;
 }
 
+// Resets the region fields of active CMTasks whose values point
+// into the collection set.
+void ConcurrentMark::reset_active_task_region_fields_in_cset() {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
+  assert(parallel_marking_threads() <= _max_task_num, "sanity");
+
+  for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
+    CMTask* task = _tasks[i];
+    HeapWord* task_finger = task->finger();
+    if (task_finger != NULL) {
+      assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
+      HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
+      if (finger_region->in_collection_set()) {
+        // The task's current region is in the collection set.
+        // This region will be evacuated in the current GC and
+        // the region fields in the task will be stale.
+        task->giveup_current_region();
+      }
+    }
+  }
+}
+
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
   // Clear all marks to force marking thread to do nothing
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -809,10 +809,19 @@
 
   // It indicates that a new collection set is being chosen.
   void newCSet();
+
   // It registers a collection set heap region with CM. This is used
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
 
+  // Resets the region fields of any active CMTask whose region fields
+  // are in the collection set (i.e. the region currently claimed by
+  // the CMTask will be evacuated and may be used, subsequently, as
+  // an alloc region). When this happens the region fields in the CMTask
+  // are stale and, hence, should be cleared causing the worker thread
+  // to claim a new region.
+  void reset_active_task_region_fields_in_cset();
+
   // Registers the maximum region-end associated with a set of
   // regions with CM. Again this is used to determine whether any
   // heap regions are located above the finger.
@@ -1039,9 +1048,6 @@
   void setup_for_region(HeapRegion* hr);
   // it brings up-to-date the limit of the region
   void update_region_limit();
-  // it resets the local fields after a task has finished scanning a
-  // region
-  void giveup_current_region();
 
   // called when either the words scanned or the refs visited limit
   // has been reached
@@ -1094,6 +1100,11 @@
   // exit the termination protocol after it's entered it.
   virtual bool should_exit_termination();
 
+  // Resets the local region fields after a task has finished scanning a
+  // region; or when they have become stale as a result of the region
+  // being evacuated.
+  void giveup_current_region();
+
   HeapWord* finger()            { return _finger; }
 
   bool has_aborted()            { return _has_aborted; }
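
The walk added in concurrentMark.cpp above can be summarized independently of the G1 types: any active marking task whose finger points into a collection-set region abandons that region, since the region will be evacuated during the pause. A hedged sketch with stand-in types:

    struct TaskSketch {
      const void* finger;        // position inside the claimed region, or null
      bool region_in_cset;       // claimed region chosen for evacuation
      bool gave_up;
      void giveup_current_region() { gave_up = true; finger = 0; }
    };

    // Mirrors reset_active_task_region_fields_in_cset(): clear stale
    // region fields so the owning thread claims a fresh region later.
    static void reset_tasks_in_cset(TaskSketch* tasks, int n) {
      for (int i = 0; i < n; i++) {
        if (tasks[i].finger != 0 && tasks[i].region_in_cset) {
          tasks[i].giveup_current_region();
        }
      }
    }

    int main() {
      TaskSketch t[2] = { { &t, true, false }, { 0, false, false } };
      reset_tasks_in_cset(t, 2);
      return t[0].gave_up ? 0 : 1;  // first task must have given up
    }
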
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -428,6 +428,37 @@
   _cmThread->stop();
 }
 
+#ifdef ASSERT
+// A region is added to the collection set as it is retired
+// so an address p can point to a region which will be in the
+// collection set but has not yet been retired.  This method
+// therefore is only accurate during a GC pause after all
+// regions have been retired.  It is used for debugging
+// to check if an nmethod has references to objects that can
+// be move during a partial collection.  Though it can be
+// inaccurate, it is sufficient for G1 because the conservative
+// implementation of is_scavengable() for G1 will indicate that
+// all nmethods must be scanned during a partial collection.
+bool G1CollectedHeap::is_in_partial_collection(const void* p) {
+  HeapRegion* hr = heap_region_containing(p);
+  return hr != NULL && hr->in_collection_set();
+}
+#endif
+
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(const void* p) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  HeapRegion* hr = heap_region_containing(p);
+  if (hr == NULL) {
+     // perm gen (or null)
+     return false;
+  } else {
+    return !hr->isHumongous();
+  }
+}
+
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
@@ -3292,8 +3323,9 @@
       // progress, this will be zero.
       _cm->set_oops_do_bound();
 
-      if (mark_in_progress())
+      if (mark_in_progress()) {
         concurrent_mark()->newCSet();
+      }
 
 #if YOUNG_LIST_VERBOSE
       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
@@ -3303,6 +3335,16 @@
 
       g1_policy()->choose_collection_set(target_pause_time_ms);
 
+      // We have chosen the complete collection set. If marking is
+      // active then, we clear the region fields of any of the
+      // concurrent marking tasks whose region fields point into
+      // the collection set as these values will become stale. This
+      // will cause the owning marking threads to claim a new region
+      // when marking restarts.
+      if (mark_in_progress()) {
+        concurrent_mark()->reset_active_task_region_fields_in_cset();
+      }
+
       // Nothing to do if we were unable to choose a collection set.
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("\nAfter pause, heap:");
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1254,6 +1254,12 @@
     return hr != NULL && hr->is_young();
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr);
+
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2011 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -339,6 +339,21 @@
   return false;
 }
 
+bool ParallelScavengeHeap::is_scavengable(const void* addr) {
+  return is_in_young((oop)addr);
+}
+
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is perm (low addr), old, young (high addr)
+  return p >= old_gen()->reserved().end();
+}
+#endif
+
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -127,6 +127,12 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
+  // Return true if the reference points to an object that
+  // can be moved in a partial collection.  For currently implemented
+  // generational collectors that means during a collection of
+  // the young gen.
+  virtual bool is_scavengable(const void* addr);
+
   // Does this heap support heap inspection? (+PrintClassHistogram)
   bool supports_heap_inspection() const { return true; }
 
@@ -143,6 +149,10 @@
     return perm_gen()->reserved().contains(p);
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void *p);
+#endif
+
   bool is_permanent(const void *p) const {    // committed part
     return perm_gen()->is_in(p);
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -51,7 +51,12 @@
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
-  return young_gen()->is_in_reserved(p);
+  // Assumes that the old gen address range is lower than that of the young gen.
+  const void* loc = (void*) p;
+  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
+  assert(result == young_gen()->is_in_reserved(p),
+        err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
 }
 
 inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
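
The rewritten is_in_young() above relies on the layout invariant spelled out in its comment: generations sit at increasing addresses (perm, then old, then young), so young-gen membership reduces to one compare against the young gen's start, with the full is_in_reserved() check retained under assert. A sketch of that invariant, with a hypothetical layout:

    #include <cassert>
    #include <cstdint>

    struct LayoutSketch {      // hypothetical low-to-high generation layout
      uintptr_t old_start, young_start, heap_end;
    };

    // One comparison suffices when young is the highest generation.
    static bool is_in_young(const LayoutSketch& l, uintptr_t addr) {
      return addr >= l.young_start;
    }

    int main() {
      LayoutSketch l = { 0x2000, 0x3000, 0x4000 };
      assert(is_in_young(l, 0x3500));    // inside young
      assert(!is_in_young(l, 0x2500));   // old gen address
      return 0;
    }
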
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -269,6 +269,13 @@
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;
 
+
+#ifdef ASSERT
+  // Returns true if "p" is in the part of the
+  // heap being collected.
+  virtual bool is_in_partial_collection(const void *p) = 0;
+#endif
+
   bool is_in_permanent_or_null(const void *p) const {
     return p == NULL || is_in_permanent(p);
   }
@@ -284,11 +291,7 @@
 
   // An object is scavengable if its location may move during a scavenge.
   // (A scavenge is a GC which is not a full GC.)
-  // Currently, this just means it is not perm (and not null).
-  // This could change if we rethink what's in perm-gen.
-  bool is_scavengable(const void *p) const {
-    return !is_in_permanent_or_null(p);
-  }
+  virtual bool is_scavengable(const void *p) = 0;
 
   // Returns "TRUE" if "p" is a method oop in the
   // current heap, with high probability. This predicate
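
Making is_scavengable() pure virtual above moves the policy out of the base class (previously a fixed "not perm and not null" test) into each collector, which is what lets G1 and ParallelScavenge give the specialized answers seen earlier in this changeset. A minimal sketch of the shape of the refactoring (hypothetical classes, not the HotSpot hierarchy):

    #include <cassert>
    #include <cstdint>

    struct HeapSketch {
      virtual ~HeapSketch() {}
      // Formerly a concrete "not perm and not null" test in the base class.
      virtual bool is_scavengable(const void* p) = 0;
    };

    struct YoungCompareHeap : HeapSketch {   // ParallelScavenge-like answer
      uintptr_t young_start;
      explicit YoungCompareHeap(uintptr_t ys) : young_start(ys) {}
      bool is_scavengable(const void* p) { return (uintptr_t)p >= young_start; }
    };

    int main() {
      YoungCompareHeap h(0x3000);
      HeapSketch* heap = &h;
      assert(heap->is_scavengable((const void*)0x3500));
      assert(!heap->is_scavengable((const void*)0x1000));
      return 0;
    }
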
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -203,11 +203,14 @@
   if (value == NULL) {
     st->print_cr(" NULL");
   } else if (java_lang_String::is_instance(value)) {
-    EXCEPTION_MARK;
-    Handle h_value (THREAD, value);
-    Symbol* sym = java_lang_String::as_symbol(h_value, CATCH);
-    print_symbol(sym, st);
-    sym->decrement_refcount();
+    char buf[40];
+    int len = java_lang_String::utf8_length(value);
+    java_lang_String::as_utf8_string(value, buf, sizeof(buf));
+    if (len >= (int)sizeof(buf)) {
+      st->print_cr(" %s...[%d]", buf, len);
+    } else {
+      st->print_cr(" %s", buf);
+    }
   } else {
     st->print_cr(" " PTR_FORMAT, (intptr_t) value);
   }
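
The replacement above avoids constructing a temporary Symbol (and the handle machinery around it) by copying the string's UTF-8 form into a fixed 40-byte stack buffer and tagging truncated output with the full length. A standalone sketch of the truncation idiom, with snprintf standing in for the java_lang_String helpers:

    #include <cstdio>
    #include <cstring>

    static void print_string_constant(const char* value) {
      char buf[40];
      int len = (int)strlen(value);              // utf8_length() stand-in
      snprintf(buf, sizeof(buf), "%s", value);   // as_utf8_string() stand-in
      if (len >= (int)sizeof(buf)) {
        printf(" %s...[%d]\n", buf, len);        // prefix plus full length
      } else {
        printf(" %s\n", buf);
      }
    }

    int main() {
      print_string_constant("short");
      print_string_constant("a string constant much longer than the forty-byte buffer");
      return 0;
    }
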
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -139,9 +139,15 @@
   ResourceMark rm(thread);
   methodHandle m (thread, method(thread));
   Bytecode_loadconstant ldc(m, bci(thread));
-  oop result = ldc.resolve_constant(THREAD);
-  DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc.cache_index()));
-  assert(result == cpce->f1(), "expected result for assembly code");
+  oop result = ldc.resolve_constant(CHECK);
+#ifdef ASSERT
+  {
+    // The bytecode wrappers aren't GC-safe so construct a new one
+    Bytecode_loadconstant ldc2(m, bci(thread));
+    ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc2.cache_index());
+    assert(result == cpce->f1(), "expected result for assembly code");
+  }
+#endif
 }
 IRT_END
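
The change from resolve_constant(THREAD) to resolve_constant(CHECK) above makes the call return immediately when resolution raises an exception, instead of running the assert against a possibly bogus result; the wrapper is then rebuilt because, as the new comment notes, the bytecode wrappers are not GC-safe across the call. A simplified sketch of the early-return pattern behind HotSpot's CHECK convention (not the real macro):

    #include <cstdio>

    static bool pending_exception = false;

    // Stand-in for HAS_PENDING_EXCEPTION / CHECK: bail out right after the
    // call when an exception is pending.
    #define RETURN_IF_PENDING  if (pending_exception) return

    static int resolve_constant() {
      pending_exception = true;   // simulate a failed resolution
      return 0;
    }

    static void ldc_handler() {
      int result = resolve_constant(); RETURN_IF_PENDING;
      printf("resolved %d\n", result);  // skipped: exception pending
    }

    int main() {
      ldc_handler();
      return pending_exception ? 0 : 1;
    }
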
 
--- a/src/share/vm/interpreter/rewriter.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/interpreter/rewriter.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -63,6 +63,15 @@
   _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
 
+// Unrewrite the bytecodes if an error occurs.
+void Rewriter::restore_bytecodes() {
+  int len = _methods->length();
+
+  for (int i = len-1; i >= 0; i--) {
+    methodOop method = (methodOop)_methods->obj_at(i);
+    scan_method(method, true);
+  }
+}
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
@@ -133,57 +142,94 @@
 
 
 // Rewrite a classfile-order CP index into a native-order CPC index.
-void Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  int  cp_index    = Bytes::get_Java_u2(p);
-  int  cache_index = cp_entry_to_cp_cache(cp_index);
-  Bytes::put_native_u2(p, cache_index);
+  if (!reverse) {
+    int  cp_index    = Bytes::get_Java_u2(p);
+    int  cache_index = cp_entry_to_cp_cache(cp_index);
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int pool_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
 
-void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  assert(p[-1] == Bytecodes::_invokedynamic, "");
-  int cp_index = Bytes::get_Java_u2(p);
-  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_secondary_cp_cache_entry(cpc);
+  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
+    int cpc2 = add_secondary_cp_cache_entry(cpc);
 
-  // Replace the trailing four bytes with a CPC index for the dynamic
-  // call site.  Unlike other CPC entries, there is one per bytecode,
-  // not just one per distinct CP entry.  In other words, the
-  // CPC-to-CP relation is many-to-one for invokedynamic entries.
-  // This means we must use a larger index size than u2 to address
-  // all these entries.  That is the main reason invokedynamic
-  // must have a five-byte instruction format.  (Of course, other JVM
-  // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
-  // Note: We use native_u4 format exclusively for 4-byte indexes.
+    // Replace the trailing four bytes with a CPC index for the dynamic
+    // call site.  Unlike other CPC entries, there is one per bytecode,
+    // not just one per distinct CP entry.  In other words, the
+    // CPC-to-CP relation is many-to-one for invokedynamic entries.
+    // This means we must use a larger index size than u2 to address
+    // all these entries.  That is the main reason invokedynamic
+    // must have a five-byte instruction format.  (Of course, other JVM
+    // implementations can use the bytes for other purposes.)
+    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
+  } else {
+    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
+                        Bytes::get_native_u4(p));
+    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
+    int pool_index = cp_cache_entry_pool_index(secondary_index);
+    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
+    // zero out 4 bytes
+    Bytes::put_Java_u4(p, 0);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 
 
 // Rewrite some ldc bytecodes to _fast_aldc
-void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
-  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
-  address p = bcp + offset;
-  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
-  constantTag tag = _pool->tag_at(cp_index).value();
-  if (tag.is_method_handle() || tag.is_method_type()) {
-    int cache_index = cp_entry_to_cp_cache(cp_index);
-    if (is_wide) {
-      (*bcp) = Bytecodes::_fast_aldc_w;
-      assert(cache_index == (u2)cache_index, "");
-      Bytes::put_native_u2(p, cache_index);
-    } else {
-      (*bcp) = Bytecodes::_fast_aldc;
-      assert(cache_index == (u1)cache_index, "");
-      (*p) = (u1)cache_index;
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
+                                 bool reverse) {
+  if (!reverse) {
+    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
+    address p = bcp + offset;
+    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+    constantTag tag = _pool->tag_at(cp_index).value();
+    if (tag.is_method_handle() || tag.is_method_type()) {
+      int cache_index = cp_entry_to_cp_cache(cp_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_fast_aldc_w;
+        assert(cache_index == (u2)cache_index, "index overflow");
+        Bytes::put_native_u2(p, cache_index);
+      } else {
+        (*bcp) = Bytecodes::_fast_aldc;
+        assert(cache_index == (u1)cache_index, "index overflow");
+        (*p) = (u1)cache_index;
+      }
+    }
+  } else {
+    Bytecodes::Code rewritten_bc =
+              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
+    if ((*bcp) == rewritten_bc) {
+      address p = bcp + offset;
+      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
+      int pool_index = cp_cache_entry_pool_index(cache_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_ldc_w;
+        assert(pool_index == (u2)pool_index, "index overflow");
+        Bytes::put_Java_u2(p, pool_index);
+      } else {
+        (*bcp) = Bytecodes::_ldc;
+        assert(pool_index == (u1)pool_index, "index overflow");
+        (*p) = (u1)pool_index;
+      }
     }
   }
 }
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(methodOop method) {
+void Rewriter::scan_method(methodOop method, bool reverse) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -236,6 +282,13 @@
 #endif
           break;
         }
+        case Bytecodes::_fast_linearswitch:
+        case Bytecodes::_fast_binaryswitch: {
+#ifndef CC_INTERP
+          (*bcp) = Bytecodes::_lookupswitch;
+#endif
+          break;
+        }
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
@@ -244,16 +297,18 @@
         case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
-          rewrite_member_reference(bcp, prefix_length+1);
+          rewrite_member_reference(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_invokedynamic:
-          rewrite_invokedynamic(bcp, prefix_length+1);
+          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_ldc:
-          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+        case Bytecodes::_fast_aldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
           break;
         case Bytecodes::_ldc_w:
-          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+        case Bytecodes::_fast_aldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
           break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
@@ -273,12 +328,13 @@
   if (nof_jsrs > 0) {
     method->set_has_jsrs();
     // Second pass will revisit this method.
-    assert(method->has_jsrs(), "");
+    assert(method->has_jsrs(), "didn't we just set this?");
   }
 }
 
 // After constant pool is created, revisit methods containing jsrs.
 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
+  ResourceMark rm(THREAD);
   ResolveOopMapConflicts romc(method);
   methodHandle original_method = method;
   method = romc.do_potential_rewrite(CHECK_(methodHandle()));
@@ -300,7 +356,6 @@
   return method;
 }
 
-
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
   Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
@@ -343,34 +398,57 @@
   }
 
   // rewrite methods, in two passes
-  int i, len = _methods->length();
+  int len = _methods->length();
 
-  for (i = len; --i >= 0; ) {
+  for (int i = len-1; i >= 0; i--) {
     methodOop method = (methodOop)_methods->obj_at(i);
     scan_method(method);
   }
 
   // allocate constant pool cache, now that we've seen all the bytecodes
-  make_constant_pool_cache(CHECK);
+  make_constant_pool_cache(THREAD);
+
+  // Restore bytecodes to their unrewritten state if there are exceptions
+  // rewriting bytecodes or allocating the cpCache
+  if (HAS_PENDING_EXCEPTION) {
+    restore_bytecodes();
+    return;
+  }
+}
 
-  for (i = len; --i >= 0; ) {
-    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
+// Relocate jsr/rets in a method.  This can't be done in the rewriter
+// stage because it can throw other exceptions, leaving the bytecodes
+// pointing at constant pool cache entries.
+// Link and check jvmti dependencies while we're iterating over the methods.
+// JSR292 code calls this with a different set of methods, so there are two entry points.
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
+  objArrayHandle methods(THREAD, this_oop->methods());
+  relocate_and_link(this_oop, methods, THREAD);
+}
+
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
+                                 objArrayHandle methods, TRAPS) {
+  int len = methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
 
     if (m->has_jsrs()) {
       m = rewrite_jsrs(m, CHECK);
       // Method might have gotten rewritten.
-      _methods->obj_at_put(i, m());
+      methods->obj_at_put(i, m());
     }
 
-    // Set up method entry points for compiler and interpreter.
+    // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);
 
+    // This is for JVMTI; it is unrelated to the relocator but is the last thing we do here.
 #ifdef ASSERT
     if (StressMethodComparator) {
       static int nmc = 0;
       for (int j = i; j >= 0 && j >= i-4; j--) {
         if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
-        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        bool z = MethodComparator::methods_EMCP(m(),
+                   (methodOop)methods->obj_at(j));
         if (j == i && !z) {
           tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
           assert(z, "method must compare equal to itself");
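
restore_bytecodes() can undo a half-finished rewrite because every transformation above has an exact inverse selected by the reverse flag; for member references the inverse is a byte-order and table-lookup swap between the big-endian ("Java order") constant-pool index and the native-order cache index. A self-contained sketch of the two u2 byte orders involved (the identity mapping stands in for cp_entry_to_cp_cache() / cp_cache_entry_pool_index()):

    #include <cstdint>
    #include <cstring>

    // Big-endian ("Java order") u2 accessors.
    static uint16_t get_java_u2(const uint8_t* p) { return (uint16_t)((p[0] << 8) | p[1]); }
    static void put_java_u2(uint8_t* p, uint16_t v) { p[0] = (uint8_t)(v >> 8); p[1] = (uint8_t)v; }

    // Native-order u2 accessors (whatever byte order the host CPU uses).
    static uint16_t get_native_u2(const uint8_t* p) { uint16_t v; memcpy(&v, p, 2); return v; }
    static void put_native_u2(uint8_t* p, uint16_t v) { memcpy(p, &v, 2); }

    static void rewrite_u2(uint8_t* p, bool reverse) {
      if (!reverse) {
        uint16_t cp_index = get_java_u2(p);
        uint16_t cache_index = cp_index;      // identity stands in for cp_entry_to_cp_cache()
        put_native_u2(p, cache_index);
      } else {
        uint16_t cache_index = get_native_u2(p);
        uint16_t pool_index = cache_index;    // identity stands in for cp_cache_entry_pool_index()
        put_java_u2(p, pool_index);
      }
    }
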
--- a/src/share/vm/interpreter/rewriter.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/interpreter/rewriter.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -85,13 +85,15 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(methodOop m);
-  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
+  void scan_method(methodOop m, bool reverse = false);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
+  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  // Revert bytecodes in case of an exception.
+  void restore_bytecodes();
 
+  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
@@ -100,6 +102,13 @@
   enum {
     _secondary_entry_tag = nth_bit(30)
   };
+
+  // Second pass, not gated by is_rewritten flag
+  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
+  // JSR292 version to call with its own set of methods.
+  static void relocate_and_link(instanceKlassHandle klass,
+                                objArrayHandle methods, TRAPS);
+
 };
 
 #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
--- a/src/share/vm/memory/blockOffsetTable.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/memory/blockOffsetTable.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,11 +566,17 @@
     q = n;
     n += _sp->block_size(n);
     assert(n > q,
-           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-                   n, last, _sp->bottom(), _sp->end()));
+           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
+                   " while querying blk_start(" PTR_FORMAT ")"
+                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                   n, last, addr, _sp->bottom(), _sp->end()));
   }
-  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
-  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
+  assert(q <= addr,
+         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
+                 q, addr));
+  assert(addr <= n,
+         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
+                 addr, n));
   return q;
 }
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -711,15 +711,6 @@
   _gen_process_strong_tasks->set_n_threads(t);
 }
 
-class AssertIsPermClosure: public OopClosure {
-public:
-  void do_oop(oop* p) {
-    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
-  }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-static AssertIsPermClosure assert_is_perm_closure;
-
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
@@ -962,6 +953,13 @@
   }
 }
 
+bool GenCollectedHeap::is_in_young(oop p) {
+  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
+  assert(result == _gens[0]->is_in_reserved(p),
+         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
+}
+
 // Returns "TRUE" iff "p" points into the allocated area of the heap.
 bool GenCollectedHeap::is_in(const void* p) const {
   #ifndef ASSERT
@@ -984,10 +982,16 @@
   return false;
 }
 
-// Returns "TRUE" iff "p" points into the allocated area of the heap.
-bool GenCollectedHeap::is_in_youngest(void* p) {
-  return _gens[0]->is_in(p);
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool GenCollectedHeap::is_in_partial_collection(const void* p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is young (low addr), old, perm (high addr)
+  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
 }
+#endif
 
 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
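
is_in_young() and is_in_partial_collection() lean on the generations being laid out by ascending address, young then old then perm, so membership checks reduce to single pointer comparisons. A sketch of that reduction under the same layout assumption (HeapLayout is hypothetical, not the Generation API):

    // Hypothetical layout mirroring the code above: young, old, perm by
    // ascending address. Membership tests become single pointer compares.
    struct HeapLayout {
      const char* old_start;   // end of young == start of old
      const char* perm_start;  // young + old end here

      bool is_in_young(const void* p) const {
        return (const char*)p < old_start;
      }
      bool is_in_partial_collection(const void* p) const {
        return p != 0 && (const char*)p < perm_start;
      }
    };
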
--- a/src/share/vm/memory/genCollectedHeap.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -216,8 +216,18 @@
     }
   }
 
-  // Returns "TRUE" iff "p" points into the youngest generation.
-  bool is_in_youngest(void* p);
+  // Returns true if the reference is to an object in the reserved space
+  // for the young generation.
+  // Assumes that the young gen address range is lower than that of the old gen.
+  bool is_in_young(oop p);
+
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr) {
+    return is_in_young((oop)addr);
+  }
 
   // Iteration functions.
   void oop_iterate(OopClosure* cl);
@@ -283,7 +293,7 @@
     //       "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
-    return is_in_youngest((void*)new_obj);
+    return is_in_young(new_obj);
   }
 
   // Can a compiler elide a store barrier when it writes
--- a/src/share/vm/memory/sharedHeap.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -102,6 +102,17 @@
 };
 static AssertIsPermClosure assert_is_perm_closure;
 
+#ifdef ASSERT
+class AssertNonScavengableClosure: public OopClosure {
+public:
+  virtual void do_oop(oop* p) {
+    assert(!Universe::heap()->is_in_partial_collection(*p),
+      "Referent should not be scavengable.");  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+static AssertNonScavengableClosure assert_is_non_scavengable_closure;
+#endif
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -196,9 +207,10 @@
         CodeCache::scavenge_root_nmethods_do(code_roots);
       }
     }
-    // Verify if the code cache contents are in the perm gen
-    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
-    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
+    // Verify that the code cache contents are not subject to
+    // movement by a scavenging collection.
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   }
 
   if (!collecting_perm_gen) {
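
AssertNonScavengableClosure follows the usual debug-only visitor shape: a closure applied to every oop reachable from the code cache, asserting the invariant and compiled only under ASSERT. Its skeleton, with an illustrative predicate in place of Universe::heap():

    #include <cassert>

    struct OopClosure {
      virtual void do_oop(void** p) = 0;
      virtual ~OopClosure() {}
    };

    #ifdef ASSERT
    struct AssertNonScavengable : public OopClosure {
      bool (*in_partial_collection)(const void*);   // heap predicate under test
      explicit AssertNonScavengable(bool (*pred)(const void*))
        : in_partial_collection(pred) {}
      virtual void do_oop(void** p) {
        assert(!in_partial_collection(*p) && "referent should not be scavengable");
      }
    };
    #endif
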
--- a/src/share/vm/oops/instanceKlass.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -335,6 +335,9 @@
         this_oop->rewrite_class(CHECK_false);
       }
 
+      // relocate jsrs and link methods after they are all rewritten
+      this_oop->relocate_and_link_methods(CHECK_false);
+
       // Initialize the vtable and interface table after
       // methods have been rewritten since rewrite may
       // fabricate new methodOops.
@@ -365,17 +368,8 @@
 
 
 // Rewrite the byte codes of all of the methods of a class.
-// Three cases:
-//    During the link of a newly loaded class.
-//    During the preloading of classes to be written to the shared spaces.
-//      - Rewrite the methods and update the method entry points.
-//
-//    During the link of a class in the shared spaces.
-//      - The methods were already rewritten, update the metho entry points.
-//
 // The rewriter must be called exactly once. Rewriting must happen after
 // verification but before the first method of the class is executed.
-
 void instanceKlass::rewrite_class(TRAPS) {
   assert(is_loaded(), "must be loaded");
   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
@@ -383,10 +377,19 @@
     assert(this_oop()->is_shared(), "rewriting an unshared class?");
     return;
   }
-  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
+  Rewriter::rewrite(this_oop, CHECK);
   this_oop->set_rewritten();
 }
 
+// Now relocate and link method entry points after class is rewritten.
+// This is not gated by the is_rewritten flag. In case of an exception, it can be
+// executed more than once.
+void instanceKlass::relocate_and_link_methods(TRAPS) {
+  assert(is_loaded(), "must be loaded");
+  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+  Rewriter::relocate_and_link(this_oop, CHECK);
+}
+
 
 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
--- a/src/share/vm/oops/instanceKlass.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -392,6 +392,7 @@
   bool link_class_or_fail(TRAPS); // returns false on failure
   void unlink_class();
   void rewrite_class(TRAPS);
+  void relocate_and_link_methods(TRAPS);
   methodOop class_initializer();
 
   // set the class to initialized if no static initializer is present
--- a/src/share/vm/oops/instanceRefKlass.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -397,7 +397,7 @@
 
   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the referent
       // field is not part of the oop mask and therefore skipped by the
       // regular verify code.
@@ -415,7 +415,7 @@
   if (next != NULL) {
     guarantee(next->is_oop(), "next field verify failed");
     guarantee(next->is_instanceRef(), "next field verify failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the next field is
       // not part of the oop mask and therefore skipped by the regular
       // verify code.
--- a/src/share/vm/oops/methodOop.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -693,7 +693,10 @@
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
-  assert(_i2i_entry == NULL, "should only be called once");
+  // If the code cache is full, we may reenter this function for the
+  // leftover methods that weren't linked.
+  if (_i2i_entry != NULL) return;
+
   assert(_adapter == NULL, "init'd to NULL" );
   assert( _code == NULL, "nothing compiled yet" );
 
@@ -717,7 +720,7 @@
   // called from the vtable.  We need adapters on such methods that get loaded
   // later.  Ditto for mega-morphic itable calls.  If this proves to be a
   // problem we'll make these lazily later.
-  (void) make_adapters(h_method, CHECK);
+  if (UseCompiler) (void) make_adapters(h_method, CHECK);
 
   // ONLY USE the h_method now as make_adapter may have blocked
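
Replacing the assert in link_method() with an early return makes linking idempotent: when the code cache fills up part-way, the leftover methods can simply be linked again on a later pass. The guard-and-return shape in miniature (Method and make_entry() are hypothetical):

    struct Method {
      void* _i2i_entry;
      Method() : _i2i_entry(0) {}
      void* make_entry() { return this; }   // stand-in for real entry setup

      void link() {
        if (_i2i_entry != 0) return;        // already linked on an earlier pass
        _i2i_entry = make_entry();          // safe to retry after a failure
      }
    };
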
 
--- a/src/share/vm/opto/cfgnode.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1556,7 +1556,9 @@
 
   Node *top = phase->C->top();
   bool new_phi = (outcnt() == 0); // transforming new Phi
-  assert(!can_reshape || !new_phi, "for igvn new phi should be hooked");
+  // No change for igvn if new phi is not hooked
+  if (new_phi && can_reshape)
+    return NULL;
 
   // There are 2 situations when only one valid phi input is left
   // (in addition to Region input).
--- a/src/share/vm/opto/idealGraphPrinter.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/opto/idealGraphPrinter.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -615,6 +615,7 @@
       }
     }
 
+#ifdef ASSERT
     if (node->debug_orig() != NULL) {
       stringStream dorigStream;
       Node* dorig = node->debug_orig();
@@ -629,6 +630,7 @@
       }
       print_prop("debug_orig", dorigStream.as_string());
     }
+#endif
 
     if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) {
       buffer[0] = 0;
--- a/src/share/vm/opto/loopTransform.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1292,9 +1292,23 @@
       }
       assert(new_limit != NULL, "");
       // Replace in loop test.
-      _igvn.hash_delete(cmp);
-      cmp->set_req(2, new_limit);
-
+      assert(loop_end->in(1)->in(1) == cmp, "sanity");
+      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
+        // Don't need to create new test since only one user.
+        _igvn.hash_delete(cmp);
+        cmp->set_req(2, new_limit);
+      } else {
+        // Create new test since it is shared.
+        Node* ctrl2 = loop_end->in(0);
+        Node* cmp2  = cmp->clone();
+        cmp2->set_req(2, new_limit);
+        register_new_node(cmp2, ctrl2);
+        Node* bol2 = loop_end->in(1)->clone();
+        bol2->set_req(1, cmp2);
+        register_new_node(bol2, ctrl2);
+        _igvn.hash_delete(loop_end);
+        loop_end->set_req(1, bol2);
+      }
       // Step 3: Find the min-trip test guaranteed before a 'main' loop.
       // Make it a 1-trip test (means at least 2 trips).
 
@@ -1453,6 +1467,23 @@
   return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
 }
 
+//------------------------------adjust_limit-----------------------------------
+// Helper function for add_constraint().
+Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
+  // Compute "I :: (limit-offset)/scale"
+  Node *con = new (C, 3) SubINode(rc_limit, offset);
+  register_new_node(con, pre_ctrl);
+  Node *X = new (C, 3) DivINode(0, con, scale);
+  register_new_node(X, pre_ctrl);
+
+  // Adjust loop limit
+  loop_limit = (stride_con > 0)
+               ? (Node*)(new (C, 3) MinINode(loop_limit, X))
+               : (Node*)(new (C, 3) MaxINode(loop_limit, X));
+  register_new_node(loop_limit, pre_ctrl);
+  return loop_limit;
+}
+
 //------------------------------add_constraint---------------------------------
 // Constrain the main loop iterations so the conditions:
 //    low_limit <= scale_con * I + offset  <  upper_limit
@@ -1469,7 +1500,11 @@
   // pre-loop must check for underflow and the post-loop for overflow.
   // Negative stride*scale reverses this; pre-loop checks for overflow and
   // post-loop for underflow.
-  if (stride_con*scale_con > 0) {
+
+  Node *scale = _igvn.intcon(scale_con);
+  set_ctrl(scale, C->root());
+
+  if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
     // The overflow limit: scale*I+offset < upper_limit
     // For main-loop compute
     //   ( if (scale > 0) /* and stride > 0 */
@@ -1478,23 +1513,10 @@
     //       I > (upper_limit-offset)/scale
     //   )
     //
-    // (upper_limit-offset) may overflow when offset < 0.
+    // (upper_limit-offset) may overflow or underflow.
     // But it is fine since main loop will either have
     // less iterations or will be skipped in such case.
-    Node *con = new (C, 3) SubINode(upper_limit, offset);
-    register_new_node(con, pre_ctrl);
-    Node *scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    Node *X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
-
-    // Adjust main-loop last iteration
-    Node *loop_limit = *main_limit;
-    loop_limit = (stride_con > 0) // scale > 0
-      ? (Node*)(new (C, 3) MinINode(loop_limit, X))
-      : (Node*)(new (C, 3) MaxINode(loop_limit, X));
-    register_new_node(loop_limit, pre_ctrl);
-    *main_limit = loop_limit;
+    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
 
     // The underflow limit: low_limit <= scale*I+offset.
     // For pre-loop compute
@@ -1509,76 +1531,33 @@
     if (low_limit->get_int() == -max_jint) {
       if (!RangeLimitCheck) return;
       // We need this guard when scale*pre_limit+offset >= limit
-      // due to underflow so we need execute pre-loop until
-      // scale*I+offset >= min_int. But (low_limit-offset) will
-      // underflow when offset > 0 and X will be > original_limit.
-      // To avoid it we replace offset = offset > 0 ? 0 : offset
-      // and add min(pre_limit, original_limit).
+      // due to underflow. So we need to execute the pre-loop until
+      // scale*I+offset >= min_int. But (min_int-offset) will
+      // underflow when offset > 0 and X will be > original_limit
+      // when stride > 0. To avoid it we replace positive offset with 0.
+      //
+      // Also (min_int+1 == -max_int) is used instead of min_int here
+      // to avoid problem with scale == -1 (min_int/(-1) == min_int).
       Node* shift = _igvn.intcon(31);
       set_ctrl(shift, C->root());
-      Node *neg_off = new (C, 3) RShiftINode(offset, shift);
-      register_new_node(neg_off, pre_ctrl);
-      offset = new (C, 3) AndINode(offset, neg_off);
+      Node* sign = new (C, 3) RShiftINode(offset, shift);
+      register_new_node(sign, pre_ctrl);
+      offset = new (C, 3) AndINode(offset, sign);
       register_new_node(offset, pre_ctrl);
     } else {
       assert(low_limit->get_int() == 0, "wrong low limit for range check");
       // The only problem we have here when offset == min_int
-      // since (0-min_int) == min_int. It may be fine for scale > 0
-      // but for scale < 0 X will be < original_limit.
+      // since (0-min_int) == min_int. It may be fine for stride > 0
+      // but for stride < 0 X will be < original_limit. To avoid it
+      // max(pre_limit, original_limit) is used in do_range_check().
     }
-    con = new (C, 3) SubINode(low_limit, offset);
-    register_new_node(con, pre_ctrl);
-    scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
-
-    // Adjust pre-loop last iteration
-    loop_limit = *pre_limit;
-    loop_limit = (stride_con > 0) // scale > 0
-      ? (Node*)(new (C, 3) MaxINode(loop_limit, X))
-      : (Node*)(new (C, 3) MinINode(loop_limit, X));
-    register_new_node( loop_limit, pre_ctrl );
-    *pre_limit = loop_limit;
+    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
+    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
 
   } else { // stride_con*scale_con < 0
     // For negative stride*scale pre-loop checks for overflow and
     // post-loop for underflow.
     //
-    // The underflow limit: low_limit <= scale*I+offset.
-    // For main-loop compute
-    //   scale*I+offset+1 > low_limit
-    //   ( if (scale < 0) /* and stride > 0 */
-    //       I < (low_limit-(offset+1))/scale
-    //     else /* scale < 0 and stride < 0 */
-    //       I > (low_limit-(offset+1))/scale
-    //   )
-
-    if (low_limit->get_int() == -max_jint) {
-      if (!RangeLimitCheck) return;
-    } else {
-      assert(low_limit->get_int() == 0, "wrong low limit for range check");
-    }
-
-    Node *one  = _igvn.intcon(1);
-    set_ctrl(one, C->root());
-    Node *plus_one = new (C, 3) AddINode(offset, one);
-    register_new_node( plus_one, pre_ctrl );
-    Node *con = new (C, 3) SubINode(low_limit, plus_one);
-    register_new_node(con, pre_ctrl);
-    Node *scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    Node *X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
-
-    // Adjust main-loop last iteration
-    Node *loop_limit = *main_limit;
-    loop_limit = (stride_con > 0) // scale < 0
-      ? (Node*)(new (C, 3) MinINode(loop_limit, X))
-      : (Node*)(new (C, 3) MaxINode(loop_limit, X));
-    register_new_node(loop_limit, pre_ctrl);
-    *main_limit = loop_limit;
-
     // The overflow limit: scale*I+offset < upper_limit
     // For pre-loop compute
     //   NOT(scale*I+offset < upper_limit)
@@ -1586,26 +1565,55 @@
     //   scale*I+offset+1 > upper_limit
     //   ( if (scale < 0) /* and stride > 0 */
     //       I < (upper_limit-(offset+1))/scale
-    //     else /* scale < 0 and stride < 0 */
+    //     else /* scale > 0 and stride < 0 */
     //       I > (upper_limit-(offset+1))/scale
     //   )
-    plus_one = new (C, 3) AddINode(offset, one);
+    //
+    // (upper_limit-offset-1) may underflow or overflow.
+    // To avoid it min(pre_limit, original_limit) is used
+    // in do_range_check() for stride > 0 and max() for stride < 0.
+    Node *one  = _igvn.intcon(1);
+    set_ctrl(one, C->root());
+
+    Node *plus_one = new (C, 3) AddINode(offset, one);
     register_new_node( plus_one, pre_ctrl );
-    con = new (C, 3) SubINode(upper_limit, plus_one);
-    register_new_node(con, pre_ctrl);
-    scale = _igvn.intcon(scale_con);
-    set_ctrl(scale, C->root());
-    X = new (C, 3) DivINode(0, con, scale);
-    register_new_node(X, pre_ctrl);
+    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
+    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
 
-    // Adjust pre-loop last iteration
-    loop_limit = *pre_limit;
-    loop_limit = (stride_con > 0) // scale < 0
-      ? (Node*)(new (C, 3) MaxINode(loop_limit, X))
-      : (Node*)(new (C, 3) MinINode(loop_limit, X));
-    register_new_node( loop_limit, pre_ctrl );
-    *pre_limit = loop_limit;
+    if (low_limit->get_int() == -max_jint) {
+      if (!RangeLimitCheck) return;
+      // We need this guard when scale*main_limit+offset >= limit
+      // due to underflow. So we need to execute the main-loop while
+      // scale*I+offset+1 > min_int. But (min_int-offset-1) will
+      // underflow when (offset+1) > 0 and X will be < main_limit
+      // when scale < 0 (and stride > 0). To avoid it we replace
+      // positive (offset+1) with 0.
+      //
+      // Also (min_int+1 == -max_int) is used instead of min_int here
+      // to avoid problem with scale == -1 (min_int/(-1) == min_int).
+      Node* shift = _igvn.intcon(31);
+      set_ctrl(shift, C->root());
+      Node* sign = new (C, 3) RShiftINode(plus_one, shift);
+      register_new_node(sign, pre_ctrl);
+      plus_one = new (C, 3) AndINode(plus_one, sign);
+      register_new_node(plus_one, pre_ctrl);
+    } else {
+      assert(low_limit->get_int() == 0, "wrong low limit for range check");
+      // The only problem we have here when offset == max_int
+      // since (max_int+1) == min_int and (0-min_int) == min_int.
+      // But it is fine since the main loop will either have
+      // fewer iterations or will be skipped in such case.
+    }
+    // The underflow limit: low_limit <= scale*I+offset.
+    // For main-loop compute
+    //   scale*I+offset+1 > low_limit
+    //   ( if (scale < 0) /* and stride > 0 */
+    //       I < (low_limit-(offset+1))/scale
+    //     else /* scale > 0 and stride < 0 */
+    //       I > (low_limit-(offset+1))/scale
+    //   )
 
+    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
   }
 }
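
Two integer idioms carry the rewritten add_constraint(): (stride_con^scale_con) >= 0 replaces stride_con*scale_con > 0 because XOR cannot overflow, and offset & (offset >> 31) zeroes a positive offset while keeping a negative one without a branch. Both in isolation, assuming the usual arithmetic right shift on signed int, as the VM code itself does:

    #include <cassert>

    // Sign-bit test: XOR of two ints cannot overflow; treats 0 as non-negative.
    static bool same_sign(int a, int b) { return (a ^ b) >= 0; }

    // Branch-free clamp: the arithmetic shift yields all-ones for negative v
    // and all-zeros otherwise, so positive values become 0 and negatives survive.
    static int positive_to_zero(int v) { return v & (v >> 31); }

    int main() {
      assert(same_sign(3, 7) && same_sign(-3, -7) && !same_sign(-1, 2));
      assert(positive_to_zero(5) == 0 && positive_to_zero(-5) == -5);
      return 0;
    }
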
 
@@ -1869,13 +1877,8 @@
           // The underflow and overflow limits: 0 <= scale*I+offset < limit
           add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
           if (!conditional_rc) {
-            conditional_rc = !loop->dominates_backedge(iff);
-            // It is also needed if offset->_lo == min_int since
-            // (0-min_int) == min_int. It may be fine for stride > 0
-            // but for stride < 0 pre_limit will be < original_limit.
-            const TypeInt* offset_t = _igvn.type(offset)->is_int();
-            conditional_rc |= RangeLimitCheck && (offset_t->_lo == min_jint) &&
-                              (scale_con<0) && (stride_con<0);
+            // (0-offset)/scale could be outside of the loop's iteration range.
+            conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
           }
         } else {
 #ifndef PRODUCT
@@ -1905,16 +1908,14 @@
           // Fall into LT case
         case BoolTest::lt:
           // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
+          // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
+          // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
           add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
           if (!conditional_rc) {
-            conditional_rc = !loop->dominates_backedge(iff);
-            // It is also needed if scale*pre_limit+offset >= limit
-            // due to underflow so we need execute pre-loop until
-            // scale*I+offset >= min_int. But (low_limit-offset) will
-            // underflow when offset > 0 and X will be > original_limit.
-            const TypeInt* offset_t = _igvn.type(offset)->is_int();
-            conditional_rc |= RangeLimitCheck && (offset_t->_hi > 0) &&
-                              (scale_con>0) && (stride_con>0);
+            // ((MIN_INT+1)-offset)/scale could be outside of the loop's iteration range.
+            // Note: a positive offset is replaced with 0, but (MIN_INT+1)/scale could
+            // still be outside of the loop range.
+            conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
           }
           break;
         default:
--- a/src/share/vm/opto/loopnode.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -932,6 +932,8 @@
   // the pre-loop or the post-loop until the condition holds true in the main
   // loop.  Scale_con, offset and limit are all loop invariant.
   void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
+  // Helper function for add_constraint().
+  Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl );
 
   // Partially peel loop up through last_peel node.
   bool partial_peel( IdealLoopTree *loop, Node_List &old_new );
--- a/src/share/vm/opto/output.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/opto/output.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -911,7 +911,7 @@
         }
       } else {
         const TypePtr *tp = obj_node->bottom_type()->make_ptr();
-        scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->constant_encoding());
+        scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
       }
 
       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
--- a/src/share/vm/opto/stringopts.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/opto/stringopts.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -768,6 +768,7 @@
         tty->cr();
       }
 #endif
+      fail = true;
       break;
     } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
       ptr = ptr->in(0)->in(0);
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -992,6 +992,9 @@
     }
 
     Rewriter::rewrite(scratch_class, THREAD);
+    if (!HAS_PENDING_EXCEPTION) {
+      Rewriter::relocate_and_link(scratch_class, THREAD);
+    }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
       CLEAR_PENDING_EXCEPTION;
--- a/src/share/vm/prims/methodHandleWalk.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -265,7 +265,7 @@
         assert(dest == arg_state->_type, "");
         ArgToken arg = arg_state->_arg;
         ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
-        assert(arg.token_type() >= tt_symbolic || arg.index() == new_arg.index(), "should be the same index");
+        assert(!arg.has_index() || arg.index() == new_arg.index(), "should be the same index");
         debug_only(dest_klass = (klassOop)badOop);
         break;
       }
@@ -423,6 +423,7 @@
           arglist[1+i] = arg;
           if (!retain_original_args)
             change_argument(arg_type, slot, T_VOID, ArgToken(tt_void));
+          i++;
         }
         arglist[1+argc] = ArgToken();  // sentinel
         oop invoker = java_lang_invoke_MethodTypeForm::vmlayout(
@@ -442,8 +443,10 @@
             ret = make_conversion(T_OBJECT, rklass, Bytecodes::_checkcast, ret, CHECK_(empty));
           }
         }
-        int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0);
-        change_argument(T_VOID, ret_slot, rtype, ret);
+        if (rtype != T_VOID) {
+          int ret_slot = arg_slot + (retain_original_args ? coll_slots : 0);
+          change_argument(T_VOID, ret_slot, rtype, ret);
+        }
         break;
       }
 
@@ -487,7 +490,7 @@
         arglist[1] = length_arg;  // length to check
         arglist[2] = ArgToken();  // sentinel
         make_invoke(NULL, vmIntrinsics::_checkSpreadArgument,
-                    Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty));
+                    Bytecodes::_invokestatic, false, 2, &arglist[0], CHECK_(empty));
 
         // Spread out the array elements.
         Bytecodes::Code aload_op = Bytecodes::_nop;
@@ -689,9 +692,8 @@
 // -----------------------------------------------------------------------------
 // MethodHandleCompiler
 
-MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, int invoke_count, bool is_invokedynamic, TRAPS)
+MethodHandleCompiler::MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool is_invokedynamic, TRAPS)
   : MethodHandleWalker(root, is_invokedynamic, THREAD),
-    _callee(callee),
     _invoke_count(invoke_count),
     _thread(THREAD),
     _bytecode(THREAD, 50),
@@ -705,8 +707,8 @@
   (void) _constants.append(NULL);
 
   // Set name and signature index.
-  _name_index      = cpool_symbol_put(_callee->name());
-  _signature_index = cpool_symbol_put(_callee->signature());
+  _name_index      = cpool_symbol_put(name);
+  _signature_index = cpool_symbol_put(signature);
 
   // Get return type klass.
   Handle first_mtype(THREAD, chain().method_type_oop());
@@ -714,7 +716,8 @@
   _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass);
   if (_rtype == T_ARRAY)  _rtype = T_OBJECT;
 
-  int params = _callee->size_of_parameters();  // Incoming arguments plus receiver.
+  ArgumentSizeComputer args(signature);
+  int params = args.size() + 1;  // Incoming arguments plus receiver.
   _num_params = for_invokedynamic() ? params - 1 : params;  // XXX Check if callee is static?
 }
 
@@ -732,7 +735,7 @@
 }
 
 
-void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
+void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index, int args_size) {
   Bytecodes::check(op);  // Are we legal?
 
   switch (op) {
@@ -808,6 +811,14 @@
   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
   case Bytecodes::_d2f:
+  case Bytecodes::_iaload:
+  case Bytecodes::_laload:
+  case Bytecodes::_faload:
+  case Bytecodes::_daload:
+  case Bytecodes::_aaload:
+  case Bytecodes::_baload:
+  case Bytecodes::_caload:
+  case Bytecodes::_saload:
   case Bytecodes::_ireturn:
   case Bytecodes::_lreturn:
   case Bytecodes::_freturn:
@@ -821,9 +832,14 @@
   // bi
   case Bytecodes::_ldc:
     assert(Bytecodes::format_bits(op, false) == (Bytecodes::_fmt_b|Bytecodes::_fmt_has_k), "wrong bytecode format");
-    assert((char) index == index, "index does not fit in 8-bit");
-    _bytecode.push(op);
-    _bytecode.push(index);
+    if (index == (index & 0xff)) {
+      _bytecode.push(op);
+      _bytecode.push(index);
+    } else {
+      _bytecode.push(Bytecodes::_ldc_w);
+      _bytecode.push(index >> 8);
+      _bytecode.push(index);
+    }
     break;
 
   case Bytecodes::_iload:
@@ -837,9 +853,16 @@
   case Bytecodes::_dstore:
   case Bytecodes::_astore:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bi, "wrong bytecode format");
-    assert((char) index == index, "index does not fit in 8-bit");
-    _bytecode.push(op);
-    _bytecode.push(index);
+    if (index == (index & 0xff)) {
+      _bytecode.push(op);
+      _bytecode.push(index);
+    } else {
+      // index doesn't fit in a u1, so use the wide form
+      _bytecode.push(Bytecodes::_wide);
+      _bytecode.push(op);
+      _bytecode.push(index >> 8);
+      _bytecode.push(index);
+    }
     break;
 
   // bkk
@@ -847,7 +870,7 @@
   case Bytecodes::_ldc2_w:
   case Bytecodes::_checkcast:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bkk, "wrong bytecode format");
-    assert((short) index == index, "index does not fit in 16-bit");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
@@ -858,12 +881,23 @@
   case Bytecodes::_invokespecial:
   case Bytecodes::_invokevirtual:
     assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
-    assert((short) index == index, "index does not fit in 16-bit");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
     _bytecode.push(op);
     _bytecode.push(index >> 8);
     _bytecode.push(index);
     break;
 
+  case Bytecodes::_invokeinterface:
+    assert(Bytecodes::format_bits(op, false) == Bytecodes::_fmt_bJJ, "wrong bytecode format");
+    assert((unsigned short) index == index, "index does not fit in 16-bit");
+    assert(args_size > 0, "valid args_size");
+    _bytecode.push(op);
+    _bytecode.push(index >> 8);
+    _bytecode.push(index);
+    _bytecode.push(args_size);
+    _bytecode.push(0);
+    break;
+
   default:
     ShouldNotReachHere();
   }
@@ -982,7 +1016,8 @@
                                       const ArgToken& src, TRAPS) {
 
   BasicType srctype = src.basic_type();
-  int index = src.index();
+  TokenType tt = src.token_type();
+  int index = -1;
 
   switch (op) {
   case Bytecodes::_i2l:
@@ -1003,18 +1038,31 @@
   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
   case Bytecodes::_d2f:
-    emit_load(srctype, index);
+    if (tt == tt_constant) {
+      emit_load_constant(src);
+    } else {
+      emit_load(srctype, src.index());
+    }
     stack_pop(srctype);  // pop the src type
     emit_bc(op);
     stack_push(type);    // push the dest value
-    if (srctype != type)
+    if (tt != tt_constant)
+      index = src.index();
+    if (srctype != type || index == -1)
       index = new_local_index(type);
     emit_store(type, index);
     break;
 
   case Bytecodes::_checkcast:
-    emit_load(srctype, index);
+    if (tt == tt_constant) {
+      emit_load_constant(src);
+    } else {
+      emit_load(srctype, src.index());
+      index = src.index();
+    }
     emit_bc(op, cpool_klass_put(tk));
+    if (index == -1)
+      index = new_local_index(type);
     emit_store(srctype, index);
     break;
 
@@ -1057,6 +1105,11 @@
   Symbol*  name      = m->name();
   Symbol*  signature = m->signature();
 
+  // Count the number of arguments, not the size
+  ArgumentCount asc(signature);
+  assert(argc == asc.size() + ((op == Bytecodes::_invokestatic || op == Bytecodes::_invokedynamic) ? 0 : 1),
+         "argc mismatch");
+
   if (tailcall) {
     // Actually, in order to make these methods more recognizable,
     // let's put them in holder class MethodHandle.  That way stack
@@ -1105,9 +1158,13 @@
   case Bytecodes::_invokevirtual:
     emit_bc(op, methodref_index);
     break;
-  case Bytecodes::_invokeinterface:
-    Unimplemented();
+
+  case Bytecodes::_invokeinterface: {
+    ArgumentSizeComputer asc(signature);
+    emit_bc(op, methodref_index, asc.size() + 1);
     break;
+  }
+
   default:
     ShouldNotReachHere();
   }
@@ -1116,6 +1173,7 @@
   // Otherwise, make a recursive call to some helper routine.
   BasicType rbt = m->result_type();
   if (rbt == T_ARRAY)  rbt = T_OBJECT;
+  stack_push(rbt);  // The return value is already pushed onto the stack.
   ArgToken ret;
   if (tailcall) {
     if (rbt != _rtype) {
@@ -1170,7 +1228,6 @@
     ret = ArgToken();  // Dummy return value.
   }
   else {
-    stack_push(rbt);  // The return value is already pushed onto the stack.
     int index = new_local_index(rbt);
     switch (rbt) {
     case T_BOOLEAN: case T_BYTE: case T_CHAR:  case T_SHORT:
@@ -1195,8 +1252,32 @@
                                  const MethodHandleWalker::ArgToken& base,
                                  const MethodHandleWalker::ArgToken& offset,
                                  TRAPS) {
-  Unimplemented();
-  return ArgToken();
+  switch (base.token_type()) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(base.basic_type(), base.index());
+      break;
+    case tt_constant:
+      emit_load_constant(base);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  switch (offset.token_type()) {
+    case tt_parameter:
+    case tt_temporary:
+      emit_load(offset.basic_type(), offset.index());
+      break;
+    case tt_constant:
+      emit_load_constant(offset);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  emit_bc(op);
+  int index = new_local_index(type);
+  emit_store(type, index);
+  return ArgToken(tt_temporary, type, index);
 }
 
 
@@ -1318,6 +1399,7 @@
   objArrayHandle methods(THREAD, m_array);
   methods->obj_at_put(0, m());
   Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
+  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.
 
   // Set the invocation counter's count to the invoke count of the
   // original call site.
@@ -1371,12 +1453,10 @@
     return s;
   }
   ArgToken token(const char* str) {
-    jvalue string_con;
-    string_con.j = (intptr_t) str;
-    return ArgToken(tt_symbolic, T_LONG, string_con);
+    return ArgToken(str);
   }
   const char* string(ArgToken token) {
-    return (const char*) (intptr_t) token.get_jlong();
+    return token.str();
   }
   void start_params() {
     _param_state <<= 1;
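
emit_bc() now chooses encodings by operand width: an ldc index that does not fit in one byte widens to ldc_w, and the one-byte local loads/stores gain the wide prefix, with multi-byte operands written big-endian. The selection logic in self-contained form (the opcodes are the real JVM values; the emit helpers are hypothetical):

    #include <cstdint>
    #include <vector>

    enum { LDC = 0x12, LDC_W = 0x13, ILOAD = 0x15, WIDE = 0xc4 };  // real JVM opcodes

    static void emit_ldc(std::vector<uint8_t>& code, int index) {
      if (index == (index & 0xff)) {           // fits in a u1
        code.push_back((uint8_t)LDC);
        code.push_back((uint8_t)index);
      } else {                                 // needs ldc_w with a u2 operand
        code.push_back((uint8_t)LDC_W);
        code.push_back((uint8_t)(index >> 8));
        code.push_back((uint8_t)index);
      }
    }

    static void emit_iload(std::vector<uint8_t>& code, int index) {
      if (index == (index & 0xff)) {           // one-byte local index
        code.push_back((uint8_t)ILOAD);
        code.push_back((uint8_t)index);
      } else {                                 // wide prefix gives a u2 index
        code.push_back((uint8_t)WIDE);
        code.push_back((uint8_t)ILOAD);
        code.push_back((uint8_t)(index >> 8));
        code.push_back((uint8_t)index);
      }
    }
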
--- a/src/share/vm/prims/methodHandleWalk.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/prims/methodHandleWalk.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -126,26 +126,34 @@
     Handle    _handle;
 
   public:
-    ArgToken(TokenType tt = tt_illegal) : _tt(tt) {}
-    ArgToken(TokenType tt, BasicType bt, jvalue value) : _tt(tt), _bt(bt), _value(value) {}
+    ArgToken(TokenType tt = tt_illegal) : _tt(tt) {
+      assert(tt == tt_illegal || tt == tt_void, "invalid token type");
+    }
 
     ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) {
+      assert(_tt == tt_parameter || _tt == tt_temporary, "must have index");
       _value.i = index;
     }
 
-    ArgToken(TokenType tt, BasicType bt, Handle value) : _tt(tt), _bt(bt) {
-      _handle = value;
+    ArgToken(BasicType bt, jvalue value) : _tt(tt_constant), _bt(bt), _value(value) {}
+    ArgToken(BasicType bt, Handle value) : _tt(tt_constant), _bt(bt), _handle(value) {}
+
+
+    ArgToken(const char* str) : _tt(tt_symbolic), _bt(T_LONG) {
+      _value.j = (intptr_t)str;
     }
 
     TokenType token_type()  const { return _tt; }
     BasicType basic_type()  const { return _bt; }
-    int       index()       const { return _value.i; }
-    Handle    object()      const { return _handle; }
+    bool      has_index()   const { return _tt == tt_parameter || _tt == tt_temporary; }
+    int       index()       const { assert(has_index(), "must have index"); return _value.i; }
+    Handle    object()      const { assert(_tt == tt_constant, "value type"); return _handle; }
+    const char* str()       const { assert(_tt == tt_symbolic, "string type"); return (const char*)_value.j; }
 
-    jint      get_jint()    const { return _value.i; }
-    jlong     get_jlong()   const { return _value.j; }
-    jfloat    get_jfloat()  const { return _value.f; }
-    jdouble   get_jdouble() const { return _value.d; }
+    jint      get_jint()    const { assert(_tt == tt_constant, "value types"); return _value.i; }
+    jlong     get_jlong()   const { assert(_tt == tt_constant, "value types"); return _value.j; }
+    jfloat    get_jfloat()  const { assert(_tt == tt_constant, "value types"); return _value.f; }
+    jdouble   get_jdouble() const { assert(_tt == tt_constant, "value types"); return _value.d; }
   };
 
   // Abstract interpretation state:
@@ -256,7 +264,6 @@
 // The IR happens to be JVM bytecodes.
 class MethodHandleCompiler : public MethodHandleWalker {
 private:
-  methodHandle _callee;
   int          _invoke_count;  // count the original call site has been executed
   KlassHandle  _rklass;        // Return type for casting.
   BasicType    _rtype;
@@ -404,7 +411,7 @@
     return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index);
   }
 
-  void emit_bc(Bytecodes::Code op, int index = 0);
+  void emit_bc(Bytecodes::Code op, int index = 0, int args_size = -1);
   void emit_load(BasicType bt, int index);
   void emit_store(BasicType bt, int index);
   void emit_load_constant(ArgToken arg);
@@ -414,10 +421,10 @@
   }
   virtual ArgToken make_oop_constant(oop con, TRAPS) {
     Handle h(THREAD, con);
-    return ArgToken(tt_constant, T_OBJECT, h);
+    return ArgToken(T_OBJECT, h);
   }
   virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
-    return ArgToken(tt_constant, type, *con);
+    return ArgToken(type, *con);
   }
 
   virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS);
@@ -431,7 +438,7 @@
   methodHandle get_method_oop(TRAPS) const;
 
 public:
-  MethodHandleCompiler(Handle root, methodHandle callee, int invoke_count, bool for_invokedynamic, TRAPS);
+  MethodHandleCompiler(Handle root, Symbol* name, Symbol* signature, int invoke_count, bool for_invokedynamic, TRAPS);
 
   // Compile the given MH chain into bytecode.
   methodHandle compile(TRAPS);
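
The reworked ArgToken is a tagged value: each constructor accepts only the tags matching the payload it sets, and every accessor asserts its tag before exposing the union. Reduced to a skeleton (Token is an abbreviation of the real class):

    #include <cassert>

    enum TokenType { tt_void, tt_parameter, tt_temporary, tt_constant, tt_symbolic, tt_illegal };

    class Token {
      TokenType _tt;
      union { int _index; long _bits; };
     public:
      Token(TokenType tt, int index) : _tt(tt), _index(index) {
        assert(tt == tt_parameter || tt == tt_temporary);  // tag must match payload
      }
      bool has_index() const { return _tt == tt_parameter || _tt == tt_temporary; }
      int  index()     const { assert(has_index()); return _index; }
    };
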
--- a/src/share/vm/prims/methodHandles.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -25,9 +25,11 @@
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "prims/methodHandles.hpp"
+#include "prims/methodHandleWalk.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/signature.hpp"
@@ -2599,6 +2601,50 @@
   }
 }
 
+#ifdef ASSERT
+
+extern "C"
+void print_method_handle(oop mh);
+
+static void stress_method_handle_walk_impl(Handle mh, TRAPS) {
+  if (StressMethodHandleWalk) {
+    // Exercise the MethodHandleWalk code in various ways and validate
+    // the resulting method oop.  Some of these produce output so they
+    // are guarded under Verbose.
+    ResourceMark rm;
+    HandleMark hm;
+    if (Verbose) {
+      print_method_handle(mh());
+    }
+    TempNewSymbol name = SymbolTable::new_symbol("invoke", CHECK);
+    Handle mt = java_lang_invoke_MethodHandle::type(mh());
+    TempNewSymbol signature = java_lang_invoke_MethodType::as_signature(mt(), true, CHECK);
+    MethodHandleCompiler mhc(mh, name, signature, 10000, false, CHECK);
+    methodHandle m = mhc.compile(CHECK);
+    if (Verbose) {
+      m->print_codes();
+    }
+    InterpreterOopMap mask;
+    OopMapCache::compute_one_oop_map(m, m->code_size() - 1, &mask);
+  }
+}
+
+static void stress_method_handle_walk(Handle mh, TRAPS) {
+  stress_method_handle_walk_impl(mh, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop ex = PENDING_EXCEPTION;
+    CLEAR_PENDING_EXCEPTION;
+    tty->print("StressMethodHandleWalk: ");
+    java_lang_Throwable::print(ex, tty);
+    tty->cr();
+  }
+}
+#else
+
+static void stress_method_handle_walk(Handle mh, TRAPS) {}
+
+#endif
+
 //
 // Here are the native methods on sun.invoke.MethodHandleImpl.
 // They are the private interface between this JVM and the HotSpot-specific
@@ -2666,6 +2712,7 @@
   }
 
   MethodHandles::init_DirectMethodHandle(mh, m, (do_dispatch != JNI_FALSE), CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2694,11 +2741,11 @@
                                                        receiver_limit,
                                                        decode_flags,
                                                        CHECK);
-    return;
+  } else {
+    // Build a BMH on top of a DMH or another BMH:
+    MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK);
   }
-
-  // Build a BMH on top of a DMH or another BMH:
-  MethodHandles::init_BoundMethodHandle(mh, target, argnum, CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2716,6 +2763,7 @@
   assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null");
 
   MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK);
+  stress_method_handle_walk(mh, CHECK);
 }
 JVM_END
 
@@ -2922,6 +2970,20 @@
 }
 JVM_END
 
+JVM_ENTRY(jobject, MH_invoke_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
+    TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
+    THROW_MSG_NULL(UOE_name, "MethodHandle.invoke cannot be invoked reflectively");
+    return NULL;
+}
+JVM_END
+
+JVM_ENTRY(jobject, MH_invokeExact_UOE(JNIEnv *env, jobject igmh, jobjectArray igargs)) {
+    TempNewSymbol UOE_name = SymbolTable::new_symbol("java/lang/UnsupportedOperationException", CHECK_NULL);
+    THROW_MSG_NULL(UOE_name, "MethodHandle.invokeExact cannot be invoked reflectively");
+    return NULL;
+}
+JVM_END
+
 
 /// JVM_RegisterMethodHandleMethods
 
@@ -2960,6 +3022,12 @@
   {CC"getMembers",              CC"("CLS""STRG""STRG"I"CLS"I["MEM")I",  FN_PTR(MHN_getMembers)}
 };
 
+static JNINativeMethod invoke_methods[] = {
+  // Stubs that reject reflective calls to the signature-polymorphic
+  // MethodHandle.invoke and MethodHandle.invokeExact
+  {CC"invoke",                  CC"(["OBJ")"OBJ,                FN_PTR(MH_invoke_UOE)},
+  {CC"invokeExact",             CC"(["OBJ")"OBJ,                FN_PTR(MH_invokeExact_UOE)}
+};
+
 // This one function is exported, used by NativeLookup.
 
 JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
@@ -2976,6 +3044,12 @@
     ThreadToNativeFromVM ttnfv(thread);
 
     int status = env->RegisterNatives(MHN_class, methods, sizeof(methods)/sizeof(JNINativeMethod));
+    if (!env->ExceptionOccurred()) {
+      // JLINV is the "Ljava/lang/invoke/" signature prefix; skipping the
+      // leading 'L' yields the class name form that FindClass expects.
+      const char* L_MH_name = (JLINV "MethodHandle");
+      const char* MH_name = L_MH_name+1;
+      jclass MH_class = env->FindClass(MH_name);
+      status = env->RegisterNatives(MH_class, invoke_methods, sizeof(invoke_methods)/sizeof(JNINativeMethod));
+    }
     if (env->ExceptionOccurred()) {
       MethodHandles::set_enabled(false);
       warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
--- a/src/share/vm/prims/methodHandles.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -721,12 +721,10 @@
 //# include "methodHandles_zero.hpp"
 #endif
 #ifdef TARGET_ARCH_arm
-#define TARGET_ARCH_NYI_6939861 1 //FIXME
-//# include "methodHandles_arm.hpp"
+# include "methodHandles_arm.hpp"
 #endif
 #ifdef TARGET_ARCH_ppc
-#define TARGET_ARCH_NYI_6939861 1 //FIXME
-//# include "methodHandles_ppc.hpp"
+# include "methodHandles_ppc.hpp"
 #endif
 
 #ifdef TARGET_ARCH_NYI_6939861
--- a/src/share/vm/runtime/globals.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -2909,6 +2909,12 @@
   product(intx, NmethodSweepCheckInterval, 5,                               \
           "Compilers wake up every n seconds to possibly sweep nmethods")   \
                                                                             \
+  notproduct(bool, LogSweeper, false,                                       \
+            "Keep a ring buffer of sweeper activity")                       \
+                                                                            \
+  notproduct(intx, SweeperLogEntries, 1024,                                 \
+            "Number of records in the ring buffer of sweeper activity")     \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")                \
                                                                             \
@@ -3718,6 +3724,9 @@
   diagnostic(bool, OptimizeMethodHandles, true,                             \
           "when constructing method handles, try to improve them")          \
                                                                             \
+  develop(bool, StressMethodHandleWalk, false,                              \
+          "Process all method handles with MethodHandleWalk")               \
+                                                                            \
   diagnostic(bool, UseRicochetFrames, true,                                 \
           "use ricochet stack frames for method handle combination, "       \
           "if the platform supports them")                                  \
--- a/src/share/vm/runtime/sweeper.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -37,6 +37,94 @@
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
 
+#ifdef ASSERT
+
+#define SWEEP(nm) record_sweep(nm, __LINE__)
+// Sweeper logging code
+class SweeperRecord {
+ public:
+  int traversal;
+  int invocation;
+  int compile_id;
+  long traversal_mark;
+  int state;
+  const char* kind;
+  address vep;
+  address uep;
+  int line;
+
+  void print() {
+      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
+                    PTR_FORMAT " state = %d traversal_mark = %ld line = %d",
+                    traversal,
+                    invocation,
+                    compile_id,
+                    kind == NULL ? "" : kind,
+                    uep,
+                    vep,
+                    state,
+                    traversal_mark,
+                    line);
+  }
+};
+
+static int _sweep_index = 0;
+static SweeperRecord* _records = NULL;
+
+void NMethodSweeper::report_events(int id, address entry) {
+  if (_records != NULL) {
+    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
+      if (_records[i].uep == entry ||
+          _records[i].vep == entry ||
+          _records[i].compile_id == id) {
+        _records[i].print();
+      }
+    }
+    for (int i = 0; i < _sweep_index; i++) {
+      if (_records[i].uep == entry ||
+          _records[i].vep == entry ||
+          _records[i].compile_id == id) {
+        _records[i].print();
+      }
+    }
+  }
+}
+
+void NMethodSweeper::report_events() {
+  if (_records != NULL) {
+    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
+      // skip empty records
+      if (_records[i].vep == NULL) continue;
+      _records[i].print();
+    }
+    for (int i = 0; i < _sweep_index; i++) {
+      // skip empty records
+      if (_records[i].vep == NULL) continue;
+      _records[i].print();
+    }
+  }
+}
+
+void NMethodSweeper::record_sweep(nmethod* nm, int line) {
+  if (_records != NULL) {
+    _records[_sweep_index].traversal = _traversals;
+    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
+    _records[_sweep_index].invocation = _invocations;
+    _records[_sweep_index].compile_id = nm->compile_id();
+    _records[_sweep_index].kind = nm->compile_kind();
+    _records[_sweep_index].state = nm->_state;
+    _records[_sweep_index].vep = nm->verified_entry_point();
+    _records[_sweep_index].uep = nm->entry_point();
+    _records[_sweep_index].line = line;
+
+    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
+  }
+}
+#else
+#define SWEEP(nm)
+#endif
+
+
 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 int       NMethodSweeper::_seen = 0;         // No. of nmethods we have currently processed in current pass of CodeCache
@@ -137,6 +225,13 @@
     if (old != 0) {
       return;
     }
+#ifdef ASSERT
+    if (LogSweeper && _records == NULL) {
+      // Create the ring buffer for the logging code
+      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
+      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
+    }
+#endif
     if (_invocations > 0) {
       sweep_code_cache();
       _invocations--;
@@ -213,10 +308,29 @@
   }
 }
 
+class NMethodMarker: public StackObj {
+ private:
+  CompilerThread* _thread;
+ public:
+  NMethodMarker(nmethod* nm) {
+    _thread = CompilerThread::current();
+    _thread->set_scanned_nmethod(nm);
+  }
+  ~NMethodMarker() {
+    _thread->set_scanned_nmethod(NULL);
+  }
+};
+
 
 void NMethodSweeper::process_nmethod(nmethod *nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
+  // Make sure this nmethod doesn't get unloaded during the scan,
+  // since the locks acquired below might safepoint.
+  NMethodMarker nmm(nm);
+
+  SWEEP(nm);
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
@@ -224,8 +338,10 @@
       // Clean-up all inline caches that point to zombie/non-reentrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
+      SWEEP(nm);
     } else {
       _locked_seen++;
+      SWEEP(nm);
     }
     return;
   }
@@ -247,6 +363,7 @@
       }
       nm->mark_for_reclamation();
       _rescan = true;
+      SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
     // If there is no current activations of this method on the
@@ -257,6 +374,7 @@
       }
       nm->make_zombie();
       _rescan = true;
+      SWEEP(nm);
     } else {
       // Still alive, clean up its inline caches
       MutexLocker cl(CompiledIC_lock);
@@ -265,6 +383,7 @@
       // request a rescan.  If this method stays on the stack for a
       // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
+      SWEEP(nm);
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
@@ -273,10 +392,12 @@
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      SWEEP(nm);
       nm->flush();
     } else {
       nm->make_zombie();
       _rescan = true;
+      SWEEP(nm);
     }
   } else {
     assert(nm->is_alive(), "should be alive");
@@ -293,6 +414,7 @@
     // Clean-up all inline caches that point to zombie/non-reentrant methods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
+    SWEEP(nm);
   }
 }
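
Editor's note: record_sweep fills the _records array circularly, so the oldest entries sit from _sweep_index to the end and the newest from 0 up to _sweep_index; report_events walks the two halves in that order to replay events chronologically. A minimal self-contained sketch of the same ring-buffer discipline (RingLog, Record, and the fixed capacity are invented stand-ins for _records and SweeperLogEntries):

#include <cstdio>

struct Record { int compile_id; int line; bool used; };

class RingLog {
    enum { kEntries = 8 };               // stands in for SweeperLogEntries
    Record _records[kEntries];
    int    _index;                       // stands in for _sweep_index
public:
    RingLog() : _index(0) {
        for (int i = 0; i < kEntries; i++) _records[i].used = false;
    }

    // Mirrors record_sweep: write the next slot and wrap, overwriting the
    // oldest entry once the buffer is full.
    void record(int compile_id, int line) {
        _records[_index].compile_id = compile_id;
        _records[_index].line       = line;
        _records[_index].used       = true;
        _index = (_index + 1) % kEntries;
    }

    // Mirrors report_events: oldest entries first, then the newer ones.
    void report() const {
        for (int i = _index; i < kEntries; i++) print_one(_records[i]);
        for (int i = 0; i < _index; i++)        print_one(_records[i]);
    }

private:
    static void print_one(const Record& r) {
        if (!r.used) return;             // skip empty records
        std::printf("compile_id = %d line = %d\n", r.compile_id, r.line);
    }
};

int main() {
    RingLog log;
    for (int id = 1; id <= 12; id++) log.record(id, 100 + id);
    log.report();                        // prints ids 5..12, the 8 newest
    return 0;
}
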
 
--- a/src/share/vm/runtime/sweeper.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/runtime/sweeper.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -57,6 +57,13 @@
  public:
   static long traversal_count() { return _traversals; }
 
+#ifdef ASSERT
+  // Keep track of sweeper activity in the ring buffer
+  static void record_sweep(nmethod* nm, int line);
+  static void report_events(int id, address entry);
+  static void report_events();
+#endif
+
   static void scan_stacks();      // Invoked at the end of each safepoint
   static void sweep_code_cache(); // Concurrent part of sweep job
   static void possibly_sweep();   // Compiler threads call this to sweep
--- a/src/share/vm/runtime/thread.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/runtime/thread.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -2861,6 +2861,7 @@
 }
 
 
+#ifdef ASSERT
 // Print or validate the layout of stack frames
 void JavaThread::print_frame_layout(int depth, bool validate_only) {
   ResourceMark rm;
@@ -2878,7 +2879,7 @@
     values.print();
   }
 }
-
+#endif
 
 void JavaThread::trace_stack_from(vframe* start_vf) {
   ResourceMark rm;
@@ -2942,12 +2943,22 @@
   _queue = queue;
   _counters = counters;
   _buffer_blob = NULL;
+  _scanned_nmethod = NULL;
 
 #ifndef PRODUCT
   _ideal_graph_printer = NULL;
 #endif
 }
 
+void CompilerThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+  JavaThread::oops_do(f, cf);
+  if (_scanned_nmethod != NULL && cf != NULL) {
+    // Safepoints can occur while the sweeper is scanning an nmethod, so
+    // process it here to make sure it isn't unloaded in the middle of
+    // a scan.
+    cf->do_code_blob(_scanned_nmethod);
+  }
+}
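
Editor's note: NMethodMarker (in sweeper.cpp above) and this oops_do override cooperate. The marker publishes the nmethod via set_scanned_nmethod for exactly the duration of the scan, and the override reports whatever is published as a root so it survives any safepoint inside that scope. A hedged sketch of the RAII publish/unpublish pattern in plain C++ (Worker, ScopedMarker, and Item are invented names):

#include <cassert>
#include <cstdio>

struct Item { int id; };

struct Worker {
    Item* scanned;                       // stands in for _scanned_nmethod
    Worker() : scanned(nullptr) {}

    // Stand-in for CompilerThread::oops_do: the published item is treated
    // as a root, so a concurrent collector would keep it alive.
    void visit_roots() const {
        if (scanned != nullptr) {
            std::printf("keeping item %d alive\n", scanned->id);
        }
    }
};

// Stand-in for NMethodMarker: publish on construction, unpublish on
// destruction, so the item stays visible for the whole scope.
class ScopedMarker {
    Worker& _worker;
public:
    ScopedMarker(Worker& worker, Item* item) : _worker(worker) {
        assert(worker.scanned == nullptr && "reset to null before reuse");
        _worker.scanned = item;
    }
    ~ScopedMarker() { _worker.scanned = nullptr; }
};

int main() {
    Worker worker;
    Item nm = { 42 };
    {
        ScopedMarker marker(worker, &nm);
        worker.visit_roots();            // sees item 42
    }
    worker.visit_roots();                // sees nothing
    return 0;
}
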
 
 // ======= Threads ========
 
--- a/src/share/vm/runtime/thread.hpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/runtime/thread.hpp	Thu Jun 02 13:37:40 2011 -0700
@@ -439,7 +439,7 @@
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CodeBlobClosure* cf);
+  virtual void oops_do(OopClosure* f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
 private:
@@ -1381,7 +1381,7 @@
   void trace_frames()                            PRODUCT_RETURN;
 
   // Print an annotated view of the stack frames
-  void print_frame_layout(int depth = 0, bool validate_only = false) PRODUCT_RETURN;
+  void print_frame_layout(int depth = 0, bool validate_only = false) NOT_DEBUG_RETURN;
   void validate_frame_layout() {
     print_frame_layout(0, true);
   }
@@ -1698,6 +1698,8 @@
   CompileQueue* _queue;
   BufferBlob*   _buffer_blob;
 
+  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
+
  public:
 
   static CompilerThread* current();
@@ -1726,6 +1728,11 @@
     _log = log;
   }
 
+  // GC support
+  // Apply "f->do_oop" to all root oops in "this".
+  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+  void oops_do(OopClosure* f, CodeBlobClosure* cf);
+
 #ifndef PRODUCT
 private:
   IdealGraphPrinter *_ideal_graph_printer;
@@ -1737,6 +1744,12 @@
   // Get/set the thread's current task
   CompileTask*  task()                           { return _task; }
   void          set_task(CompileTask* task)      { _task = task; }
+
+  // Track the nmethod currently being scanned by the sweeper
+  void          set_scanned_nmethod(nmethod* nm) {
+    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
+    _scanned_nmethod = nm;
+  }
 };
 
 inline CompilerThread* CompilerThread::current() {
--- a/src/share/vm/runtime/vmStructs.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -783,6 +783,7 @@
   nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
   nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
+  nonstatic_field(nmethod,             _state,                                        unsigned char)                         \
   nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   nonstatic_field(nmethod,             _orig_pc_offset,                               int)                                   \
@@ -800,6 +801,8 @@
   nonstatic_field(nmethod,             _osr_entry_point,                              address)                               \
   nonstatic_field(nmethod,             _lock_count,                                   jint)                                  \
   nonstatic_field(nmethod,             _stack_traversal_mark,                         long)                                  \
+  nonstatic_field(nmethod,             _compile_id,                                   int)                                   \
+  nonstatic_field(nmethod,             _marked_for_deoptimization,                    bool)                                  \
                                                                                                                                      \
   /********************************/                                                                                                 \
   /* JavaCalls (NOTE: incomplete) */                                                                                                 \
@@ -1310,11 +1313,13 @@
                                                                           \
   declare_toplevel_type(CodeBlob)                                         \
   declare_type(BufferBlob,            CodeBlob)                           \
-  declare_type(nmethod,       CodeBlob)                           \
+  declare_type(AdapterBlob,           BufferBlob)                         \
+  declare_type(nmethod,               CodeBlob)                           \
   declare_type(RuntimeStub,           CodeBlob)                           \
   declare_type(SingletonBlob,         CodeBlob)                           \
   declare_type(SafepointBlob,         SingletonBlob)                      \
   declare_type(DeoptimizationBlob,    SingletonBlob)                      \
+  declare_type(RicochetBlob,          SingletonBlob)                      \
   declare_c2_type(ExceptionBlob,      SingletonBlob)                      \
   declare_c2_type(UncommonTrapBlob,   CodeBlob)                           \
                                                                           \
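
Editor's note: the vmStructs table is an X-macro. Each (type, field) entry is expanded through different macros to produce both name strings and offsetof records, which the serviceability agent reads to decode VM internals such as the newly exposed nmethod fields _state and _compile_id. A miniature of the technique, with invented names (nmethod_like, FIELD_LIST, FieldDesc) and only two entries:

#include <cstddef>
#include <cstdio>

// A stand-in for nmethod with the two fields the diff above exposes.
struct nmethod_like {
    int           _compile_id;
    unsigned char _state;
};

// One table of (type, field) pairs, expanded through different macros --
// the same trick the real VM_STRUCTS table uses at much larger scale.
#define FIELD_LIST(entry)            \
    entry(nmethod_like, _compile_id) \
    entry(nmethod_like, _state)

struct FieldDesc {
    const char* type_name;
    const char* field_name;
    size_t      offset;
};

#define MAKE_DESC(type, field) { #type, #field, offsetof(type, field) },
static const FieldDesc field_descs[] = { FIELD_LIST(MAKE_DESC) };
#undef MAKE_DESC

int main() {
    const size_t n = sizeof(field_descs) / sizeof(field_descs[0]);
    for (size_t i = 0; i < n; i++) {
        std::printf("%s::%s at offset %zu\n",
                    field_descs[i].type_name,
                    field_descs[i].field_name,
                    field_descs[i].offset);
    }
    return 0;
}
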
--- a/src/share/vm/utilities/yieldingWorkgroup.cpp	Fri May 27 15:01:07 2011 -0700
+++ b/src/share/vm/utilities/yieldingWorkgroup.cpp	Thu Jun 02 13:37:40 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/5091921/Test6890943.sh	Fri May 27 15:01:07 2011 -0700
+++ b/test/compiler/5091921/Test6890943.sh	Thu Jun 02 13:37:40 2011 -0700
@@ -52,7 +52,7 @@
 
 ${TESTJAVA}/bin/javac -d . Test6890943.java
 
-${TESTJAVA}/bin/java  ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
+${TESTJAVA}/bin/java -XX:-PrintVMOptions ${TESTVMOPTS} Test6890943 < input6890943.txt > test.out 2>&1
 
 diff output6890943.txt test.out
 
--- a/test/compiler/5091921/Test7005594.java	Fri May 27 15:01:07 2011 -0700
+++ b/test/compiler/5091921/Test7005594.java	Thu Jun 02 13:37:40 2011 -0700
@@ -27,7 +27,7 @@
  * @bug 7005594
  * @summary Array overflow not handled correctly with loop optimizations
  *
- * @run main/othervm -Xms2048m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594
+ * @run shell Test7005594.sh
  */
 
 public class Test7005594 {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/5091921/Test7005594.sh	Thu Jun 02 13:37:40 2011 -0700
@@ -0,0 +1,80 @@
+#!/bin/sh
+# 
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+# 
+# 
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTCLASSES=${TESTCLASSES}"
+echo "CLASSPATH=${CLASSPATH}"
+
+set -x
+
+cp ${TESTSRC}/Test7005594.java .
+cp ${TESTSRC}/Test7005594.sh .
+
+${TESTJAVA}/bin/javac -d . Test7005594.java
+
+${TESTJAVA}/bin/java ${TESTVMOPTS} -Xms1600m -Xcomp -XX:CompileOnly=Test7005594.test Test7005594 > test.out 2>&1
+
+result=$?
+
+cat test.out
+
+if [ $result -eq 95 ]
+then
+  echo "Passed"
+  exit 0
+fi
+
+if [ $result -eq 97 ]
+then
+  echo "Failed"
+  exit 1
+fi
+
+# The test should also pass when there is not enough space for the object heap
+grep "Could not reserve enough space for object heap" test.out
+if [ $? = 0 ]
+then
+  echo "Passed"
+  exit 0
+else
+  echo "Failed"
+  exit 1
+fi